content (string) | avg_line_length (float64) | max_line_length (int64) | alphanum_fraction (float64) | licenses (sequence) | repository_name (string) | path (string) | size (int64) | lang (string; 1 class: Python)
---|---|---|---|---|---|---|---|---|
import requests
from datetime import datetime
import psycopg2
import time
def setup():
# Create database connection
conn = psycopg2.connect(database="postgres", user="postgres",
password="password", host="127.0.0.1", port="5432")
return conn
def call_api():
URL = "https://api.coindesk.com/v1/bpi/currentprice.json"
conn = setup()
    while True:
r = requests.get(url=URL)
current_time = datetime.now()
data = r.json()
price = data["bpi"]["USD"]["rate_float"]
cur = conn.cursor()
        cur.execute(
            "INSERT INTO BT_Price (Created_at, Price) VALUES (%s, %s)",
            (current_time, price))
conn.commit()
time.sleep(15)
if __name__ == "__main__":
call_api()
| 24.870968 | 95 | 0.597925 | ["MIT"] | johnjdailey/JS-Realtime-Dashboard | server/models/bitcoin_price_API.py | 771 | Python |
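The logger above assumes a `BT_Price` table already exists. A minimal, hypothetical schema sketch that would satisfy the INSERT (the column types are an assumption, not taken from the repository):

```python
import psycopg2

# hypothetical one-off setup; connection parameters mirror setup() above
conn = psycopg2.connect(database="postgres", user="postgres",
                        password="password", host="127.0.0.1", port="5432")
cur = conn.cursor()
# assumed column types: a timestamp and a double precision price
cur.execute("CREATE TABLE IF NOT EXISTS BT_Price (Created_at timestamp, Price double precision)")
conn.commit()
conn.close()
```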
import sys
import re
import pandas as pd
import networkx as nx  # required for nx.read_gml below
network_filename = sys.argv[1]
m = re.match("networks/(?P<dataset>.*?)_similarity", network_filename)
dataset = m.groupdict()['dataset']
G=nx.read_gml(network_filename)
labels=pd.read_csv(f"munged_data/{dataset}/labels.csv", index_col=0)
metadata = pd.read_csv(f"data/intermediate/{dataset}/metadata.csv", index_col=0)
features = pd.read_csv(f"data/intermediate/{dataset}/features.csv", index_col=0)
train = pd.read_csv(f"data/intermediate/{dataset}/train.csv", header = None)[0].values
# NOTE: `test` (the held-out identifiers) is not loaded in this script; only `train` is read above
testing = pd.Series({i: (i in test) for i in labels.index})
labels = labels.mask(testing, other=0)
# make_propagator / propagate (and `moas`) are expected to come from the project's propagation code
propagator, nodes = make_propagator(G)
df, df_time = propagate(propagator, nodes, moas)
df.to_csv(f"predictions/{dataset}/predicted_by_propagation.csv")
| 33.869565 | 86 | 0.762516 | ["MIT"] | menchelab/UMAPanalysis | src/classification/predict_with_umap.py | 779 | Python |
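The `labels.mask(testing, other=0)` step above hides held-out labels before propagation. A tiny, self-contained pandas illustration of that pattern (toy values, unrelated to the dataset):

```python
import pandas as pd

labels = pd.Series({"a": 1, "b": 2, "c": 3})
is_test = pd.Series({"a": False, "b": True, "c": False})

# rows flagged as test are replaced by 0, exactly like labels.mask(testing, other=0)
masked = labels.mask(is_test, other=0)
print(masked.tolist())  # [1, 0, 3]
```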
"""
This file offers the methods to automatically retrieve the graph Streptomyces sp. NRRLF5008.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def StreptomycesSpNrrlf5008(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Streptomyces sp. NRRLF5008 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Streptomyces sp. NRRLF5008 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="StreptomycesSpNrrlf5008",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 33.009259 | 223 | 0.677419 | ["MIT"] | AnacletoLAB/ensmallen | bindings/python/ensmallen/datasets/string/streptomycesspnrrlf5008.py | 3,565 | Python |
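A short usage sketch for the retrieval function above; the import path follows the file's location in the package, and the first call downloads and caches the STRING files, so network access is assumed:

```python
from ensmallen.datasets.string import StreptomycesSpNrrlf5008

# build the undirected protein-protein association graph from links.v11.5
graph = StreptomycesSpNrrlf5008(directed=False, version="links.v11.5")
print(graph)
```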
from django.db import models
from django.contrib.auth.models import User
# Which data the user already has:
# SuperUserInformation
# User: Jose
# Email: [email protected]
# Password: testpassword
# Create your models here.
class UserProfileInfo(models.Model):
# Create relationship (don't inherit from User!)
user = models.OneToOneField(User, on_delete=models.CASCADE)
# Add any additional attributes to the user you want
portfolio_site = models.URLField(blank=True)
    # Requires Pillow (pip install pillow); blank=True means users do not have to upload a picture if they do not want to.
profile_pic = models.ImageField(upload_to='basic_app/profile_pics',blank=True)
def __str__(self):
# Built-in attribute of django.contrib.auth.models.User !
return self.user.username
| 33.6 | 92 | 0.717857 | ["MIT"] | warpalatino/public | Python learnings/Django projects/learning_users/basic_app/models.py | 840 | Python |
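A brief sketch of how the profile model above is typically used, e.g. from `python manage.py shell` (username and URL are placeholders):

```python
from django.contrib.auth.models import User
from basic_app.models import UserProfileInfo

user = User.objects.create_user(username="jose", password="testpassword")
# one profile per user through the OneToOneField; the extra fields are optional
profile = UserProfileInfo.objects.create(user=user, portfolio_site="https://example.com")
print(profile)  # __str__ returns the related username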
#pythran export compute_mask(int[:,:], int[:,:])
#runas import numpy as np; coords = np.array([[0, 0, 1, 1, 2, 2]]); indices = np.array([[0, 3, 2]]); compute_mask(coords, indices)
import numpy as np
def compute_mask(coords, indices): # pragma: no cover
"""
Gets the mask for the coords given the indices in slice format.
Works with either start-stop ranges of matching indices into coords
called "pairs" (start-stop pairs) or filters the mask directly, based
on which is faster.
Exploits the structure in sorted coords, which is that for a constant
value of coords[i - 1], coords[i - 2] and so on, coords[i] is sorted.
Concretely, ``coords[i, coords[i - 1] == v1 & coords[i - 2] = v2, ...]``
is always sorted. It uses this sortedness to find sub-pairs for each
dimension given the previous, and so on. This is efficient for small
slices or ints, but not for large ones.
After it detects that working with pairs is rather inefficient (or after
going through each possible index), it constructs a filtered mask from the
start-stop pairs.
Parameters
----------
coords : np.ndarray
The coordinates of the array.
indices : np.ndarray
The indices in the form of slices such that indices[:, 0] are starts,
indices[:, 1] are stops and indices[:, 2] are steps.
Returns
-------
mask : np.ndarray
The starts and stops in the mask.
is_slice : bool
Whether or not the array represents a continuous slice.
Examples
--------
Let's create some mock coords and indices
>>> import numpy as np
>>> coords = np.array([[0, 0, 1, 1, 2, 2]])
>>> indices = np.array([[0, 3, 2]]) # Equivalent to slice(0, 3, 2)
Now let's get the mask. Notice that the indices of ``0`` and ``2`` are matched.
    >>> compute_mask(coords, indices)
(array([0, 1, 4, 5]), False)
Now, let's try with a more "continuous" slice. Matches ``0`` and ``1``.
>>> indices = np.array([[0, 2, 1]])
    >>> compute_mask(coords, indices)
(array([0, 4]), True)
This is equivalent to mask being ``slice(0, 4, 1)``.
"""
# Set the initial mask to be the entire range of coordinates.
starts = [0]
stops = [coords.shape[1]]
n_matches = coords.shape[1]
i = 0
while i < len(indices):
# Guesstimate whether working with pairs is more efficient or
# working with the mask directly.
# One side is the estimate of time taken for binary searches
# (n_searches * log(avg_length))
# The other is an estimated time of a linear filter for the mask.
n_pairs = len(starts)
n_current_slices = _get_slice_len(indices[i]) * n_pairs + 2
if n_current_slices * np.log(n_current_slices / max(n_pairs, 1)) > \
n_matches + n_pairs:
break
# For each of the pairs, search inside the coordinates for other
# matching sub-pairs.
# This gets the start-end coordinates in coords for each 'sub-array'
# Which would come out of indexing a single integer.
starts, stops, n_matches = _get_mask_pairs(starts, stops, coords[i], indices[i])
i += 1
# Combine adjacent pairs
starts, stops = _join_adjacent_pairs(starts, stops)
# If just one pair is left over, treat it as a slice.
if i == len(indices) and len(starts) == 1:
return np.array([starts[0], stops[0]]), True
# Convert start-stop pairs into mask, filtering by remaining
# coordinates.
mask = _filter_pairs(starts, stops, coords[i:], indices[i:])
return np.array(mask, dtype=np.intp), False
def _get_slice_len(idx):
"""
Get the number of elements in a slice.
Parameters
----------
idx : np.ndarray
A (3,) shaped array containing start, stop, step
Returns
-------
n : int
The length of the slice.
Examples
--------
>>> idx = np.array([5, 15, 5])
>>> _get_slice_len(idx)
2
"""
start, stop, step = idx[0], idx[1], idx[2]
if step > 0:
return (stop - start + step - 1) // step
else:
return (start - stop - step - 1) // (-step)
def _get_mask_pairs(starts_old, stops_old, c, idx): # pragma: no cover
"""
Gets the pairs for a following dimension given the pairs for
a dimension.
For each pair, it searches in the following dimension for
matching coords and returns those.
The total combined length of all pairs is returned to
help with the performance guesstimate.
Parameters
----------
starts_old, stops_old : list[int]
The starts and stops from the previous index.
c : np.ndarray
The coords for this index's dimension.
idx : np.ndarray
The index in the form of a slice.
idx[0], idx[1], idx[2] = start, stop, step
Returns
-------
starts, stops: list
The starts and stops after applying the current index.
n_matches : int
The sum of elements in all ranges.
Examples
--------
>>> c = np.array([1, 2, 1, 2, 1, 1, 2, 2])
>>> starts_old = [4]
>>> stops_old = [8]
>>> idx = np.array([1, 2, 1])
>>> _get_mask_pairs(starts_old, stops_old, c, idx)
([4], [6], 2)
"""
starts = []
stops = []
n_matches = 0
for j in range(len(starts_old)):
# For each matching "integer" in the slice, search within the "sub-coords"
# Using binary search.
for p_match in range(idx[0], idx[1], idx[2]):
start = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match) + starts_old[j]
stop = np.searchsorted(c[starts_old[j]:stops_old[j]], p_match + 1) + starts_old[j]
if start != stop:
starts.append(start)
stops.append(stop)
n_matches += stop - start
return starts, stops, n_matches
def _join_adjacent_pairs(starts_old, stops_old): # pragma: no cover
"""
Joins adjacent pairs into one. For example, 2-5 and 5-7
will reduce to 2-7 (a single pair). This may help in
returning a slice in the end which could be faster.
Parameters
----------
starts_old, stops_old : list[int]
The input starts and stops
Returns
-------
starts, stops : list[int]
The reduced starts and stops.
Examples
--------
>>> starts = [2, 5]
>>> stops = [5, 7]
>>> _join_adjacent_pairs(starts, stops)
([2], [7])
"""
if len(starts_old) <= 1:
return starts_old, stops_old
starts = [starts_old[0]]
stops = []
for i in range(1, len(starts_old)):
if starts_old[i] != stops_old[i - 1]:
starts.append(starts_old[i])
stops.append(stops_old[i - 1])
stops.append(stops_old[-1])
return starts, stops
def _filter_pairs(starts, stops, coords, indices): # pragma: no cover
"""
Converts all the pairs into a single integer mask, additionally filtering
by the indices.
Parameters
----------
starts, stops : list[int]
The starts and stops to convert into an array.
coords : np.ndarray
The coordinates to filter by.
indices : np.ndarray
The indices in the form of slices such that indices[:, 0] are starts,
indices[:, 1] are stops and indices[:, 2] are steps.
Returns
-------
mask : list
The output integer mask.
Examples
--------
>>> import numpy as np
>>> starts = [2]
>>> stops = [7]
>>> coords = np.array([[0, 1, 2, 3, 4, 5, 6, 7]])
>>> indices = np.array([[2, 8, 2]]) # Start, stop, step pairs
>>> _filter_pairs(starts, stops, coords, indices)
[2, 4, 6]
"""
mask = []
# For each pair,
for i in range(len(starts)):
# For each element match within the pair range
for j in range(starts[i], stops[i]):
match = True
# Check if it matches all indices
for k in range(len(indices)):
idx = indices[k]
elem = coords[k, j]
match &= ((elem - idx[0]) % idx[2] == 0 and
((idx[2] > 0 and idx[0] <= elem < idx[1])
or (idx[2] < 0 and idx[0] >= elem > idx[1])))
# and append to the mask if so.
if match:
mask.append(j)
return mask
| 30.714286 | 130 | 0.584377 | ["BSD-3-Clause"] | AlifeLines/pythran | pythran/tests/pydata/compute_mask.py | 8,385 | Python |
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import math
import os
import os.path as osp
import mmcv
from mmocr.utils import convert_annotations
def collect_files(img_dir, gt_dir):
"""Collect all images and their corresponding groundtruth files.
Args:
img_dir (str): The image directory
gt_dir (str): The groundtruth directory
Returns:
files (list): The list of tuples (img_file, groundtruth_file)
"""
assert isinstance(img_dir, str)
assert img_dir
assert isinstance(gt_dir, str)
assert gt_dir
ann_list, imgs_list = [], []
for gt_file in os.listdir(gt_dir):
ann_list.append(osp.join(gt_dir, gt_file))
imgs_list.append(osp.join(img_dir, gt_file.replace('.json', '.png')))
files = list(zip(sorted(imgs_list), sorted(ann_list)))
assert len(files), f'No images found in {img_dir}'
print(f'Loaded {len(files)} images from {img_dir}')
return files
def collect_annotations(files, nproc=1):
"""Collect the annotation information.
Args:
files (list): The list of tuples (image_file, groundtruth_file)
nproc (int): The number of process to collect annotations
Returns:
images (list): The list of image information dicts
"""
assert isinstance(files, list)
assert isinstance(nproc, int)
if nproc > 1:
images = mmcv.track_parallel_progress(
load_img_info, files, nproc=nproc)
else:
images = mmcv.track_progress(load_img_info, files)
return images
def load_img_info(files):
"""Load the information of one image.
Args:
files (tuple): The tuple of (img_file, groundtruth_file)
Returns:
img_info (dict): The dict of the img and annotation information
"""
assert isinstance(files, tuple)
img_file, gt_file = files
assert osp.basename(gt_file).split('.')[0] == osp.basename(img_file).split(
'.')[0]
# read imgs while ignoring orientations
img = mmcv.imread(img_file, 'unchanged')
img_info = dict(
file_name=osp.join(osp.basename(img_file)),
height=img.shape[0],
width=img.shape[1],
segm_file=osp.join(osp.basename(gt_file)))
if osp.splitext(gt_file)[1] == '.json':
img_info = load_json_info(gt_file, img_info)
else:
raise NotImplementedError
return img_info
def load_json_info(gt_file, img_info):
"""Collect the annotation information.
Args:
gt_file (str): The path to ground-truth
img_info (dict): The dict of the img and annotation information
Returns:
img_info (dict): The dict of the img and annotation information
"""
annotation = mmcv.load(gt_file)
anno_info = []
for form in annotation['form']:
for ann in form['words']:
iscrowd = 1 if len(ann['text']) == 0 else 0
x1, y1, x2, y2 = ann['box']
x = max(0, min(math.floor(x1), math.floor(x2)))
y = max(0, min(math.floor(y1), math.floor(y2)))
w, h = math.ceil(abs(x2 - x1)), math.ceil(abs(y2 - y1))
bbox = [x, y, w, h]
segmentation = [x, y, x + w, y, x + w, y + h, x, y + h]
anno = dict(
iscrowd=iscrowd,
category_id=1,
bbox=bbox,
area=w * h,
segmentation=[segmentation])
anno_info.append(anno)
img_info.update(anno_info=anno_info)
return img_info
def parse_args():
parser = argparse.ArgumentParser(
description='Generate training and test set of FUNSD ')
parser.add_argument('root_path', help='Root dir path of FUNSD')
parser.add_argument(
'--nproc', default=1, type=int, help='Number of process')
args = parser.parse_args()
return args
def main():
args = parse_args()
root_path = args.root_path
for split in ['training', 'test']:
print(f'Processing {split} set...')
with mmcv.Timer(print_tmpl='It takes {}s to convert FUNSD annotation'):
files = collect_files(
osp.join(root_path, 'imgs'),
osp.join(root_path, 'annotations', split))
image_infos = collect_annotations(files, nproc=args.nproc)
convert_annotations(
image_infos, osp.join(root_path,
'instances_' + split + '.json'))
if __name__ == '__main__':
main()
| 28.240506 | 79 | 0.613178 | ["Apache-2.0"] | GHuiXin/mmocr | tools/data/textdet/funsd_converter.py | 4,462 | Python |
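To make the box handling in `load_json_info` concrete, here is the same arithmetic applied to one made-up annotation box:

```python
import math

x1, y1, x2, y2 = 10.2, 20.7, 50.1, 60.4                     # ann['box']
x = max(0, min(math.floor(x1), math.floor(x2)))              # -> 10
y = max(0, min(math.floor(y1), math.floor(y2)))              # -> 20
w, h = math.ceil(abs(x2 - x1)), math.ceil(abs(y2 - y1))      # -> 40, 40
bbox = [x, y, w, h]                                          # [10, 20, 40, 40]
segmentation = [x, y, x + w, y, x + w, y + h, x, y + h]      # axis-aligned quad
print(bbox, segmentation)
```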
# Thai Thien
# 1351040
import pytest
import cv2
import sys
import sys, os
import numpy as np
import upload
# make sure it can find matcher.py file
sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/.."))
import util
from matcher import Matcher
# make sure it can find detector.py file
sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/.."))
from detector import Detector
prefix = './image/meowdata/'
chuot1_path = prefix + 'chuot1.jpg'
chuot2_path = prefix + 'chuot2.jpg'
chuot3_path = prefix + 'chuot3.jpg'
dau1_path = prefix +'dau1.jpg'
dau2_path = prefix +'dau2.jpg'
dau3_path = prefix +'dau3.jpg'
dau4_path = prefix +'dau4.jpg'
keyboard1_path = prefix +'keyboard1.jpg'
keyboard2_path = prefix +'keyboard2.jpg'
keyboard3_path = prefix +'keyboard3.jpg'
keyboard4_path = prefix +'keyboard4.jpg'
chuot1 = cv2.imread(chuot1_path)
chuot2 = cv2.imread(chuot2_path)
chuot3 = cv2.imread(chuot3_path)
dau1 = cv2.imread(dau1_path)
dau2 = cv2.imread(dau2_path)
keyboard1 = cv2.imread(keyboard1_path)
keyboard2 = cv2.imread(keyboard2_path)
isUpload = False
class TestMatcher():
def test_matches_dog_sift(self):
_matcher = Matcher()
_name = 'chuot1_2_dog_sift'
_file = './output/'+_name+'.png'
matches, result = _matcher.dog_match(chuot1, chuot2, 20)
cv2.imwrite(_file, result)
if (isUpload):
upload.imgur(_file,_name)
_name = 'keyboard1_2_dog_sift'
_file = './output/'+_name+'.png'
        matches, result = _matcher.dog_match(keyboard1, keyboard2, 20)
cv2.imwrite(_file, result)
if (isUpload):
upload.imgur(_file, _name)
| 25.121212 | 66 | 0.693004 | ["MIT"] | ttpro1995/CV_Assignment02 | test/test_meow_data.py | 1,658 | Python |
# Generated by Django 3.2.4 on 2021-07-23 15:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pythons_auth', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='pythonsuser',
name='is_staff',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='pythonsuser',
name='is_superuser',
field=models.BooleanField(default=False),
),
]
| 23.041667 | 53 | 0.584087 | ["MIT"] | BoyanPeychinov/python_web_framework | pythons/pythons/pythons_auth/migrations/0002_auto_20210723_1847.py | 553 | Python |
"""
Classes and functions useful for rewriting expressions for optimized code
generation. Some languages (or standards thereof), e.g. C99, offer specialized
math functions for better performance and/or precision.
Using the ``optimize`` function in this module, together with a collection of
rules (represented as instances of ``Optimization``), one can rewrite the
expressions for this purpose::
>>> from sympy import Symbol, exp, log
>>> from sympy.codegen.rewriting import optimize, optims_c99
>>> x = Symbol('x')
>>> optimize(3*exp(2*x) - 3, optims_c99)
3*expm1(2*x)
>>> optimize(exp(2*x) - 3, optims_c99)
exp(2*x) - 3
>>> optimize(log(3*x + 3), optims_c99)
log1p(x) + log(3)
>>> optimize(log(2*x + 3), optims_c99)
log(2*x + 3)
The ``optims_c99`` imported above is tuple containing the following instances
(which may be imported from ``sympy.codegen.rewriting``):
- ``expm1_opt``
- ``log1p_opt``
- ``exp2_opt``
- ``log2_opt``
- ``log2const_opt``
"""
from itertools import chain
from sympy import log, exp, Max, Min, Wild, expand_log, Dummy
from sympy.assumptions import Q, ask
from sympy.codegen.cfunctions import log1p, log2, exp2, expm1
from sympy.codegen.matrix_nodes import MatrixSolve
from sympy.core.expr import UnevaluatedExpr
from sympy.core.mul import Mul
from sympy.matrices.expressions.matexpr import MatrixSymbol
from sympy.utilities.iterables import sift
class Optimization:
""" Abstract base class for rewriting optimization.
Subclasses should implement ``__call__`` taking an expression
as argument.
Parameters
==========
cost_function : callable returning number
priority : number
"""
def __init__(self, cost_function=None, priority=1):
self.cost_function = cost_function
self.priority=priority
class ReplaceOptim(Optimization):
""" Rewriting optimization calling replace on expressions.
The instance can be used as a function on expressions for which
it will apply the ``replace`` method (see
:meth:`sympy.core.basic.Basic.replace`).
Parameters
==========
query : first argument passed to replace
value : second argument passed to replace
Examples
========
>>> from sympy import Symbol, Pow
>>> from sympy.codegen.rewriting import ReplaceOptim
>>> from sympy.codegen.cfunctions import exp2
>>> x = Symbol('x')
>>> exp2_opt = ReplaceOptim(lambda p: p.is_Pow and p.base == 2,
... lambda p: exp2(p.exp))
>>> exp2_opt(2**x)
exp2(x)
"""
def __init__(self, query, value, **kwargs):
super().__init__(**kwargs)
self.query = query
self.value = value
def __call__(self, expr):
return expr.replace(self.query, self.value)
def optimize(expr, optimizations):
""" Apply optimizations to an expression.
Parameters
==========
expr : expression
optimizations : iterable of ``Optimization`` instances
The optimizations will be sorted with respect to ``priority`` (highest first).
Examples
========
>>> from sympy import log, Symbol
>>> from sympy.codegen.rewriting import optims_c99, optimize
>>> x = Symbol('x')
>>> optimize(log(x+3)/log(2) + log(x**2 + 1), optims_c99)
log1p(x**2) + log2(x + 3)
"""
for optim in sorted(optimizations, key=lambda opt: opt.priority, reverse=True):
new_expr = optim(expr)
if optim.cost_function is None:
expr = new_expr
else:
before, after = map(lambda x: optim.cost_function(x), (expr, new_expr))
if before > after:
expr = new_expr
return expr
exp2_opt = ReplaceOptim(
lambda p: p.is_Pow and p.base == 2,
lambda p: exp2(p.exp)
)
_d = Wild('d', properties=[lambda x: x.is_Dummy])
_u = Wild('u', properties=[lambda x: not x.is_number and not x.is_Add])
_v = Wild('v')
_w = Wild('w')
log2_opt = ReplaceOptim(_v*log(_w)/log(2), _v*log2(_w), cost_function=lambda expr: expr.count(
lambda e: ( # division & eval of transcendentals are expensive floating point operations...
e.is_Pow and e.exp.is_negative # division
or (isinstance(e, (log, log2)) and not e.args[0].is_number)) # transcendental
)
)
log2const_opt = ReplaceOptim(log(2)*log2(_w), log(_w))
logsumexp_2terms_opt = ReplaceOptim(
lambda l: (isinstance(l, log)
and l.args[0].is_Add
and len(l.args[0].args) == 2
and all(isinstance(t, exp) for t in l.args[0].args)),
lambda l: (
Max(*[e.args[0] for e in l.args[0].args]) +
log1p(exp(Min(*[e.args[0] for e in l.args[0].args])))
)
)
def _try_expm1(expr):
protected, old_new = expr.replace(exp, lambda arg: Dummy(), map=True)
factored = protected.factor()
new_old = {v: k for k, v in old_new.items()}
return factored.replace(_d - 1, lambda d: expm1(new_old[d].args[0])).xreplace(new_old)
def _expm1_value(e):
numbers, non_num = sift(e.args, lambda arg: arg.is_number, binary=True)
non_num_exp, non_num_other = sift(non_num, lambda arg: arg.has(exp),
binary=True)
numsum = sum(numbers)
new_exp_terms, done = [], False
for exp_term in non_num_exp:
if done:
new_exp_terms.append(exp_term)
else:
looking_at = exp_term + numsum
attempt = _try_expm1(looking_at)
if looking_at == attempt:
new_exp_terms.append(exp_term)
else:
done = True
new_exp_terms.append(attempt)
if not done:
new_exp_terms.append(numsum)
return e.func(*chain(new_exp_terms, non_num_other))
expm1_opt = ReplaceOptim(lambda e: e.is_Add, _expm1_value)
log1p_opt = ReplaceOptim(
lambda e: isinstance(e, log),
lambda l: expand_log(l.replace(
log, lambda arg: log(arg.factor())
)).replace(log(_u+1), log1p(_u))
)
def create_expand_pow_optimization(limit):
""" Creates an instance of :class:`ReplaceOptim` for expanding ``Pow``.
The requirements for expansions are that the base needs to be a symbol
and the exponent needs to be an Integer (and be less than or equal to
``limit``).
Parameters
==========
limit : int
The highest power which is expanded into multiplication.
Examples
========
>>> from sympy import Symbol, sin
>>> from sympy.codegen.rewriting import create_expand_pow_optimization
>>> x = Symbol('x')
>>> expand_opt = create_expand_pow_optimization(3)
>>> expand_opt(x**5 + x**3)
x**5 + x*x*x
>>> expand_opt(x**5 + x**3 + sin(x)**3)
x**5 + sin(x)**3 + x*x*x
"""
return ReplaceOptim(
lambda e: e.is_Pow and e.base.is_symbol and e.exp.is_Integer and abs(e.exp) <= limit,
lambda p: (
UnevaluatedExpr(Mul(*([p.base]*+p.exp), evaluate=False)) if p.exp > 0 else
1/UnevaluatedExpr(Mul(*([p.base]*-p.exp), evaluate=False))
))
# Optimization procedures for turning A**(-1) * x into MatrixSolve(A, x)
def _matinv_predicate(expr):
# TODO: We should be able to support more than 2 elements
if expr.is_MatMul and len(expr.args) == 2:
left, right = expr.args
if left.is_Inverse and right.shape[1] == 1:
inv_arg = left.arg
if isinstance(inv_arg, MatrixSymbol):
return bool(ask(Q.fullrank(left.arg)))
return False
def _matinv_transform(expr):
left, right = expr.args
inv_arg = left.arg
return MatrixSolve(inv_arg, right)
matinv_opt = ReplaceOptim(_matinv_predicate, _matinv_transform)
# Collections of optimizations:
optims_c99 = (expm1_opt, log1p_opt, exp2_opt, log2_opt, log2const_opt)
| 30.278431 | 96 | 0.643051 | ["BSD-3-Clause"] | Abhishek-IOT/sympy | sympy/codegen/rewriting.py | 7,721 | Python |
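Unlike the other rules, `logsumexp_2terms_opt` has no example above; a small sketch of the rewrite it is meant to perform (expected output shown as a comment, assuming plain symbols):

```python
from sympy import symbols, log, exp
from sympy.codegen.rewriting import optimize, logsumexp_2terms_opt

a, b = symbols('a b')
expr = log(exp(a) + exp(b))
# two-term log-sum-exp rewritten into the numerically safer Max/Min + log1p form
print(optimize(expr, [logsumexp_2terms_opt]))
# expected: Max(a, b) + log1p(exp(Min(a, b)))
```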
import logging
from django.conf import settings
from daiquiri.core.utils import import_class
from .adapter import OaiAdapter
from .models import Record
logger = logging.getLogger(__name__)
def get_metadata_format(metadata_prefix):
return next(metadata_format for metadata_format in settings.OAI_METADATA_FORMATS
if metadata_format['prefix'] == metadata_prefix)
def get_renderer(metadata_prefix):
renderer_class = get_metadata_format(metadata_prefix)['renderer_class']
return import_class(renderer_class)()
def update_records(resource_type, resource):
logger.debug('update_records %s %s', resource_type, resource)
adapter = OaiAdapter()
try:
resource_id, identifier, datestamp, set_spec, public = adapter.get_record(resource_type, resource)
except TypeError:
raise RuntimeError('Could not obtain record for %s %s' % (resource_type, resource))
if public is True:
for metadata_prefix in adapter.resource_types[resource_type]:
try:
record = Record.objects.get(identifier=identifier, metadata_prefix=metadata_prefix)
except Record.DoesNotExist:
record = Record(identifier=identifier, metadata_prefix=metadata_prefix)
record.datestamp = datestamp
record.set_spec = set_spec
record.deleted = False
record.resource_type = resource_type
record.resource_id = resource_id
record.save()
else:
delete_records(resource_type, resource)
def delete_records(resource_type, resource):
logger.debug('delete_records %s %s', resource_type, resource)
adapter = OaiAdapter()
try:
resource_id, identifier, datestamp, set_spec, public = adapter.get_record(resource_type, resource)
except TypeError:
raise RuntimeError('Could not obtain record for %s %s' % (resource_type, resource))
for metadata_prefix in adapter.resource_types[resource_type]:
try:
record = Record.objects.get(identifier=identifier, metadata_prefix=metadata_prefix)
record.datestamp = datestamp
record.set_spec = set_spec
record.deleted = True
record.save()
except Record.DoesNotExist:
pass
| 33.15942 | 106 | 0.697115 | ["Apache-2.0"] | UCBerkeleySETI/daiquiri | daiquiri/oai/utils.py | 2,288 | Python |
from redbot.core.bot import Red
from cogwhitelist.cogwhitelist import CogWhitelist
def setup(bot: Red):
bot.add_cog(CogWhitelist(bot))
| 20.142857 | 50 | 0.787234 | ["MIT"] | Tominous/Swift-Cogs | cogwhitelist/__init__.py | 141 | Python |
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
"""PySide2 port of the network/fortuneserver example from Qt v5.x"""
import random
from PySide2 import QtCore, QtWidgets, QtNetwork
class Server(QtWidgets.QDialog):
def __init__(self, parent=None):
super(Server, self).__init__(parent)
statusLabel = QtWidgets.QLabel()
quitButton = QtWidgets.QPushButton("Quit")
quitButton.setAutoDefault(False)
self.tcpServer = QtNetwork.QTcpServer(self)
if not self.tcpServer.listen():
QtWidgets.QMessageBox.critical(self, "Fortune Server",
"Unable to start the server: %s." % self.tcpServer.errorString())
self.close()
return
statusLabel.setText("The server is running on port %d.\nRun the "
"Fortune Client example now." % self.tcpServer.serverPort())
self.fortunes = (
"You've been leading a dog's life. Stay off the furniture.",
"You've got to think about tomorrow.",
"You will be surprised by a loud noise.",
"You will feel hungry again in another hour.",
"You might have mail.",
"You cannot kill time without injuring eternity.",
"Computers are not intelligent. They only think they are.")
quitButton.clicked.connect(self.close)
self.tcpServer.newConnection.connect(self.sendFortune)
buttonLayout = QtWidgets.QHBoxLayout()
buttonLayout.addStretch(1)
buttonLayout.addWidget(quitButton)
buttonLayout.addStretch(1)
mainLayout = QtWidgets.QVBoxLayout()
mainLayout.addWidget(statusLabel)
mainLayout.addLayout(buttonLayout)
self.setLayout(mainLayout)
self.setWindowTitle("Fortune Server")
def sendFortune(self):
block = QtCore.QByteArray()
out = QtCore.QDataStream(block, QtCore.QIODevice.WriteOnly)
out.setVersion(QtCore.QDataStream.Qt_4_0)
out.writeUInt16(0)
fortune = self.fortunes[random.randint(0, len(self.fortunes) - 1)]
out.writeString(fortune)
out.device().seek(0)
out.writeUInt16(block.size() - 2)
clientConnection = self.tcpServer.nextPendingConnection()
clientConnection.disconnected.connect(clientConnection.deleteLater)
clientConnection.write(block)
clientConnection.disconnectFromHost()
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
server = Server()
random.seed(None)
sys.exit(server.exec_())
| 38.779661 | 85 | 0.664773 | ["MIT"] | 4RCAN3/GenPai | gen/Lib/site-packages/PySide2/examples/network/fortuneserver.py | 4,576 | Python |
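The status label above refers to the companion Fortune Client example; a minimal sketch of the matching client-side read for this server's framing (a 2-byte size prefix followed by a string), assuming an already connected QTcpSocket:

```python
from PySide2 import QtCore

def read_fortune(tcp_socket):
    # mirror the server: a UInt16 length prefix, then the string payload
    ins = QtCore.QDataStream(tcp_socket)
    ins.setVersion(QtCore.QDataStream.Qt_4_0)
    if tcp_socket.bytesAvailable() < 2:
        return None
    block_size = ins.readUInt16()
    if tcp_socket.bytesAvailable() < block_size:
        return None
    return ins.readString()
```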
"""This module takes the data.log file produced by main.cpp and
fetches Bogota's addresses based on the coordinates in the file.
TODO: check if system has requirements - if not, install them
* requests
* subprocess (upcoming)
TODO: include exact time of match
TODO: progress bar
FIXME: select best from multiple addresses
"""
import requests
GOOGLE_MAPS_API_URL = 'http://maps.googleapis.com/maps/api/geocode/json'
LOGNAME = "data-wr.log"
DATANAME = "data-wr-addr.log"
def main():
"""Main function. Read coordinates, fetch addresses and write on file."""
logfile = open(LOGNAME, "r")
datafile = open(DATANAME, "w")
logfile.readline() # first line is always a date
print("fetching addresses...")
line = logfile.readline()
while not line.startswith("***") and line.strip():
cat, lat, lng = line.split(';')
latlng = "%s,%s" % (lat, lng)
params = {
'latlng': latlng
}
req = requests.get(GOOGLE_MAPS_API_URL, params=params)
res = req.json()
print(res)
result = res['results'][0]
address = result['formatted_address']
datafile.write("%s en %s |%s,%s" % (cat, address.partition(",")[0], lat, lng))
line = logfile.readline()
logfile.close()
datafile.close()
print("done.")
main()
| 24.62963 | 86 | 0.630827 | ["MIT"] | jdnietov/wazeReading | locate.py | 1,330 | Python |
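One practical caveat: Google's Geocoding endpoint now rejects keyless requests, so the `params` dict above generally needs an API key too. A hedged, self-contained sketch (the key and coordinate are placeholders):

```python
import requests

GOOGLE_MAPS_API_URL = 'https://maps.googleapis.com/maps/api/geocode/json'

params = {
    'latlng': '4.7110,-74.0721',   # example Bogota coordinate
    'key': 'YOUR_API_KEY',         # placeholder; required by the Geocoding API
}
res = requests.get(GOOGLE_MAPS_API_URL, params=params).json()
print(res.get('status'))
```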
"""
SC101 Baby Names Project
Adapted from Nick Parlante's Baby Names assignment by
Jerry Liao.
YOUR DESCRIPTION HERE
"""
import tkinter
import babynames
import babygraphicsgui as gui
FILENAMES = [
'data/full/baby-1900.txt', 'data/full/baby-1910.txt',
'data/full/baby-1920.txt', 'data/full/baby-1930.txt',
'data/full/baby-1940.txt', 'data/full/baby-1950.txt',
'data/full/baby-1960.txt', 'data/full/baby-1970.txt',
'data/full/baby-1980.txt', 'data/full/baby-1990.txt',
'data/full/baby-2000.txt', 'data/full/baby-2010.txt'
]
CANVAS_WIDTH = 1000
CANVAS_HEIGHT = 600
YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
GRAPH_MARGIN_SIZE = 20
COLORS = ['red', 'purple', 'green', 'blue']
TEXT_DX = 2
LINE_WIDTH = 2
MAX_RANK = 1000
def get_x_coordinate(width, year_index):
"""
Given the width of the canvas and the index of the current year
in the YEARS list, returns the x coordinate of the vertical
line associated with that year.
Input:
width (int): The width of the canvas
year_index (int): The index of the current year in the YEARS list
Returns:
x_coordinate (int): The x coordinate of the vertical line associated
with the specified year.
"""
space = (width - GRAPH_MARGIN_SIZE*2) / len(YEARS) # space between two lines
x_coordinate = GRAPH_MARGIN_SIZE + year_index * space # x coordinate of the vertical line
return x_coordinate
def draw_fixed_lines(canvas):
"""
Erases all existing information on the given canvas and then
draws the fixed background lines on it.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
Returns:
This function does not return any value.
"""
canvas.delete('all') # delete all existing lines from the canvas
# Write your code below this line
#################################
# horizontal line (upper)
canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH-GRAPH_MARGIN_SIZE,
GRAPH_MARGIN_SIZE, width=LINE_WIDTH)
# horizontal line (bottom)
canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE,
CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, width=LINE_WIDTH)
# vertical lines
for i in range(len(YEARS)):
x = get_x_coordinate(CANVAS_WIDTH, i)
canvas.create_line(x, 0, x, CANVAS_HEIGHT, width=LINE_WIDTH)
canvas.create_text(x+TEXT_DX, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, text=YEARS[i],
anchor=tkinter.NW)
def draw_names(canvas, name_data, lookup_names):
"""
Given a dict of baby name data and a list of name, plots
the historical trend of those names onto the canvas.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
name_data (dict): Dictionary holding baby name data
lookup_names (List[str]): A list of names whose data you want to plot
Returns:
This function does not return any value.
"""
draw_fixed_lines(canvas) # draw the fixed background grid
# Write your code below this line
#################################
# (x_previous, y_previous) represents the rank of the target name in the last year
x_previous = 0
y_previous = 0
# draw the trend lines of the target names respectively
for i in range(len(lookup_names)):
name = lookup_names[i]
        color = COLORS[i % len(COLORS)]  # use the COLORS constant, cycling through it by index
for j in range(len(YEARS)):
year = str(YEARS[j])
# (x, y) represents the rank of the target name in the year
# x: at the year line
# y: associated with the rank of the year
x = get_x_coordinate(CANVAS_WIDTH, j)
if year in name_data[name]: # the target name is in top 1000 of the year
rank = int(name_data[name][year])
                y = GRAPH_MARGIN_SIZE + ((CANVAS_HEIGHT-GRAPH_MARGIN_SIZE*2)/MAX_RANK)*(rank-1)  # use the constants
else: # the target name is not in top 1000 of the year
rank = '*'
y = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE
# add trend lines
if j != 0:
canvas.create_line(x_previous, y_previous, x, y, width=LINE_WIDTH, fill=color)
# add text(name and rank) besides trend lines
canvas.create_text(x + TEXT_DX, y, text=f'{name} {rank}', anchor=tkinter.SW, fill=color)
# record
x_previous = x
y_previous = y
# main() code is provided, feel free to read through it but DO NOT MODIFY
def main():
# Load data
name_data = babynames.read_files(FILENAMES)
print(name_data)
# Create the window and the canvas
top = tkinter.Tk()
top.wm_title('Baby Names')
canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names)
# Call draw_fixed_lines() once at startup so we have the lines
# even before the user types anything.
draw_fixed_lines(canvas)
# This line starts the graphical loop that is responsible for
# processing user interactions and plotting data
top.mainloop()
if __name__ == '__main__':
main()
| 35.86 | 112 | 0.636178 | ["MIT"] | beomgyutxt/stanCode_project | stancode_project/baby_names/babygraphics.py | 5,395 | Python |
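A quick worked example of `get_x_coordinate` with the constants above, showing where the 1930 grid line lands:

```python
CANVAS_WIDTH = 1000
GRAPH_MARGIN_SIZE = 20
YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]

space = (CANVAS_WIDTH - GRAPH_MARGIN_SIZE * 2) / len(YEARS)   # (1000 - 40) / 12 = 80.0
x_1930 = GRAPH_MARGIN_SIZE + YEARS.index(1930) * space        # 20 + 3 * 80 = 260.0
print(space, x_1930)
```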
import numpy as np
from future.cmorph import _dilate
rows = 1024
cols = 1024
srows = 64
scols = 64
image = np.random.randint(0, 255, rows * cols, dtype=np.uint8).reshape(
(rows, cols)
)
selem = np.random.randint(0, 1, srows * scols, dtype=np.uint8).reshape(
(srows, scols)
)
out = np.zeros((rows, cols), dtype=np.uint8)
shift_x = np.int8(2)
shift_y = np.int8(2)
| 21.823529 | 71 | 0.679245 | ["BSD-3-Clause"] | fluiddyn/transonic | doc/for_dev/scikit-image/setup_codes/cmorph__dilate.py | 371 | Python |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
class bigone (Exchange):
def describe(self):
return self.deep_extend(super(bigone, self).describe(), {
'id': 'bigone',
'name': 'BigONE',
'countries': ['GB'],
'version': 'v2',
'has': {
'fetchTickers': True,
'fetchOpenOrders': True,
'fetchMyTrades': True,
'fetchDepositAddress': True,
'withdraw': True,
'fetchOHLCV': False,
'createMarketOrder': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/42803606-27c2b5ec-89af-11e8-8d15-9c8c245e8b2c.jpg',
'api': {
'public': 'https://big.one/api/v2',
'private': 'https://big.one/api/v2/viewer',
},
'www': 'https://big.one',
'doc': 'https://open.big.one/docs/api.html',
'fees': 'https://help.big.one/hc/en-us/articles/115001933374-BigONE-Fee-Policy',
'referral': 'https://b1.run/users/new?code=D3LLBVFT',
},
'api': {
'public': {
'get': [
'ping', # timestamp in nanoseconds
'markets',
'markets/{symbol}/depth',
'markets/{symbol}/trades',
'markets/{symbol}/ticker',
'orders',
'orders/{id}',
'tickers',
'trades',
],
},
'private': {
'get': [
'accounts',
'orders',
'orders/{order_id}',
],
'post': [
'orders',
'orders/{order_id}/cancel',
'orders/cancel_all',
],
},
},
'fees': {
'trading': {
'maker': 0.1 / 100,
'taker': 0.1 / 100,
},
'funding': {
# HARDCODING IS DEPRECATED THE FEES BELOW ARE TO BE REMOVED SOON
'withdraw': {
'BTC': 0.002,
'ETH': 0.01,
'EOS': 0.01,
'ZEC': 0.002,
'LTC': 0.01,
'QTUM': 0.01,
# 'INK': 0.01 QTUM,
# 'BOT': 0.01 QTUM,
'ETC': 0.01,
'GAS': 0.0,
'BTS': 1.0,
'GXS': 0.1,
'BITCNY': 1.0,
},
},
},
'exceptions': {
'codes': {
'401': AuthenticationError,
'10030': InvalidNonce, # {"message":"invalid nonce, nonce should be a 19bits number","code":10030}
},
'detail': {
'Internal server error': ExchangeNotAvailable,
},
},
})
def fetch_markets(self):
response = self.publicGetMarkets()
markets = response['data']
result = []
self.options['marketsByUuid'] = {}
for i in range(0, len(markets)):
#
# { uuid: "550b34db-696e-4434-a126-196f827d9172",
# quoteScale: 3,
# quoteAsset: { uuid: "17082d1c-0195-4fb6-8779-2cdbcb9eeb3c",
# symbol: "USDT",
# name: "TetherUS" },
# name: "BTC-USDT",
# baseScale: 5,
# baseAsset: { uuid: "0df9c3c3-255a-46d7-ab82-dedae169fba9",
# symbol: "BTC",
# name: "Bitcoin" } }}
#
market = markets[i]
id = market['name']
uuid = market['uuid']
baseId = market['baseAsset']['symbol']
quoteId = market['quoteAsset']['symbol']
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': market['baseScale'],
'price': market['quoteScale'],
}
entry = {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
}
self.options['marketsByUuid'][uuid] = entry
result.append(entry)
return result
def parse_ticker(self, ticker, market=None):
#
# [
# {
# "volume": "190.4925000000000000",
# "open": "0.0777371200000000",
# "market_uuid": "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# "market_id": "ETH-BTC",
# "low": "0.0742925600000000",
# "high": "0.0789150000000000",
# "daily_change_perc": "-0.3789180767180466680525339760",
# "daily_change": "-0.0002945600000000",
# "close": "0.0774425600000000", # last price
# "bid": {
# "price": "0.0764777900000000",
# "amount": "6.4248000000000000"
# },
# "ask": {
# "price": "0.0774425600000000",
# "amount": "1.1741000000000000"
# }
# }
# ]
#
if market is None:
marketId = self.safe_string(ticker, 'market_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.milliseconds()
close = self.safe_float(ticker, 'close')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker['bid'], 'price'),
'bidVolume': self.safe_float(ticker['bid'], 'amount'),
'ask': self.safe_float(ticker['ask'], 'price'),
'askVolume': self.safe_float(ticker['ask'], 'amount'),
'vwap': None,
'open': self.safe_float(ticker, 'open'),
'close': close,
'last': close,
'previousClose': None,
'change': self.safe_float(ticker, 'daily_change'),
'percentage': self.safe_float(ticker, 'daily_change_perc'),
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': None,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetMarketsSymbolTicker(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(response['data'], market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTickers(params)
tickers = response['data']
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
symbol = ticker['symbol']
result[symbol] = ticker
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
response = self.publicGetMarketsSymbolDepth(self.extend({
'symbol': self.market_id(symbol),
}, params))
return self.parse_order_book(response['data'], None, 'bids', 'asks', 'price', 'amount')
def parse_trade(self, trade, market=None):
#
# { node: { taker_side: "ASK",
# price: "0.0694071600000000",
# market_uuid: "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# market_id: "ETH-BTC",
# inserted_at: "2018-07-14T09:22:06Z",
# id: "19913306",
# amount: "0.8800000000000000" },
# cursor: "Y3Vyc29yOnYxOjE5OTEzMzA2" }
#
node = trade['node']
timestamp = self.parse8601(node['inserted_at'])
price = self.safe_float(node, 'price')
amount = self.safe_float(node, 'amount')
if market is None:
marketId = self.safe_string(node, 'market_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = None
if market is not None:
symbol = market['symbol']
cost = self.cost_to_precision(symbol, price * amount)
side = None
if node['taker_side'] == 'ASK':
side = 'sell'
else:
side = 'buy'
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string(node, 'id'),
'order': None,
'type': 'limit',
'side': side,
'price': price,
'amount': amount,
'cost': float(cost),
'fee': None,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['first'] = limit
response = self.publicGetMarketsSymbolTrades(self.extend(request, params))
#
# {data: {page_info: { start_cursor: "Y3Vyc29yOnYxOjE5OTEzMzA2",
# has_previous_page: True,
# has_next_page: False,
# end_cursor: "Y3Vyc29yOnYxOjIwMDU0NzIw" },
# edges: [{ node: { taker_side: "ASK",
# price: "0.0694071600000000",
# market_uuid: "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# market_id: "ETH-BTC",
# inserted_at: "2018-07-14T09:22:06Z",
# id: "19913306",
# amount: "0.8800000000000000" },
# cursor: "Y3Vyc29yOnYxOjE5OTEzMzA2" },
# { node: { taker_side: "ASK",
# price: "0.0694071600000000",
# market_uuid: "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# market_id: "ETH-BTC",
# inserted_at: "2018-07-14T09:22:07Z",
# id: "19913307",
# amount: "0.3759000000000000" },
# cursor: "Y3Vyc29yOnYxOjE5OTEzMzA3" },
# { node: { taker_side: "ASK",
# price: "0.0694071600000000",
# market_uuid: "38dd30bf-76c2-4777-ae2a-a3222433eef3",
# market_id: "ETH-BTC",
# inserted_at: "2018-07-14T09:22:08Z",
# id: "19913321",
# amount: "0.2197000000000000" },
# cursor: "Y3Vyc29yOnYxOjE5OTEzMzIx" },
#
return self.parse_trades(response['data']['edges'], market, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetAccounts(params)
#
# {data: [{locked_balance: "0",
# balance: "0",
# asset_uuid: "04479958-d7bb-40e4-b153-48bd63f2f77f",
# asset_id: "NKC" },
# {locked_balance: "0",
# balance: "0",
# asset_uuid: "04c8da0e-44fd-4d71-aeb0-8f4d54a4a907",
# asset_id: "UBTC" },
# {locked_balance: "0",
# balance: "0",
# asset_uuid: "05bc0d34-4809-4a39-a3c8-3a1851c8d224",
# asset_id: "READ" },
#
result = {'info': response}
balances = response['data']
for i in range(0, len(balances)):
balance = balances[i]
currencyId = balance['asset_id']
code = self.common_currency_code(currencyId)
if currencyId in self.currencies_by_id:
code = self.currencies_by_id[currencyId]['code']
total = self.safe_float(balance, 'balance')
used = self.safe_float(balance, 'locked_balance')
free = None
if total is not None and used is not None:
free = total - used
account = {
'free': free,
'used': used,
'total': total,
}
result[code] = account
return self.parse_balance(result)
def parse_order(self, order, market=None):
#
# {
# "id": 10,
# "market_uuid": "d2185614-50c3-4588-b146-b8afe7534da6",
# "market_uuid": "BTC-EOS", # not sure which one is correct
# "market_id": "BTC-EOS", # not sure which one is correct
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# }
#
id = self.safe_string(order, 'id')
if market is None:
marketId = self.safe_string(order, 'market_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
marketUuid = self.safe_string(order, 'market_uuid')
if marketUuid in self.options['marketsByUuid']:
market = self.options['marketsByUuid'][marketUuid]
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(order, 'inserted_at'))
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'filled_amount')
remaining = max(0, amount - filled)
status = self.parse_order_status(self.safe_string(order, 'state'))
side = self.safe_string(order, 'side')
if side == 'BID':
side = 'buy'
else:
side = 'sell'
return {
'id': id,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'status': status,
'symbol': symbol,
'type': None,
'side': side,
'price': price,
'cost': None,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': None,
'info': order,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
side = 'BID' if (side == 'buy') else 'ASK'
request = {
'market_id': market['id'], # market uuid d2185614-50c3-4588-b146-b8afe7534da6, required
'side': side, # order side one of "ASK"/"BID", required
'amount': self.amount_to_precision(symbol, amount), # order amount, string, required
'price': self.price_to_precision(symbol, price), # order price, string, required
}
response = self.privatePostOrders(self.extend(request, params))
#
# {
# "data":
# {
# "id": 10,
# "market_uuid": "BTC-EOS",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# }
# }
#
order = self.safe_value(response, 'data')
return self.parse_order(order, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {'order_id': id}
response = self.privatePostOrdersOrderIdCancel(self.extend(request, params))
#
# {
# "data":
# {
# "id": 10,
# "market_uuid": "BTC-EOS",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# }
# }
#
order = response['data']
return self.parse_order(order)
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
response = self.privatePostOrdersOrderIdCancel(params)
#
# [
# {
# "id": 10,
# "market_uuid": "d2185614-50c3-4588-b146-b8afe7534da6",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# },
# {
# ...
# },
# ]
#
return self.parse_orders(response)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {'order_id': id}
response = self.privateGetOrdersOrderId(self.extend(request, params))
#
# {
# "id": 10,
# "market_uuid": "d2185614-50c3-4588-b146-b8afe7534da6",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# }
#
return self.parse_order(response)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
# NAME DESCRIPTION EXAMPLE REQUIRED
# market_id market id ETH-BTC True
# after ask for the server to return orders after the cursor dGVzdGN1cmVzZQo False
# before ask for the server to return orders before the cursor dGVzdGN1cmVzZQo False
# first slicing count 20 False
# last slicing count 20 False
# side order side one of "ASK"/"BID" False
# state order state one of "CANCELED"/"FILLED"/"PENDING" False
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'market_id': market['id'],
}
if limit is not None:
request['first'] = limit
response = self.privateGetOrders(self.extend(request, params))
#
# {
# "data": {
# "edges": [
# {
# "node": {
# "id": 10,
# "market_id": "ETH-BTC",
# "price": "10.00",
# "amount": "10.00",
# "filled_amount": "9.0",
# "avg_deal_price": "12.0",
# "side": "ASK",
# "state": "FILLED"
# },
# "cursor": "dGVzdGN1cmVzZQo="
# }
# ],
# "page_info": {
# "end_cursor": "dGVzdGN1cmVzZQo=",
# "start_cursor": "dGVzdGN1cmVzZQo=",
# "has_next_page": True,
# "has_previous_page": False
# }
# }
# }
#
data = self.safe_value(response, 'data', {})
orders = self.safe_value(data, 'edges', [])
result = []
for i in range(0, len(orders)):
result.append(self.parse_order(orders[i]['node'], market))
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def parse_order_status(self, status):
statuses = {
'PENDING': 'open',
'FILLED': 'closed',
'CANCELED': 'canceled',
}
return self.safe_string(statuses, status)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders(symbol, since, limit, self.extend({
'state': 'PENDING',
}, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders(symbol, since, limit, self.extend({
'state': 'FILLED',
}, params))
def nonce(self):
return self.microseconds() * 1000
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
url = self.urls['api'][api] + '/' + self.implode_params(path, params)
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
request = {
'type': 'OpenAPI',
'sub': self.apiKey,
'nonce': nonce,
}
jwt = self.jwt(request, self.secret)
headers = {
'Authorization': 'Bearer ' + jwt,
}
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
elif method == 'POST':
headers['Content-Type'] = 'application/json'
body = self.json(query)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body):
if not isinstance(body, basestring):
return # fallback to default error handler
if len(body) < 2:
return # fallback to default error handler
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
#
# {"errors":{"detail":"Internal server error"}}
# {"errors":[{"message":"invalid nonce, nonce should be a 19bits number","code":10030}],"data":null}
#
error = self.safe_value(response, 'error')
errors = self.safe_value(response, 'errors')
data = self.safe_value(response, 'data')
if error is not None or errors is not None or data is None:
feedback = self.id + ' ' + self.json(response)
code = None
if error is not None:
code = self.safe_integer(error, 'code')
exceptions = self.exceptions['codes']
if errors is not None:
if self.isArray(errors):
code = self.safe_string(errors[0], 'code')
else:
code = self.safe_string(errors, 'detail')
exceptions = self.exceptions['detail']
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
| 41.367534 | 126 | 0.424018 | ["MIT"] | tssujt/ccxt | python/ccxt/bigone.py | 27,013 | Python |
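A usage sketch for the exchange class above through the usual ccxt entry point; only public endpoints are hit, so no credentials are needed, and the symbol follows the BTC-USDT market shown in fetch_markets:

```python
import ccxt

exchange = ccxt.bigone()
exchange.load_markets()
ticker = exchange.fetch_ticker('BTC/USDT')
print(ticker['symbol'], ticker['last'])
```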
# -*- test-case-name: wokkel.test.test_muc -*-
#
# Copyright (c) Ralph Meijer.
# See LICENSE for details.
"""
XMPP Multi-User Chat protocol.
This protocol is specified in
U{XEP-0045<http://xmpp.org/extensions/xep-0045.html>}.
"""
from dateutil.tz import tzutc
from zope.interface import implements
from twisted.internet import defer
from twisted.words.protocols.jabber import jid, error, xmlstream
from twisted.words.xish import domish
from wokkel import data_form, generic, iwokkel, xmppim
from wokkel.compat import Values, ValueConstant
from wokkel.delay import Delay, DelayMixin
from wokkel.subprotocols import XMPPHandler
from wokkel.iwokkel import IMUCClient
# Multi User Chat namespaces
NS_MUC = 'http://jabber.org/protocol/muc'
NS_MUC_USER = NS_MUC + '#user'
NS_MUC_ADMIN = NS_MUC + '#admin'
NS_MUC_OWNER = NS_MUC + '#owner'
NS_MUC_ROOMINFO = NS_MUC + '#roominfo'
NS_MUC_CONFIG = NS_MUC + '#roomconfig'
NS_MUC_REQUEST = NS_MUC + '#request'
NS_MUC_REGISTER = NS_MUC + '#register'
NS_REGISTER = 'jabber:iq:register'
MESSAGE = '/message'
PRESENCE = '/presence'
GROUPCHAT = MESSAGE +'[@type="groupchat"]'
DEFER_TIMEOUT = 30 # basic timeout is 30 seconds
class STATUS_CODE(Values):
REALJID_PUBLIC = ValueConstant(100)
AFFILIATION_CHANGED = ValueConstant(101)
UNAVAILABLE_SHOWN = ValueConstant(102)
UNAVAILABLE_NOT_SHOWN = ValueConstant(103)
CONFIGURATION_CHANGED = ValueConstant(104)
SELF_PRESENCE = ValueConstant(110)
LOGGING_ENABLED = ValueConstant(170)
LOGGING_DISABLED = ValueConstant(171)
NON_ANONYMOUS = ValueConstant(172)
SEMI_ANONYMOUS = ValueConstant(173)
FULLY_ANONYMOUS = ValueConstant(174)
ROOM_CREATED = ValueConstant(201)
NICK_ASSIGNED = ValueConstant(210)
BANNED = ValueConstant(301)
NEW_NICK = ValueConstant(303)
KICKED = ValueConstant(307)
REMOVED_AFFILIATION = ValueConstant(321)
REMOVED_MEMBERSHIP = ValueConstant(322)
REMOVED_SHUTDOWN = ValueConstant(332)
class Statuses(set):
"""
Container of MUC status conditions.
This is currently implemented as a set of constant values from
L{STATUS_CODE}. Instances of this class provide L{IMUCStatuses}, that
defines the supported operations. Even though this class currently derives
from C{set}, future versions might not. This provides an upgrade path to
cater for extensible status conditions, as defined in
U{XEP-0306<http://xmpp.org/extensions/xep-0306.html>}.
"""
implements(iwokkel.IMUCStatuses)
class _FormRequest(generic.Request):
"""
Base class for form exchange requests.
"""
requestNamespace = None
formNamespace = None
def __init__(self, recipient, sender=None, options=None):
if options is None:
stanzaType = 'get'
else:
stanzaType = 'set'
generic.Request.__init__(self, recipient, sender, stanzaType)
self.options = options
def toElement(self):
element = generic.Request.toElement(self)
query = element.addElement((self.requestNamespace, 'query'))
if self.options is None:
# This is a request for the configuration form.
form = None
elif self.options is False:
form = data_form.Form(formType='cancel')
else:
form = data_form.Form(formType='submit',
formNamespace=self.formNamespace)
form.makeFields(self.options)
if form is not None:
query.addChild(form.toElement())
return element
class ConfigureRequest(_FormRequest):
"""
Configure MUC room request.
http://xmpp.org/extensions/xep-0045.html#roomconfig
"""
requestNamespace = NS_MUC_OWNER
formNamespace = NS_MUC_CONFIG
class RegisterRequest(_FormRequest):
"""
Register request.
http://xmpp.org/extensions/xep-0045.html#register
"""
requestNamespace = NS_REGISTER
formNamespace = NS_MUC_REGISTER
class AdminItem(object):
"""
Item representing role and/or affiliation for admin request.
"""
def __init__(self, affiliation=None, role=None, entity=None, nick=None,
reason=None):
self.affiliation = affiliation
self.role = role
self.entity = entity
self.nick = nick
self.reason = reason
def toElement(self):
element = domish.Element((NS_MUC_ADMIN, 'item'))
if self.entity:
element['jid'] = self.entity.full()
if self.nick:
element['nick'] = self.nick
if self.affiliation:
element['affiliation'] = self.affiliation
if self.role:
element['role'] = self.role
if self.reason:
element.addElement('reason', content=self.reason)
return element
@classmethod
def fromElement(Class, element):
item = Class()
if element.hasAttribute('jid'):
item.entity = jid.JID(element['jid'])
item.nick = element.getAttribute('nick')
item.affiliation = element.getAttribute('affiliation')
item.role = element.getAttribute('role')
for child in element.elements(NS_MUC_ADMIN, 'reason'):
item.reason = unicode(child)
return item
class AdminStanza(generic.Request):
"""
An admin request or response.
"""
childParsers = {(NS_MUC_ADMIN, 'query'): '_childParser_query'}
def toElement(self):
element = generic.Request.toElement(self)
element.addElement((NS_MUC_ADMIN, 'query'))
if self.items:
for item in self.items:
element.query.addChild(item.toElement())
return element
def _childParser_query(self, element):
self.items = []
for child in element.elements(NS_MUC_ADMIN, 'item'):
self.items.append(AdminItem.fromElement(child))
class DestructionRequest(generic.Request):
"""
Room destruction request.
@param reason: Optional reason for the destruction of this room.
@type reason: C{unicode}.
@param alternate: Optional room JID of an alternate venue.
@type alternate: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param password: Optional password for entering the alternate venue.
@type password: C{unicode}
"""
stanzaType = 'set'
def __init__(self, recipient, sender=None, reason=None, alternate=None,
password=None):
generic.Request.__init__(self, recipient, sender)
self.reason = reason
self.alternate = alternate
self.password = password
def toElement(self):
element = generic.Request.toElement(self)
element.addElement((NS_MUC_OWNER, 'query'))
element.query.addElement('destroy')
if self.alternate:
element.query.destroy['jid'] = self.alternate.full()
if self.password:
element.query.destroy.addElement('password',
content=self.password)
if self.reason:
element.query.destroy.addElement('reason', content=self.reason)
return element
class GroupChat(xmppim.Message, DelayMixin):
"""
A groupchat message.
"""
stanzaType = 'groupchat'
def toElement(self, legacyDelay=False):
"""
Render into a domish Element.
@param legacyDelay: If C{True} send the delayed delivery information
in legacy format.
"""
element = xmppim.Message.toElement(self)
if self.delay:
element.addChild(self.delay.toElement(legacy=legacyDelay))
return element
class PrivateChat(xmppim.Message):
"""
A chat message.
"""
stanzaType = 'chat'
class InviteMessage(xmppim.Message):
def __init__(self, recipient=None, sender=None, invitee=None, reason=None):
xmppim.Message.__init__(self, recipient, sender)
self.invitee = invitee
self.reason = reason
def toElement(self):
element = xmppim.Message.toElement(self)
child = element.addElement((NS_MUC_USER, 'x'))
child.addElement('invite')
child.invite['to'] = self.invitee.full()
if self.reason:
child.invite.addElement('reason', content=self.reason)
return element
class HistoryOptions(object):
"""
A history configuration object.
@ivar maxchars: Limit the total number of characters in the history to "X"
(where the character count is the characters of the complete XML
stanzas, not only their XML character data).
@type maxchars: C{int}
@ivar maxstanzas: Limit the total number of messages in the history to "X".
    @type maxstanzas: C{int}
@ivar seconds: Send only the messages received in the last "X" seconds.
@type seconds: C{int}
@ivar since: Send only the messages received since the datetime specified.
Note that this must be an offset-aware instance.
@type since: L{datetime.datetime}
"""
attributes = ['maxChars', 'maxStanzas', 'seconds', 'since']
def __init__(self, maxChars=None, maxStanzas=None, seconds=None,
since=None):
self.maxChars = maxChars
self.maxStanzas = maxStanzas
self.seconds = seconds
self.since = since
def toElement(self):
"""
Returns a L{domish.Element} representing the history options.
"""
element = domish.Element((NS_MUC, 'history'))
for key in self.attributes:
value = getattr(self, key, None)
if value is not None:
if key == 'since':
stamp = value.astimezone(tzutc())
element[key] = stamp.strftime('%Y-%m-%dT%H:%M:%SZ')
else:
element[key.lower()] = str(value)
return element
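# Illustrative sketch (not part of the original module): how a caller might
# build a HistoryOptions instance and render it. The values below are made-up
# examples; only `since` must be an offset-aware datetime, as documented above.
def _example_history_options():
    from datetime import datetime
    options = HistoryOptions(maxStanzas=20,
                             since=datetime(2012, 10, 1, 12, 0, 0,
                                            tzinfo=tzutc()))
    return options.toElement().toXml()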
class BasicPresence(xmppim.AvailabilityPresence):
"""
Availability presence sent from MUC client to service.
@type history: L{HistoryOptions}
"""
history = None
password = None
def toElement(self):
element = xmppim.AvailabilityPresence.toElement(self)
muc = element.addElement((NS_MUC, 'x'))
if self.password:
muc.addElement('password', content=self.password)
if self.history:
muc.addChild(self.history.toElement())
return element
class UserPresence(xmppim.AvailabilityPresence):
"""
Availability presence sent from MUC service to client.
@ivar affiliation: Affiliation of the entity to the room.
@type affiliation: C{unicode}
@ivar role: Role of the entity in the room.
@type role: C{unicode}
@ivar entity: The real JID of the entity this presence is from.
@type entity: L{JID<twisted.words.protocols.jabber.jid.JID>}
@ivar mucStatuses: Set of one or more status codes from L{STATUS_CODE}.
See L{Statuses} for usage notes.
@type mucStatuses: L{Statuses}
@ivar nick: The nick name of the entity in the room.
@type nick: C{unicode}
"""
affiliation = None
role = None
entity = None
nick = None
mucStatuses = None
childParsers = {(NS_MUC_USER, 'x'): '_childParser_mucUser'}
def __init__(self, *args, **kwargs):
self.mucStatuses = Statuses()
xmppim.AvailabilityPresence.__init__(self, *args, **kwargs)
def _childParser_mucUser(self, element):
"""
Parse the MUC user extension element.
"""
for child in element.elements():
if child.uri != NS_MUC_USER:
continue
elif child.name == 'status':
try:
value = int(child.getAttribute('code'))
statusCode = STATUS_CODE.lookupByValue(value)
except (TypeError, ValueError):
continue
self.mucStatuses.add(statusCode)
elif child.name == 'item':
if child.hasAttribute('jid'):
self.entity = jid.JID(child['jid'])
self.nick = child.getAttribute('nick')
self.affiliation = child.getAttribute('affiliation')
self.role = child.getAttribute('role')
for reason in child.elements(NS_MUC_ADMIN, 'reason'):
self.reason = unicode(reason)
# TODO: destroy
class VoiceRequest(xmppim.Message):
"""
Voice request message.
"""
def toElement(self):
element = xmppim.Message.toElement(self)
# build data form
form = data_form.Form('submit', formNamespace=NS_MUC_REQUEST)
form.addField(data_form.Field(var='muc#role',
value='participant',
label='Requested role'))
element.addChild(form.toElement())
return element
class MUCClientProtocol(xmppim.BasePresenceProtocol):
"""
Multi-User Chat client protocol.
"""
timeout = None
presenceTypeParserMap = {
'error': generic.ErrorStanza,
'available': UserPresence,
'unavailable': UserPresence,
}
def __init__(self, reactor=None):
XMPPHandler.__init__(self)
if reactor:
self._reactor = reactor
else:
from twisted.internet import reactor
self._reactor = reactor
def connectionInitialized(self):
"""
Called when the XML stream has been initialized.
It initializes several XPath events to handle MUC stanzas that come
in.
"""
xmppim.BasePresenceProtocol.connectionInitialized(self)
self.xmlstream.addObserver(GROUPCHAT, self._onGroupChat)
self._roomOccupantMap = {}
def _onGroupChat(self, element):
"""
A group chat message has been received from a MUC room.
There are a few event methods that may get called here.
L{receivedGroupChat}, L{receivedSubject} or L{receivedHistory}.
"""
message = GroupChat.fromElement(element)
self.groupChatReceived(message)
def groupChatReceived(self, message):
"""
Called when a groupchat message was received.
This method is called with a parsed representation of a received
groupchat message and can be overridden for further processing.
For regular groupchat message, the C{body} attribute contains the
message body. Conversation history sent by the room upon joining, will
have the C{delay} attribute set, room subject changes the C{subject}
attribute. See L{GroupChat} for details.
@param message: Groupchat message.
@type message: L{GroupChat}
"""
pass
def _sendDeferred(self, stanza):
"""
Send presence stanza, adding a deferred with a timeout.
@param stanza: The presence stanza to send over the wire.
@type stanza: L{generic.Stanza}
        The deferred times out after C{DEFER_TIMEOUT} seconds.
        The deferred object L{defer.Deferred} is returned.
"""
def onResponse(element):
if element.getAttribute('type') == 'error':
d.errback(error.exceptionFromStanza(element))
else:
d.callback(UserPresence.fromElement(element))
def onTimeout():
d.errback(xmlstream.TimeoutError("Timeout waiting for response."))
def cancelTimeout(result):
if call.active():
call.cancel()
return result
def recordOccupant(presence):
occupantJID = presence.sender
roomJID = occupantJID.userhostJID()
self._roomOccupantMap[roomJID] = occupantJID
return presence
call = self._reactor.callLater(DEFER_TIMEOUT, onTimeout)
d = defer.Deferred()
d.addBoth(cancelTimeout)
d.addCallback(recordOccupant)
query = "/presence[@from='%s' or (@from='%s' and @type='error')]" % (
stanza.recipient.full(), stanza.recipient.userhost())
self.xmlstream.addOnetimeObserver(query, onResponse, priority=-1)
self.xmlstream.send(stanza.toElement())
return d
def join(self, roomJID, nick, historyOptions=None, password=None):
"""
Join a MUC room by sending presence to it.
@param roomJID: The JID of the room the entity is joining.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
        @param nick: The nick name for the entity joining the room.
@type nick: C{unicode}
@param historyOptions: Options for conversation history sent by the
room upon joining.
@type historyOptions: L{HistoryOptions}
@param password: Optional password for the room.
@type password: C{unicode}
@return: A deferred that fires when the entity is in the room or an
error has occurred.
"""
occupantJID = jid.JID(tuple=(roomJID.user, roomJID.host, nick))
presence = BasicPresence(recipient=occupantJID)
if password:
presence.password = password
if historyOptions:
presence.history = historyOptions
return self._sendDeferred(presence)
def nick(self, roomJID, nick):
"""
Change an entity's nick name in a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#changenick
@param roomJID: The JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param nick: The new nick name within the room.
@type nick: C{unicode}
"""
occupantJID = jid.JID(tuple=(roomJID.user, roomJID.host, nick))
presence = BasicPresence(recipient=occupantJID)
return self._sendDeferred(presence)
def status(self, roomJID, show=None, status=None):
"""
Change user status.
See: http://xmpp.org/extensions/xep-0045.html#changepres
@param roomJID: The Room JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param show: The availability of the entity. Common values are xa,
available, etc
@type show: C{unicode}
@param status: The current status of the entity.
@type status: C{unicode}
"""
occupantJID = self._roomOccupantMap[roomJID]
presence = BasicPresence(recipient=occupantJID, show=show,
status=status)
return self._sendDeferred(presence)
def leave(self, roomJID):
"""
Leave a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#exit
@param roomJID: The JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
occupantJID = self._roomOccupantMap[roomJID]
presence = xmppim.AvailabilityPresence(recipient=occupantJID,
available=False)
return self._sendDeferred(presence)
def groupChat(self, roomJID, body):
"""
Send a groupchat message.
"""
message = GroupChat(recipient=roomJID, body=body)
self.send(message.toElement())
def chat(self, occupantJID, body):
"""
Send a private chat message to a user in a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#privatemessage
@param occupantJID: The Room JID of the other user.
@type occupantJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
message = PrivateChat(recipient=occupantJID, body=body)
self.send(message.toElement())
def subject(self, roomJID, subject):
"""
Change the subject of a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#subject-mod
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param subject: The subject you want to set.
@type subject: C{unicode}
"""
message = GroupChat(roomJID.userhostJID(), subject=subject)
self.send(message.toElement())
def invite(self, roomJID, invitee, reason=None):
"""
Invite a xmpp entity to a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#invite
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param invitee: The entity that is being invited.
@type invitee: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param reason: The reason for the invite.
@type reason: C{unicode}
"""
message = InviteMessage(recipient=roomJID, invitee=invitee,
reason=reason)
self.send(message.toElement())
def getRegisterForm(self, roomJID):
"""
Grab the registration form for a MUC room.
        @param roomJID: The room jabber/xmpp entity id for the requested
            registration form.
        @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
def cb(response):
form = data_form.findForm(response.query, NS_MUC_REGISTER)
return form
request = RegisterRequest(recipient=roomJID, options=None)
d = self.request(request)
d.addCallback(cb)
return d
def register(self, roomJID, options):
"""
Send a request to register for a room.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param options: A mapping of field names to values, or C{None} to
cancel.
@type options: C{dict}
"""
if options is None:
options = False
request = RegisterRequest(recipient=roomJID, options=options)
return self.request(request)
def voice(self, roomJID):
"""
Request voice for a moderated room.
@param roomJID: The room jabber/xmpp entity id.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
message = VoiceRequest(recipient=roomJID)
self.xmlstream.send(message.toElement())
def history(self, roomJID, messages):
"""
Send history to create a MUC based on a one on one chat.
See: http://xmpp.org/extensions/xep-0045.html#continue
@param roomJID: The room jabber/xmpp entity id.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param messages: The history to send to the room as an ordered list of
message, represented by a dictionary with the keys
C{'stanza'}, holding the original stanza a
L{domish.Element}, and C{'timestamp'} with the
timestamp.
@type messages: C{list} of L{domish.Element}
"""
for message in messages:
stanza = message['stanza']
stanza['type'] = 'groupchat'
delay = Delay(stamp=message['timestamp'])
sender = stanza.getAttribute('from')
if sender is not None:
delay.sender = jid.JID(sender)
stanza.addChild(delay.toElement())
stanza['to'] = roomJID.userhost()
if stanza.hasAttribute('from'):
del stanza['from']
self.xmlstream.send(stanza)
def getConfiguration(self, roomJID):
"""
Grab the configuration from the room.
This sends an iq request to the room.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@return: A deferred that fires with the room's configuration form as
a L{data_form.Form} or C{None} if there are no configuration
options available.
"""
def cb(response):
form = data_form.findForm(response.query, NS_MUC_CONFIG)
return form
request = ConfigureRequest(recipient=roomJID, options=None)
d = self.request(request)
d.addCallback(cb)
return d
def configure(self, roomJID, options):
"""
Configure a room.
@param roomJID: The room to configure.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param options: A mapping of field names to values, or C{None} to
cancel.
@type options: C{dict}
"""
if options is None:
options = False
request = ConfigureRequest(recipient=roomJID, options=options)
return self.request(request)
def _getAffiliationList(self, roomJID, affiliation):
"""
Send a request for an affiliation list in a room.
"""
def cb(response):
stanza = AdminStanza.fromElement(response)
return stanza.items
request = AdminStanza(recipient=roomJID, stanzaType='get')
request.items = [AdminItem(affiliation=affiliation)]
d = self.request(request)
d.addCallback(cb)
return d
def _getRoleList(self, roomJID, role):
"""
Send a request for a role list in a room.
"""
def cb(response):
stanza = AdminStanza.fromElement(response)
return stanza.items
request = AdminStanza(recipient=roomJID, stanzaType='get')
request.items = [AdminItem(role=role)]
d = self.request(request)
d.addCallback(cb)
return d
def getMemberList(self, roomJID):
"""
Get the member list of a room.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
return self._getAffiliationList(roomJID, 'member')
def getAdminList(self, roomJID):
"""
Get the admin list of a room.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
return self._getAffiliationList(roomJID, 'admin')
def getBanList(self, roomJID):
"""
Get an outcast list from a room.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
return self._getAffiliationList(roomJID, 'outcast')
def getOwnerList(self, roomJID):
"""
Get an owner list from a room.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
return self._getAffiliationList(roomJID, 'owner')
def getModeratorList(self, roomJID):
"""
Get the moderator list of a room.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
d = self._getRoleList(roomJID, 'moderator')
return d
def _setAffiliation(self, roomJID, entity, affiliation,
reason=None, sender=None):
"""
Send a request to change an entity's affiliation to a MUC room.
"""
request = AdminStanza(recipient=roomJID, sender=sender,
stanzaType='set')
item = AdminItem(entity=entity, affiliation=affiliation, reason=reason)
request.items = [item]
return self.request(request)
def _setRole(self, roomJID, nick, role,
reason=None, sender=None):
"""
Send a request to change an occupant's role in a MUC room.
"""
request = AdminStanza(recipient=roomJID, sender=sender,
stanzaType='set')
item = AdminItem(nick=nick, role=role, reason=reason)
request.items = [item]
return self.request(request)
def modifyAffiliationList(self, roomJID, entities, affiliation,
sender=None):
"""
Modify an affiliation list.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param entities: The list of entities to change for a room.
@type entities: C{list} of
L{JID<twisted.words.protocols.jabber.jid.JID>}
        @param affiliation: The affiliation the entities will acquire.
@type affiliation: C{unicode}
@param sender: The entity sending the request.
@type sender: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
request = AdminStanza(recipient=roomJID, sender=sender,
stanzaType='set')
request.items = [AdminItem(entity=entity, affiliation=affiliation)
for entity in entities]
return self.request(request)
def grantVoice(self, roomJID, nick, reason=None, sender=None):
"""
Grant voice to an entity.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param nick: The nick name for the user in this room.
@type nick: C{unicode}
@param reason: The reason for granting voice to the entity.
@type reason: C{unicode}
@param sender: The entity sending the request.
@type sender: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
return self._setRole(roomJID, nick=nick,
role='participant',
reason=reason, sender=sender)
def revokeVoice(self, roomJID, nick, reason=None, sender=None):
"""
Revoke voice from a participant.
This will disallow the entity to send messages to a moderated room.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param nick: The nick name for the user in this room.
@type nick: C{unicode}
@param reason: The reason for revoking voice from the entity.
@type reason: C{unicode}
@param sender: The entity sending the request.
@type sender: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
return self._setRole(roomJID, nick=nick, role='visitor',
reason=reason, sender=sender)
def grantModerator(self, roomJID, nick, reason=None, sender=None):
"""
Grant moderator privileges to a MUC room.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param nick: The nick name for the user in this room.
@type nick: C{unicode}
@param reason: The reason for granting moderation to the entity.
@type reason: C{unicode}
@param sender: The entity sending the request.
@type sender: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
return self._setRole(roomJID, nick=nick, role='moderator',
reason=reason, sender=sender)
def ban(self, roomJID, entity, reason=None, sender=None):
"""
Ban a user from a MUC room.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param entity: The bare JID of the entity to be banned.
@type entity: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param reason: The reason for banning the entity.
@type reason: C{unicode}
@param sender: The entity sending the request.
@type sender: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
return self._setAffiliation(roomJID, entity, 'outcast',
reason=reason, sender=sender)
def kick(self, roomJID, nick, reason=None, sender=None):
"""
Kick a user from a MUC room.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
        @param nick: The occupant to be kicked.
@type nick: C{unicode}
@param reason: The reason given for the kick.
@type reason: C{unicode}
@param sender: The entity sending the request.
@type sender: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
return self._setRole(roomJID, nick, 'none',
reason=reason, sender=sender)
def destroy(self, roomJID, reason=None, alternate=None, password=None):
"""
Destroy a room.
@param roomJID: The JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param reason: The reason for the destruction of the room.
@type reason: C{unicode}
@param alternate: The JID of the room suggested as an alternate venue.
@type alternate: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
request = DestructionRequest(recipient=roomJID, reason=reason,
alternate=alternate, password=password)
return self.request(request)
class User(object):
"""
A user/entity in a multi-user chat room.
"""
def __init__(self, nick, entity=None):
self.nick = nick
self.entity = entity
self.affiliation = 'none'
self.role = 'none'
self.status = None
self.show = None
class Room(object):
"""
A Multi User Chat Room.
An in memory object representing a MUC room from the perspective of
a client.
@ivar roomJID: The Room JID of the MUC room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@ivar nick: The nick name for the client in this room.
@type nick: C{unicode}
@ivar occupantJID: The JID of the occupant in the room. Generated from
roomJID and nick.
@type occupantJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@ivar locked: Flag signalling a locked room. A locked room first needs
to be configured before it can be used. See
L{MUCClientProtocol.getConfiguration} and
L{MUCClientProtocol.configure}.
@type locked: C{bool}
"""
locked = False
def __init__(self, roomJID, nick):
"""
Initialize the room.
"""
self.roomJID = roomJID
self.setNick(nick)
self.roster = {}
def setNick(self, nick):
self.occupantJID = jid.internJID(u"%s/%s" % (self.roomJID, nick))
self.nick = nick
def addUser(self, user):
"""
Add a user to the room roster.
@param user: The user object that is being added to the room.
@type user: L{User}
"""
self.roster[user.nick] = user
def inRoster(self, user):
"""
Check if a user is in the MUC room.
@param user: The user object to check.
@type user: L{User}
"""
return user.nick in self.roster
def getUser(self, nick):
"""
Get a user from the room's roster.
@param nick: The nick for the user in the MUC room.
@type nick: C{unicode}
"""
return self.roster.get(nick)
def removeUser(self, user):
"""
Remove a user from the MUC room's roster.
@param user: The user object to check.
@type user: L{User}
"""
if self.inRoster(user):
del self.roster[user.nick]
class MUCClient(MUCClientProtocol):
"""
Multi-User Chat client protocol.
This is a subclass of L{XMPPHandler} and implements L{IMUCClient}.
@ivar _rooms: Collection of occupied rooms, keyed by the bare JID of the
room. Note that a particular entity can only join a room once
at a time.
@type _rooms: C{dict}
"""
implements(IMUCClient)
def __init__(self, reactor=None):
MUCClientProtocol.__init__(self, reactor)
self._rooms = {}
def _addRoom(self, room):
"""
Add a room to the room collection.
Rooms are stored by the JID of the room itself. I.e. it uses the Room
ID and service parts of the Room JID.
@note: An entity can only join a particular room once.
"""
roomJID = room.occupantJID.userhostJID()
self._rooms[roomJID] = room
def _getRoom(self, roomJID):
"""
Grab a room from the room collection.
This uses the Room ID and service parts of the given JID to look up
the L{Room} instance associated with it.
        @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
return self._rooms.get(roomJID)
def _removeRoom(self, roomJID):
"""
Delete a room from the room collection.
"""
if roomJID in self._rooms:
del self._rooms[roomJID]
def _getRoomUser(self, stanza):
"""
Lookup the room and user associated with the stanza's sender.
"""
occupantJID = stanza.sender
if not occupantJID:
return None, None
# when a user leaves a room we need to update it
room = self._getRoom(occupantJID.userhostJID())
if room is None:
# not in the room yet
return None, None
# Check if user is in roster
nick = occupantJID.resource
user = room.getUser(nick)
return room, user
def unavailableReceived(self, presence):
"""
Unavailable presence was received.
If this was received from a MUC room occupant JID, that occupant has
left the room.
"""
room, user = self._getRoomUser(presence)
if room is None or user is None:
return
room.removeUser(user)
self.userLeftRoom(room, user)
def availableReceived(self, presence):
"""
Available presence was received.
"""
room, user = self._getRoomUser(presence)
if room is None:
return
if user is None:
nick = presence.sender.resource
user = User(nick, presence.entity)
# Update user status
user.status = presence.status
user.show = presence.show
if room.inRoster(user):
self.userUpdatedStatus(room, user, presence.show, presence.status)
else:
room.addUser(user)
self.userJoinedRoom(room, user)
def groupChatReceived(self, message):
"""
A group chat message has been received from a MUC room.
There are a few event methods that may get called here.
L{receivedGroupChat}, L{receivedSubject} or L{receivedHistory}.
"""
room, user = self._getRoomUser(message)
if room is None:
return
if message.subject:
self.receivedSubject(room, user, message.subject)
elif message.delay is None:
self.receivedGroupChat(room, user, message)
else:
self.receivedHistory(room, user, message)
def userJoinedRoom(self, room, user):
"""
User has joined a MUC room.
        This method will need to be modified in order for clients to
do something when this event occurs.
@param room: The room the user has joined.
@type room: L{Room}
@param user: The user that joined the MUC room.
@type user: L{User}
"""
pass
def userLeftRoom(self, room, user):
"""
User has left a room.
        This method will need to be modified in order for clients to
do something when this event occurs.
@param room: The room the user has joined.
@type room: L{Room}
@param user: The user that left the MUC room.
@type user: L{User}
"""
pass
def userUpdatedStatus(self, room, user, show, status):
"""
User Presence has been received.
        This method will need to be modified in order for clients to
do something when this event occurs.
"""
pass
def receivedSubject(self, room, user, subject):
"""
A (new) room subject has been received.
        This method will need to be modified in order for clients to
do something when this event occurs.
"""
pass
def receivedGroupChat(self, room, user, message):
"""
A groupchat message was received.
@param room: The room the message was received from.
@type room: L{Room}
@param user: The user that sent the message, or C{None} if it was a
message from the room itself.
@type user: L{User}
@param message: The message.
@type message: L{GroupChat}
"""
pass
def receivedHistory(self, room, user, message):
"""
A groupchat message from the room's discussion history was received.
This is identical to L{receivedGroupChat}, with the delayed delivery
information (timestamp and original sender) in C{message.delay}. For
anonymous rooms, C{message.delay.sender} is the room's address.
@param room: The room the message was received from.
@type room: L{Room}
@param user: The user that sent the message, or C{None} if it was a
message from the room itself.
@type user: L{User}
@param message: The message.
@type message: L{GroupChat}
"""
pass
def join(self, roomJID, nick, historyOptions=None,
password=None):
"""
Join a MUC room by sending presence to it.
@param roomJID: The JID of the room the entity is joining.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
        @param nick: The nick name for the entity joining the room.
@type nick: C{unicode}
@param historyOptions: Options for conversation history sent by the
room upon joining.
@type historyOptions: L{HistoryOptions}
@param password: Optional password for the room.
@type password: C{unicode}
@return: A deferred that fires with the room when the entity is in the
room, or with a failure if an error has occurred.
"""
def cb(presence):
"""
We have presence that says we joined a room.
"""
if STATUS_CODE.ROOM_CREATED in presence.mucStatuses:
room.locked = True
return room
def eb(failure):
self._removeRoom(roomJID)
return failure
room = Room(roomJID, nick)
self._addRoom(room)
d = MUCClientProtocol.join(self, roomJID, nick, historyOptions,
password)
d.addCallbacks(cb, eb)
return d
def nick(self, roomJID, nick):
"""
Change an entity's nick name in a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#changenick
@param roomJID: The JID of the room, i.e. without a resource.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param nick: The new nick name within the room.
@type nick: C{unicode}
"""
def cb(presence):
# Presence confirmation, change the nickname.
room.setNick(nick)
return room
room = self._getRoom(roomJID)
d = MUCClientProtocol.nick(self, roomJID, nick)
d.addCallback(cb)
return d
def leave(self, roomJID):
"""
Leave a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#exit
@param roomJID: The Room JID of the room to leave.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
def cb(presence):
self._removeRoom(roomJID)
d = MUCClientProtocol.leave(self, roomJID)
d.addCallback(cb)
return d
def status(self, roomJID, show=None, status=None):
"""
Change user status.
See: http://xmpp.org/extensions/xep-0045.html#changepres
@param roomJID: The Room JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param show: The availability of the entity. Common values are xa,
available, etc
@type show: C{unicode}
@param status: The current status of the entity.
@type status: C{unicode}
"""
room = self._getRoom(roomJID)
d = MUCClientProtocol.status(self, roomJID, show, status)
d.addCallback(lambda _: room)
return d
def destroy(self, roomJID, reason=None, alternate=None, password=None):
"""
Destroy a room.
@param roomJID: The JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param reason: The reason for the destruction of the room.
@type reason: C{unicode}
@param alternate: The JID of the room suggested as an alternate venue.
@type alternate: L{JID<twisted.words.protocols.jabber.jid.JID>}
"""
def destroyed(iq):
self._removeRoom(roomJID)
        d = MUCClientProtocol.destroy(self, roomJID, reason, alternate, password)
d.addCallback(destroyed)
return d
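# Illustrative sketch (not part of the original module): a minimal client that
# joins a room and echoes groupchat messages back. The room address and nick
# are made-up; the handler still needs to be attached to a connected stream
# manager (for example wokkel.client.XMPPClient) via setHandlerParent().
class _ExampleMUCClient(MUCClient):
    def connectionInitialized(self):
        MUCClient.connectionInitialized(self)
        roomJID = jid.JID(u'mychannel@muc.example.org')
        d = self.join(roomJID, u'mynick')
        d.addCallback(lambda room: self.groupChat(room.roomJID, u'hello'))
    def receivedGroupChat(self, room, user, message):
        # Echo the body back into the room, ignoring our own messages.
        if user is not None and user.nick != room.nick and message.body:
            self.groupChat(room.roomJID, message.body)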
| 29.234124 | 79 | 0.607447 | [
"MIT"
] | Gandi/wokkel | wokkel/muc.py | 45,576 | Python |
#!/usr/bin/env python3
"""
Copyright Google Inc. 2019
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import apache_beam as beam
import numpy as np
import argparse, logging
def handle_ints(ints, startpos=0):
if ints[startpos] == 99:
return ints
x1 = ints[startpos+1]
x2 = ints[startpos+2]
outpos = ints[startpos+3]
if ints[startpos] == 1:
ints[outpos] = ints[x1] + ints[x2]
elif ints[startpos] == 2:
ints[outpos] = ints[x1] * ints[x2]
return handle_ints(ints, startpos+4)
def handle_intcode(intcode):
input = [int(x) for x in intcode.split(',')]
output = handle_ints(input)
return ','.join([str(x) for x in output])
def run_1202(intcode):
input = [int(x) for x in intcode.split(',')]
input[1] = 12
input[2] = 2
output = handle_ints(input)
return output[0]
def try_working():
assert handle_intcode('1,0,0,0,99') == '2,0,0,0,99'
assert handle_intcode('2,3,0,3,99') == '2,3,0,6,99'
assert handle_intcode('2,4,4,5,99,0') == '2,4,4,5,99,9801'
assert handle_intcode('1,1,1,4,99,5,6,0,99') == '30,1,1,4,2,5,6,0,99'
print('Assertions passed')
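# Illustrative usage (not part of the original script): the pipeline reads a
# text file whose lines are comma-separated Intcode programs, runs the "1202"
# variant on each line, and writes the value left at position 0. The paths
# below are placeholders.
#
#   python day2a.py --input day2_input.txt --output day2_part1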
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Solutions to https://adventofcode.com/2019/ using Apache Beam')
parser.add_argument('--input', required=True, help='Specify input file')
parser.add_argument('--output', required=True, help='Specify output file')
options = parser.parse_args()
runner = 'DirectRunner' # run Beam on local machine, but write outputs to cloud
logging.basicConfig(level=getattr(logging, 'INFO', None))
opts = beam.pipeline.PipelineOptions(flags=[])
p = beam.Pipeline(runner, options=opts)
(p
| 'read' >> beam.io.textio.ReadFromText(options.input)
| 'run_1202' >> beam.Map(run_1202)
| 'output' >> beam.io.textio.WriteToText(options.output)
)
job = p.run()
if runner == 'DirectRunner':
job.wait_until_finish()
| 34.15493 | 112 | 0.684536 | [
"Apache-2.0"
] | 16D070061/training-data-analyst | blogs/beamadvent/day2a.py | 2,425 | Python |
# coding: utf-8
"""
Velo Payments APIs
## Terms and Definitions Throughout this document and the Velo platform the following terms are used: * **Payor.** An entity (typically a corporation) which wishes to pay funds to one or more payees via a payout. * **Payee.** The recipient of funds paid out by a payor. * **Payment.** A single transfer of funds from a payor to a payee. * **Payout.** A batch of Payments, typically used by a payor to logically group payments (e.g. by business day). Technically there need be no relationship between the payments in a payout - a single payout can contain payments to multiple payees and/or multiple payments to a single payee. * **Sandbox.** An integration environment provided by Velo Payments which offers a similar API experience to the production environment, but all funding and payment events are simulated, along with many other services such as OFAC sanctions list checking. ## Overview The Velo Payments API allows a payor to perform a number of operations. The following is a list of the main capabilities in a natural order of execution: * Authenticate with the Velo platform * Maintain a collection of payees * Query the payor’s current balance of funds within the platform and perform additional funding * Issue payments to payees * Query the platform for a history of those payments This document describes the main concepts and APIs required to get up and running with the Velo Payments platform. It is not an exhaustive API reference. For that, please see the separate Velo Payments API Reference. ## API Considerations The Velo Payments API is REST based and uses the JSON format for requests and responses. Most calls are secured using OAuth 2 security and require a valid authentication access token for successful operation. See the Authentication section for details. Where a dynamic value is required in the examples below, the {token} format is used, suggesting that the caller needs to supply the appropriate value of the token in question (without including the { or } characters). Where curl examples are given, the –d @filename.json approach is used, indicating that the request body should be placed into a file named filename.json in the current directory. Each of the curl examples in this document should be considered a single line on the command-line, regardless of how they appear in print. ## Authenticating with the Velo Platform Once Velo backoffice staff have added your organization as a payor within the Velo platform sandbox, they will create you a payor Id, an API key and an API secret and share these with you in a secure manner. You will need to use these values to authenticate with the Velo platform in order to gain access to the APIs. The steps to take are explained in the following: create a string comprising the API key (e.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8) and API secret (e.g. c396b26b-137a-44fd-87f5-34631f8fd529) with a colon between them. E.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8:c396b26b-137a-44fd-87f5-34631f8fd529 base64 encode this string. E.g.: NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== create an HTTP **Authorization** header with the value set to e.g. Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== perform the Velo authentication REST call using the HTTP header created above e.g. 
via curl: ``` curl -X POST \\ -H \"Content-Type: application/json\" \\ -H \"Authorization: Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ==\" \\ 'https://api.sandbox.velopayments.com/v1/authenticate?grant_type=client_credentials' ``` If successful, this call will result in a **200** HTTP status code and a response body such as: ``` { \"access_token\":\"19f6bafd-93fd-4747-b229-00507bbc991f\", \"token_type\":\"bearer\", \"expires_in\":1799, \"scope\":\"...\" } ``` ## API access following authentication Following successful authentication, the value of the access_token field in the response (indicated in green above) should then be presented with all subsequent API calls to allow the Velo platform to validate that the caller is authenticated. This is achieved by setting the HTTP Authorization header with the value set to e.g. Bearer 19f6bafd-93fd-4747-b229-00507bbc991f such as the curl example below: ``` -H \"Authorization: Bearer 19f6bafd-93fd-4747-b229-00507bbc991f \" ``` If you make other Velo API calls which require authorization but the Authorization header is missing or invalid then you will get a **401** HTTP status response. # noqa: E501
The version of the OpenAPI document: 2.26.124
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import velo_payments
from velo_payments.models.ping import Ping # noqa: E501
from velo_payments.rest import ApiException
class TestPing(unittest.TestCase):
"""Ping unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPing(self):
"""Test Ping"""
# FIXME: construct object with mandatory attributes with example values
# model = velo_payments.models.ping.Ping() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 134.475 | 4,651 | 0.770961 | [
"Apache-2.0"
] | velopaymentsapi/velo-python | test/test_ping.py | 5,383 | Python |
class Argument(object):
    def __init__(self, argument=None, base: bool = False):
self.arg = argument
self.is_base = base
def __repr__(self):
return self.arg
def __str__(self):
return self.arg
def is_pipe(self):
return self.arg == ">>" or self.arg == "<<"
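# Illustrative sketch (not part of the original module): how Argument is
# expected to behave for ordinary tokens versus pipe tokens.
def _example_argument_usage():
    plain = Argument("echo", base=True)
    pipe = Argument(">>")
    return plain.is_pipe(), pipe.is_pipe()  # (False, True)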
| 21 | 60 | 0.571429 | [
"MIT"
] | ii-Python/Sprint-v2 | sprint/core/parser/args.py | 315 | Python |
"""
Cubic spline planner
Author: Atsushi Sakai(@Atsushi_twi)
"""
import math
import numpy as np
import bisect
class Spline:
"""
Cubic Spline class
"""
def __init__(self, x, y):
self.b, self.c, self.d, self.w = [], [], [], []
self.x = x
self.y = y
self.nx = len(x) # dimension of x
h = np.diff(x)
        # calc coefficient a
self.a = [iy for iy in y]
# calc coefficient c
A = self.__calc_A(h)
B = self.__calc_B(h)
self.c = np.linalg.solve(A, B)
# print(self.c1)
# calc spline coefficient b and d
for i in range(self.nx - 1):
self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))
tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \
(self.c[i + 1] + 2.0 * self.c[i]) / 3.0
self.b.append(tb)
def calc(self, t):
"""
Calc position
if t is outside of the input x, return None
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = self.a[i] + self.b[i] * dx + \
self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0
return result
def calcd(self, t):
"""
Calc first derivative
if t is outside of the input x, return None
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0
return result
def calcdd(self, t):
"""
Calc second derivative
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = 2.0 * self.c[i] + 6.0 * self.d[i] * dx
return result
def __search_index(self, x):
"""
search data segment index
"""
return bisect.bisect(self.x, x) - 1
def __calc_A(self, h):
"""
calc matrix A for spline coefficient c
"""
A = np.zeros((self.nx, self.nx))
A[0, 0] = 1.0
for i in range(self.nx - 1):
if i != (self.nx - 2):
A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])
A[i + 1, i] = h[i]
A[i, i + 1] = h[i]
A[0, 1] = 0.0
A[self.nx - 1, self.nx - 2] = 0.0
A[self.nx - 1, self.nx - 1] = 1.0
# print(A)
return A
def __calc_B(self, h):
"""
calc matrix B for spline coefficient c
"""
B = np.zeros(self.nx)
for i in range(self.nx - 2):
B[i + 1] = 3.0 * (self.a[i + 2] - self.a[i + 1]) / \
h[i + 1] - 3.0 * (self.a[i + 1] - self.a[i]) / h[i]
return B
class Spline2D:
"""
2D Cubic Spline class
"""
def __init__(self, x, y):
self.s = self.__calc_s(x, y)
self.sx = Spline(self.s, x)
self.sy = Spline(self.s, y)
def __calc_s(self, x, y):
dx = np.diff(x)
dy = np.diff(y)
self.ds = np.hypot(dx, dy)
s = [0]
s.extend(np.cumsum(self.ds))
return s
def calc_position(self, s):
"""
calc position
"""
x = self.sx.calc(s)
y = self.sy.calc(s)
return x, y
def calc_curvature(self, s):
"""
calc curvature
"""
dx = self.sx.calcd(s)
ddx = self.sx.calcdd(s)
dy = self.sy.calcd(s)
ddy = self.sy.calcdd(s)
k = (ddy * dx - ddx * dy) / ((dx ** 2 + dy ** 2)**(3 / 2))
return k
def calc_yaw(self, s):
"""
calc yaw
"""
dx = self.sx.calcd(s)
dy = self.sy.calcd(s)
yaw = math.atan2(dy, dx)
return yaw
def calc_spline_course(x, y, ds=0.1):
sp = Spline2D(x, y)
s = list(np.arange(0, sp.s[-1], ds))
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = sp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(sp.calc_yaw(i_s))
rk.append(sp.calc_curvature(i_s))
return rx, ry, ryaw, rk, s
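# Illustrative sketch (not part of the original module): fitting a 2D spline
# course through a few made-up waypoints and sampling it every 0.5 m.
def _example_spline_course():
    wx = [0.0, 10.0, 20.0, 30.0]
    wy = [0.0, 4.0, -2.0, 3.0]
    rx, ry, ryaw, rk, s = calc_spline_course(wx, wy, ds=0.5)
    return rx, ry, ryaw, rk, s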
def main(): # pragma: no cover
print("Spline 2D test")
import matplotlib.pyplot as plt
x = [-2.5, 0.0, 2.5, 5.0, 7.5, 3.0, -1.0]
y = [0.7, -6, 5, 6.5, 0.0, 5.0, -2.0]
ds = 0.1 # [m] distance of each interpolated points
sp = Spline2D(x, y)
s = np.arange(0, sp.s[-1], ds)
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = sp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(sp.calc_yaw(i_s))
rk.append(sp.calc_curvature(i_s))
plt.plot(rx,ry)
plt.show()
plt.close()
plt.subplots(1)
plt.plot(x, y, "xb", label="input")
plt.plot(rx, ry, "-r", label="spline")
plt.grid(True)
plt.axis("equal")
plt.xlabel("x[m]")
plt.ylabel("y[m]")
plt.legend()
plt.subplots(1)
plt.plot(s, [np.rad2deg(iyaw) for iyaw in ryaw], "-r", label="yaw")
plt.grid(True)
plt.legend()
plt.xlabel("line length[m]")
plt.ylabel("yaw angle[deg]")
plt.subplots(1)
plt.plot(s, rk, "-r", label="curvature")
plt.grid(True)
plt.legend()
plt.xlabel("line length[m]")
plt.ylabel("curvature [1/m]")
plt.show()
if __name__ == '__main__':
main()
| 23.400844 | 79 | 0.462495 | [
"Apache-2.0"
] | hadleyhzy34/mpc_python_traj | cubic_spline_planner.py | 5,546 | Python |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def f(x):
return 1-x
data=pd.read_csv("test.csv")
print(data)
roll=data["Rollno"]
t1 =data["t1"]
t2 = data["t2"]
print(roll,t1,t2)
plt.pie(t1,labels=roll,autopct="%1.2f%%")
plt.title("Marks in test1")
plt.show()
plt.pie(t2,labels=roll,autopct="%1.2f%%")
plt.title("Marks in test2")
plt.show()
data["t2-t1"]=data["t2"]-data["t1"]
print(data)
plt.title("Marks in test1")
benefit=0
notbenefit=0
for i in data['t2-t1']:
if i>0:
benefit +=1
else:
notbenefit +=1
print(benefit,notbenefit)
plt.pie([benefit,notbenefit],labels=["Benefitted","Not Benefitted"],autopct="%1.2f%%",explode=[0.1,0.1])
plt.title("Deciding")
plt.show()
ranges = ["0-15","15-18","18-21","21-23","23-26"]  # renamed to avoid shadowing the built-in range
n = [0,0,0,0,0]
for i in data["t1"]:
if i < 15:
n[0] += 1
elif i < 18:
n[1] += 1
elif i < 21:
n[2] += 1
elif i < 23:
n[3] += 1
elif i < 26:
n[4] += 1
plt.pie(n,labels=ranges,autopct="%1.2f%%")
plt.show()
x = np.linspace(0,1,100)
plt.plot(x,f(x),color="red")
plt.xlim(0,1)
plt.ylim(0,1)
plt.title("happening Vs Not happening")
plt.show()
| 20.821429 | 104 | 0.596913 | [
"MIT"
] | Maurya232Abhishek/Python-repository-for-basics | ML/Graph/pieChart2.py | 1,166 | Python |
#!/usr/bin/env python3
## Copyright 2021 Aon plc
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from argparse import ArgumentParser, Namespace
import json
import logging
from pathlib import Path
from typing import Any, Dict
from libcsce.error import CobaltStrikeError
from libcsce.parser import CobaltStrikeConfigParser
from libcsce.utils import JSONEncoderWithBinarySupport
__version__ = "0.1.0"
logger = logging.getLogger("csce")
def csce(args: Namespace):
"""Parse configuration options from Cobalt Strike Beacon."""
if not args.source.is_file():
logger.error("Source path does not exist or is not file")
return 1
if args.cs_version:
version_list = [args.cs_version]
else:
version_list = list(CobaltStrikeConfigParser.SUPPORTED_VERSIONS)
config: Dict[str, Any] = dict()
for version in version_list:
with CobaltStrikeConfigParser(args.source, version) as parser:
try:
config = parser.parse_config()
break
except CobaltStrikeError:
pass
print(
json.dumps(
config,
indent=(2 if args.pretty else None),
cls=JSONEncoderWithBinarySupport,
)
)
return 0
def gen_command_parser() -> ArgumentParser:
parser = ArgumentParser(
description="Parse Cobalt Strike beacon configuration from PE file or memory dump."
)
parser.add_argument(
"-V", "--version", action="version", version=f"%(prog)s {__version__}"
)
parser.add_argument(
"--pretty", action="store_true", help="Pretty-print JSON output", dest="pretty"
)
parser.add_argument(
"-v",
"--cs-version",
type=int,
choices=CobaltStrikeConfigParser.SUPPORTED_VERSIONS,
help="Cobalt Strike version. If not specified, will try all supported versions",
dest="cs_version",
)
parser.add_argument("source", type=Path, help="Path to PE file or memory dump")
parser.set_defaults(func=csce)
return parser
def main() -> int:
parser = gen_command_parser()
args = parser.parse_args()
return args.func(args)
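# Illustrative usage (not part of the original module): parsing the arguments
# the console entry point would receive. The beacon path is a placeholder; the
# call prints the extracted config as JSON and returns 0 on success, 1 otherwise.
def _example_invocation():
    parser = gen_command_parser()
    args = parser.parse_args(["--pretty", "beacon_dump.bin"])
    return args.func(args)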
if __name__ == "__main__":
main()
| 29.576087 | 91 | 0.674752 | [
"Apache-2.0"
] | strozfriedberg/cobaltstrike-config-extractor | libcsce/bin/csce.py | 2,721 | Python |
"""
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from azureml.core.run import Run
import os
import argparse
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import joblib
import json
def train_model(run, data, alpha):
run.log("alpha", alpha)
run.parent.log("alpha", alpha)
reg = Ridge(alpha=alpha)
reg.fit(data["train"]["X"], data["train"]["y"])
preds = reg.predict(data["test"]["X"])
run.log("mse", mean_squared_error(
preds, data["test"]["y"]), description="Mean squared error metric")
run.parent.log("mse", mean_squared_error(
preds, data["test"]["y"]), description="Mean squared error metric")
return reg
def main():
print("Running train.py")
parser = argparse.ArgumentParser("train")
parser.add_argument(
"--build_id",
type=str,
help="The build ID of the build triggering this pipeline run",
)
parser.add_argument(
"--model_name",
type=str,
help="Name of the Model",
default="sklearn_regression_model.pkl",
)
parser.add_argument(
"--step_output",
type=str,
help=("output for passing data to next step")
)
args = parser.parse_args()
print("Argument [build_id]: %s" % args.build_id)
print("Argument [model_name]: %s" % args.model_name)
print("Argument [step_output]: %s" % args.step_output)
model_name = args.model_name
build_id = args.build_id
step_output_path = args.step_output
print("Getting training parameters")
alpha = 0.5
print("Parameter alpha: %s" % alpha)
run = Run.get_context()
# Get the dataset
dataset = run.input_datasets['training_data']
if (dataset):
df = dataset.to_pandas_dataframe()
        X = df.drop(columns=["Y"]).values  # exclude the target column from the features
        y = df.Y
else:
e = ("No dataset provided")
print(e)
raise Exception(e)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=0)
data = {"train": {"X": X_train, "y": y_train},
"test": {"X": X_test, "y": y_test}}
reg = train_model(run, data, alpha)
# Pass model file to next step
os.makedirs(step_output_path, exist_ok=True)
model_output_path = os.path.join(step_output_path, model_name)
joblib.dump(value=reg, filename=model_output_path)
# Also upload model file to run outputs for history
os.makedirs('outputs', exist_ok=True)
output_path = os.path.join('outputs', model_name)
joblib.dump(value=reg, filename=output_path)
# Add properties to identify this specific training run
run.parent.tag("BuildId", value=build_id)
run.tag("BuildId", value=build_id)
run.tag("run_type", value="train")
builduri_base = os.environ.get("BUILDURI_BASE")
if (builduri_base is not None):
build_uri = builduri_base + build_id
run.tag("BuildUri", value=build_uri)
run.parent.tag("BuildUri", value=build_uri)
print(f"tags now present for run: {run.tags}")
run.complete()
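# Illustrative sketch (not part of the original script): exercising train_model()
# offline with synthetic data and a minimal stand-in for the Azure ML Run object.
# The stub below is hypothetical; in a real pipeline step the Run comes from
# Run.get_context() as in main().
def _example_offline_train():
    import numpy as np
    class _StubRun:
        def __init__(self):
            self.parent = self
        def log(self, name, value, description=""):
            print(name, value)
    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    y = X @ np.array([1.0, 2.0, 3.0]) + 0.1 * rng.rand(100)
    data = {"train": {"X": X[:80], "y": y[:80]},
            "test": {"X": X[80:], "y": y[80:]}}
    return train_model(_StubRun(), data, alpha=0.5)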
if __name__ == '__main__':
    main()
| 34.666667 | 79 | 0.696897 | [
"MIT"
] | Voda88/EasyAzureMLOPS | scripts/train_model.py | 4,592 | Python |
"""This module provides functionality for wrapping key infrastructure components
from distutils and setuptools.
"""
from __future__ import print_function
import argparse
import copy
import json
import os
import os.path
import platform
import stat
import sys
import warnings
from contextlib import contextmanager
# pylint: disable-next=wrong-import-order
from distutils.errors import DistutilsArgError, DistutilsError, DistutilsGetoptError
from glob import glob
from shutil import copyfile, copymode
# Must be imported before distutils
import setuptools
if sys.version_info >= (3, 0):
from io import StringIO
else:
from StringIO import StringIO
if sys.version_info >= (3, 3):
from shutil import which
else:
from .compat import which
from packaging.requirements import Requirement
from packaging.version import parse as parse_version
from setuptools.dist import Distribution as upstream_Distribution
from . import cmaker
from .command import (
bdist,
bdist_wheel,
build,
build_ext,
build_py,
clean,
egg_info,
generate_source_manifest,
install,
install_lib,
install_scripts,
sdist,
test,
)
from .constants import (
CMAKE_DEFAULT_EXECUTABLE,
CMAKE_INSTALL_DIR,
CMAKE_SPEC_FILE,
set_skbuild_plat_name,
skbuild_plat_name,
)
from .exceptions import SKBuildError, SKBuildGeneratorNotFoundError
from .utils import (
PythonModuleFinder,
mkdir_p,
parse_manifestin,
to_platform_path,
to_unix_path,
)
def create_skbuild_argparser():
"""Create and return a scikit-build argument parser."""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
"--build-type", default="Release", metavar="", help="specify the CMake build type (e.g. Debug or Release)"
)
parser.add_argument("-G", "--generator", metavar="", help="specify the CMake build system generator")
parser.add_argument("-j", metavar="N", type=int, dest="jobs", help="allow N build jobs at once")
parser.add_argument("--cmake-executable", default=None, metavar="", help="specify the path to the cmake executable")
parser.add_argument(
"--install-target",
default=None,
metavar="",
help="specify the CMake target performing the install. " "If not provided, uses the target ``install``",
)
parser.add_argument(
"--skip-generator-test",
action="store_true",
help="skip generator test when a generator is explicitly selected using --generator",
)
return parser
def _is_cmake_configure_argument(arg):
"""Return True if ``arg`` is a relevant argument to pass to cmake when configuring a project."""
for cmake_arg in (
"-C", # initial-cache
"-D", # <var>[:<type>]=<value>
):
if arg.startswith(cmake_arg):
return True
return False
def parse_skbuild_args(args, cmake_args, build_tool_args):
"""
Parse arguments in the scikit-build argument set. Convert specified
arguments to proper format and append to cmake_args and build_tool_args.
Returns the tuple ``(remaining arguments, cmake executable, skip_generator_test)``.
"""
parser = create_skbuild_argparser()
# Consider CMake arguments passed as global setuptools options
cmake_args.extend([arg for arg in args if _is_cmake_configure_argument(arg)])
# ... and remove them from the list
args = [arg for arg in args if not _is_cmake_configure_argument(arg)]
namespace, remaining_args = parser.parse_known_args(args)
# Construct CMake argument list
cmake_args.append("-DCMAKE_BUILD_TYPE:STRING=" + namespace.build_type)
if namespace.generator is not None:
cmake_args.extend(["-G", namespace.generator])
# Construct build tool argument list
build_tool_args.extend(["--config", namespace.build_type])
if namespace.jobs is not None:
build_tool_args.extend(["-j", str(namespace.jobs)])
if namespace.install_target is not None:
build_tool_args.extend(["--install-target", namespace.install_target])
if namespace.generator is None and namespace.skip_generator_test is True:
sys.exit("ERROR: Specifying --skip-generator-test requires --generator to also be specified.")
return remaining_args, namespace.cmake_executable, namespace.skip_generator_test
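# Hedged example (argument values are illustrative): given
#   args            = ['--build-type', 'Debug', '-G', 'Ninja', '-DFOO:BOOL=ON']
#   cmake_args      = []
#   build_tool_args = []
# parse_skbuild_args(args, cmake_args, build_tool_args) returns ([], None, False)
# and leaves
#   cmake_args      == ['-DFOO:BOOL=ON', '-DCMAKE_BUILD_TYPE:STRING=Debug', '-G', 'Ninja']
#   build_tool_args == ['--config', 'Debug']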
def parse_args():
"""This function parses the command-line arguments ``sys.argv`` and returns
the tuple ``(setuptools_args, cmake_executable, skip_generator_test, cmake_args, build_tool_args)``
where each ``*_args`` element corresponds to a set of arguments separated by ``--``."""
dutils = []
cmake = []
make = []
argsets = [dutils, cmake, make]
i = 0
separator = "--"
for arg in sys.argv:
if arg == separator:
i += 1
if i >= len(argsets):
sys.exit(
'ERROR: Too many "{}" separators provided '
"(expected at most {}).".format(separator, len(argsets) - 1)
)
else:
argsets[i].append(arg)
dutils, cmake_executable, skip_generator_test = parse_skbuild_args(dutils, cmake, make)
return dutils, cmake_executable, skip_generator_test, cmake, make
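# Hedged example (flag values are illustrative): a command line such as
#   python setup.py bdist_wheel -- -DSOME_FEATURE:BOOL=ON -- -j4
# is split so that 'bdist_wheel' goes to setuptools, '-DSOME_FEATURE:BOOL=ON'
# goes to CMake, and '-j4' goes to the native build tool.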
@contextmanager
def _capture_output():
oldout, olderr = sys.stdout, sys.stderr
try:
out = [StringIO(), StringIO()]
sys.stdout, sys.stderr = out
yield out
finally:
sys.stdout, sys.stderr = oldout, olderr
out[0] = out[0].getvalue()
out[1] = out[1].getvalue()
def _parse_setuptools_arguments(setup_attrs):
"""This function instantiates a Distribution object and
parses the command line arguments.
    It returns the tuple ``(display_only, help_commands, commands, hide_listing,
    force_cmake, skip_cmake, plat_name, build_ext_inplace)``
where
- display_only is a boolean indicating if an argument like '--help',
'--help-commands' or '--author' was passed.
- help_commands is a boolean indicating if argument '--help-commands'
was passed.
- commands contains the list of commands that were passed.
- hide_listing is a boolean indicating if the list of files being included
in the distribution is displayed or not.
- force_cmake a boolean indicating that CMake should always be executed.
- skip_cmake is a boolean indicating if the execution of CMake should
explicitly be skipped.
- plat_name is a string identifying the platform name to embed in generated
filenames. It defaults to :func:`skbuild.constants.skbuild_plat_name()`.
- build_ext_inplace is a boolean indicating if ``build_ext`` command was
specified along with the --inplace argument.
    Otherwise it raises a DistutilsArgError exception if there are
    any errors on the command line, and it raises DistutilsGetoptError
    if there is any error in the command 'options' attribute.
The code has been adapted from the setup() function available
in distutils/core.py.
"""
setup_attrs = dict(setup_attrs)
setup_attrs["script_name"] = os.path.basename(sys.argv[0])
dist = upstream_Distribution(setup_attrs)
# Update class attribute to also ensure the argument is processed
# when ``setuptools.setup`` is called.
upstream_Distribution.global_options.extend(
[
("hide-listing", None, "do not display list of files being " "included in the distribution"),
("force-cmake", None, "always run CMake"),
("skip-cmake", None, "do not run CMake"),
]
)
# Find and parse the config file(s): they will override options from
# the setup script, but be overridden by the command line.
dist.parse_config_files()
# Parse the command line and override config files; any
# command-line errors are the end user's fault, so turn them into
# SystemExit to suppress tracebacks.
with _capture_output():
result = dist.parse_command_line()
display_only = not result
if not hasattr(dist, "hide_listing"):
dist.hide_listing = False
if not hasattr(dist, "force_cmake"):
dist.force_cmake = False
if not hasattr(dist, "skip_cmake"):
dist.skip_cmake = False
plat_names = set()
for cmd in [dist.get_command_obj(command) for command in dist.commands]:
if getattr(cmd, "plat_name", None) is not None:
plat_names.add(cmd.plat_name)
if not plat_names:
plat_names.add(None)
elif len(plat_names) > 1:
raise SKBuildError("--plat-name is ambiguous: %s" % ", ".join(plat_names))
plat_name = list(plat_names)[0]
build_ext_inplace = dist.get_command_obj("build_ext").inplace
return (
display_only,
dist.help_commands,
dist.commands,
dist.hide_listing,
dist.force_cmake,
dist.skip_cmake,
plat_name,
build_ext_inplace,
)
def _check_skbuild_parameters(skbuild_kw):
cmake_install_dir = skbuild_kw["cmake_install_dir"]
if os.path.isabs(cmake_install_dir):
raise SKBuildError(
(
"\n setup parameter 'cmake_install_dir' is set to "
"an absolute path. A relative path is expected.\n"
" Project Root : {}\n"
" CMake Install Directory: {}\n"
).format(os.getcwd(), cmake_install_dir)
)
cmake_source_dir = skbuild_kw["cmake_source_dir"]
if not os.path.exists(os.path.abspath(cmake_source_dir)):
raise SKBuildError(
(
"\n setup parameter 'cmake_source_dir' set to "
"a nonexistent directory.\n"
" Project Root : {}\n"
" CMake Source Directory: {}\n"
).format(os.getcwd(), cmake_source_dir)
)
def strip_package(package_parts, module_file):
"""Given ``package_parts`` (e.g. ``['foo', 'bar']``) and a
``module_file`` (e.g. ``foo/bar/jaz/rock/roll.py``), starting
from the left, this function will strip the parts of the path
matching the package parts and return a new string
    (e.g. ``jaz/rock/roll.py``).
The function will work as expected for either Windows or Unix-style
``module_file`` and this independently of the platform.
"""
if not package_parts or os.path.isabs(module_file):
return module_file
package = "/".join(package_parts)
module_dir = os.path.dirname(module_file.replace("\\", "/"))
module_dir = module_dir[: len(package)]
return module_file[len(package) + 1 :] if package != "" and module_dir.startswith(package) else module_file
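# Hedged examples (paths are illustrative):
#   strip_package(['foo', 'bar'], 'foo/bar/jaz/rock/roll.py')  -> 'jaz/rock/roll.py'
#   strip_package(['foo'], 'other/module.py')                  -> 'other/module.py'
#   strip_package([], 'foo/bar/module.py')                     -> 'foo/bar/module.py'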
def _package_data_contain_module(module, package_data):
"""Return True if the ``module`` is contained
in the ``package_data``.
``module`` is a tuple of the form
``(package, modulename, module_file)``.
"""
(package, _, module_file) = module
if package not in package_data:
return False
# We need to strip the package because a module entry
# usually looks like this:
#
# ('foo.bar', 'module', 'foo/bar/module.py')
#
# and the entry in package_data would look like this:
#
# {'foo.bar' : ['module.py']}
if strip_package(package.split("."), module_file) in package_data[package]:
return True
return False
def _should_run_cmake(commands, cmake_with_sdist):
"""Return True if at least one command requiring ``cmake`` to run
is found in ``commands``."""
for expected_command in [
"build",
"build_ext",
"develop",
"install",
"install_lib",
"bdist",
"bdist_dumb",
"bdist_egg",
"bdist_rpm",
"bdist_wininst",
"bdist_wheel",
"test",
]:
if expected_command in commands:
return True
if "sdist" in commands and cmake_with_sdist:
return True
return False
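# Hedged examples:
#   _should_run_cmake(['build_ext'], cmake_with_sdist=False) -> True
#   _should_run_cmake(['sdist'], cmake_with_sdist=True)      -> True
#   _should_run_cmake(['clean'], cmake_with_sdist=False)     -> False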
def _save_cmake_spec(args):
"""Save the CMake spec to disk"""
# We use JSON here because readability is more important than performance
try:
os.makedirs(os.path.dirname(CMAKE_SPEC_FILE()))
except OSError:
pass
with open(CMAKE_SPEC_FILE(), "w+") as fp:
json.dump(args, fp)
def _load_cmake_spec():
"""Load and return the CMake spec from disk"""
try:
with open(CMAKE_SPEC_FILE()) as fp:
return json.load(fp)
except (OSError, IOError, ValueError):
return None
# pylint:disable=too-many-locals, too-many-branches
def setup(*args, **kw): # noqa: C901
"""This function wraps setup() so that we can run cmake, make,
CMake build, then proceed as usual with setuptools, appending the
CMake-generated output as necessary.
The CMake project is re-configured only if needed. This is achieved by (1) retrieving the environment mapping
associated with the generator set in the ``CMakeCache.txt`` file, (2) saving the CMake configure arguments and
version in :func:`skbuild.constants.CMAKE_SPEC_FILE()`: and (3) re-configuring only if either the generator or
the CMake specs change.
"""
# If any, strip ending slash from each package directory
# Regular setuptools does not support this
# TODO: will become an error in the future
if "package_dir" in kw:
for package, prefix in kw["package_dir"].items():
if prefix.endswith("/"):
msg = "package_dir={{{!r}: {!r}}} ends with a trailing slash, which is not supported by setuptools.".format(
package, prefix
)
warnings.warn(msg, FutureWarning, stacklevel=2)
kw["package_dir"][package] = prefix[:-1]
sys.argv, cmake_executable, skip_generator_test, cmake_args, make_args = parse_args()
# work around https://bugs.python.org/issue1011113
# (patches provided, but no updates since 2014)
cmdclass = kw.get("cmdclass", {})
cmdclass["build"] = cmdclass.get("build", build.build)
cmdclass["build_py"] = cmdclass.get("build_py", build_py.build_py)
cmdclass["build_ext"] = cmdclass.get("build_ext", build_ext.build_ext)
cmdclass["install"] = cmdclass.get("install", install.install)
cmdclass["install_lib"] = cmdclass.get("install_lib", install_lib.install_lib)
cmdclass["install_scripts"] = cmdclass.get("install_scripts", install_scripts.install_scripts)
cmdclass["clean"] = cmdclass.get("clean", clean.clean)
cmdclass["sdist"] = cmdclass.get("sdist", sdist.sdist)
cmdclass["bdist"] = cmdclass.get("bdist", bdist.bdist)
cmdclass["bdist_wheel"] = cmdclass.get("bdist_wheel", bdist_wheel.bdist_wheel)
cmdclass["egg_info"] = cmdclass.get("egg_info", egg_info.egg_info)
cmdclass["generate_source_manifest"] = cmdclass.get(
"generate_source_manifest", generate_source_manifest.generate_source_manifest
)
cmdclass["test"] = cmdclass.get("test", test.test)
kw["cmdclass"] = cmdclass
# Extract setup keywords specific to scikit-build and remove them from kw.
# Removing the keyword from kw need to be done here otherwise, the
# following call to _parse_setuptools_arguments would complain about
# unknown setup options.
parameters = {
"cmake_args": [],
"cmake_install_dir": "",
"cmake_source_dir": "",
"cmake_with_sdist": False,
"cmake_languages": ("C", "CXX"),
"cmake_minimum_required_version": None,
"cmake_process_manifest_hook": None,
"cmake_install_target": "install",
}
skbuild_kw = {param: kw.pop(param, value) for param, value in parameters.items()}
# ... and validate them
try:
_check_skbuild_parameters(skbuild_kw)
except SKBuildError as ex:
import traceback # pylint: disable=import-outside-toplevel
print("Traceback (most recent call last):")
traceback.print_tb(sys.exc_info()[2])
print("")
sys.exit(ex)
# Convert source dir to a path relative to the root
# of the project
cmake_source_dir = skbuild_kw["cmake_source_dir"]
if cmake_source_dir == ".":
cmake_source_dir = ""
if os.path.isabs(cmake_source_dir):
cmake_source_dir = os.path.relpath(cmake_source_dir)
# Skip running CMake in the following cases:
# * flag "--skip-cmake" is provided
# * "display only" argument is provided (e.g '--help', '--author', ...)
# * no command-line arguments or invalid ones are provided
# * no command requiring cmake is provided
# * no CMakeLists.txt if found
display_only = has_invalid_arguments = help_commands = False
force_cmake = skip_cmake = False
commands = []
try:
(
display_only,
help_commands,
commands,
hide_listing,
force_cmake,
skip_cmake,
plat_name,
build_ext_inplace,
) = _parse_setuptools_arguments(kw)
except (DistutilsArgError, DistutilsGetoptError):
has_invalid_arguments = True
has_cmakelists = os.path.exists(os.path.join(cmake_source_dir, "CMakeLists.txt"))
if not has_cmakelists:
print("skipping skbuild (no CMakeLists.txt found)")
skip_skbuild = (
display_only
or has_invalid_arguments
or not _should_run_cmake(commands, skbuild_kw["cmake_with_sdist"])
or not has_cmakelists
)
if skip_skbuild and not force_cmake:
if help_commands:
# Prepend scikit-build help. Generate option descriptions using
# argparse.
skbuild_parser = create_skbuild_argparser()
arg_descriptions = [line for line in skbuild_parser.format_help().split("\n") if line.startswith(" ")]
print("scikit-build options:")
print("\n".join(arg_descriptions))
print("")
print('Arguments following a "--" are passed directly to CMake ' "(e.g. -DMY_VAR:BOOL=TRUE).")
print('Arguments following a second "--" are passed directly to ' " the build tool.")
print("")
return setuptools.setup(*args, **kw)
developer_mode = "develop" in commands or "test" in commands or build_ext_inplace
packages = kw.get("packages", [])
package_dir = kw.get("package_dir", {})
package_data = copy.deepcopy(kw.get("package_data", {}))
py_modules = kw.get("py_modules", [])
new_py_modules = {py_module: False for py_module in py_modules}
scripts = kw.get("scripts", [])
new_scripts = {script: False for script in scripts}
data_files = {(parent_dir or "."): set(file_list) for parent_dir, file_list in kw.get("data_files", [])}
    # Since CMake arguments provided through the command line have more
    # weight, and when CMake is given the same argument multiple times only the
    # last one is considered, prepend the ones provided in the setup() call.
cmake_args = skbuild_kw["cmake_args"] + cmake_args
# Handle cmake_install_target
# get the target (next item after '--install-target') or return '' if no --install-target
cmake_install_target_from_command = next(
(make_args[index + 1] for index, item in enumerate(make_args) if item == "--install-target"), ""
)
cmake_install_target_from_setup = skbuild_kw["cmake_install_target"]
# Setting target from command takes precedence
# cmake_install_target_from_setup has the default 'install',
# so cmake_install_target would never be empty.
if cmake_install_target_from_command:
cmake_install_target = cmake_install_target_from_command
else:
cmake_install_target = cmake_install_target_from_setup
# Parse CMAKE_ARGS
env_cmake_args = os.environ["CMAKE_ARGS"].split() if "CMAKE_ARGS" in os.environ else []
env_cmake_args = [s for s in env_cmake_args if "CMAKE_INSTALL_PREFIX" not in s]
# Using the environment variable CMAKE_ARGS has lower precedence than manual options
cmake_args = env_cmake_args + cmake_args
if sys.platform == "darwin":
# If no ``--plat-name`` argument was passed, set default value.
if plat_name is None:
plat_name = skbuild_plat_name()
(_, version, machine) = plat_name.split("-")
# The loop here allows for CMAKE_OSX_* command line arguments to overload
# values passed with either the ``--plat-name`` command-line argument
# or the ``cmake_args`` setup option.
for cmake_arg in cmake_args:
if "CMAKE_OSX_DEPLOYMENT_TARGET" in cmake_arg:
version = cmake_arg.split("=")[1]
if "CMAKE_OSX_ARCHITECTURES" in cmake_arg:
machine = cmake_arg.split("=")[1]
if set(machine.split(";")) == {"x86_64", "arm64"}:
machine = "universal2"
set_skbuild_plat_name("macosx-{}-{}".format(version, machine))
# Set platform env. variable so that commands (e.g. bdist_wheel)
# uses this information. The _PYTHON_HOST_PLATFORM env. variable is
# used in distutils.util.get_platform() function.
os.environ.setdefault("_PYTHON_HOST_PLATFORM", skbuild_plat_name())
# Set CMAKE_OSX_DEPLOYMENT_TARGET and CMAKE_OSX_ARCHITECTURES if not already
# specified
(_, version, machine) = skbuild_plat_name().split("-")
if not cmaker.has_cmake_cache_arg(cmake_args, "CMAKE_OSX_DEPLOYMENT_TARGET"):
cmake_args.append("-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=%s" % version)
if not cmaker.has_cmake_cache_arg(cmake_args, "CMAKE_OSX_ARCHITECTURES"):
machine_archs = "x86_64;arm64" if machine == "universal2" else machine
cmake_args.append("-DCMAKE_OSX_ARCHITECTURES:STRING=%s" % machine_archs)
# Install cmake if listed in `setup_requires`
for package in kw.get("setup_requires", []):
if Requirement(package).name == "cmake":
setup_requires = [package]
dist = upstream_Distribution({"setup_requires": setup_requires})
dist.fetch_build_eggs(setup_requires)
# Considering packages associated with "setup_requires" keyword are
# installed in .eggs subdirectory without honoring setuptools "console_scripts"
# entry_points and without settings the expected executable permissions, we are
# taking care of it below.
import cmake # pylint: disable=import-outside-toplevel
for executable in ["cmake", "cpack", "ctest"]:
executable = os.path.join(cmake.CMAKE_BIN_DIR, executable)
if platform.system().lower() == "windows":
executable += ".exe"
st = os.stat(executable)
permissions = st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod(executable, permissions)
cmake_executable = os.path.join(cmake.CMAKE_BIN_DIR, "cmake")
break
# Languages are used to determine a working generator
cmake_languages = skbuild_kw["cmake_languages"]
try:
if cmake_executable is None:
cmake_executable = CMAKE_DEFAULT_EXECUTABLE
cmkr = cmaker.CMaker(cmake_executable)
if not skip_cmake:
cmake_minimum_required_version = skbuild_kw["cmake_minimum_required_version"]
if cmake_minimum_required_version is not None:
if parse_version(cmkr.cmake_version) < parse_version(cmake_minimum_required_version):
raise SKBuildError(
"CMake version {} or higher is required. CMake version {} is being used".format(
cmake_minimum_required_version, cmkr.cmake_version
)
)
# Used to confirm that the cmake executable is the same, and that the environment
# didn't change
cmake_spec = {
"args": [which(CMAKE_DEFAULT_EXECUTABLE)] + cmake_args,
"version": cmkr.cmake_version,
"environment": {
"PYTHONNOUSERSITE": os.environ.get("PYTHONNOUSERSITE"),
"PYTHONPATH": os.environ.get("PYTHONPATH"),
},
}
# skip the configure step for a cached build
env = cmkr.get_cached_generator_env()
if env is None or cmake_spec != _load_cmake_spec():
env = cmkr.configure(
cmake_args,
skip_generator_test=skip_generator_test,
cmake_source_dir=cmake_source_dir,
cmake_install_dir=skbuild_kw["cmake_install_dir"],
languages=cmake_languages,
)
_save_cmake_spec(cmake_spec)
cmkr.make(make_args, install_target=cmake_install_target, env=env)
except SKBuildGeneratorNotFoundError as ex:
sys.exit(ex)
except SKBuildError as ex:
import traceback # pylint: disable=import-outside-toplevel
print("Traceback (most recent call last):")
traceback.print_tb(sys.exc_info()[2])
print("")
sys.exit(ex)
# If needed, set reasonable defaults for package_dir
for package in packages:
if package not in package_dir:
package_dir[package] = package.replace(".", "/")
if "" in package_dir:
package_dir[package] = to_unix_path(os.path.join(package_dir[""], package_dir[package]))
kw["package_dir"] = package_dir
package_prefixes = _collect_package_prefixes(package_dir, packages)
# This hook enables custom processing of the cmake manifest
cmake_manifest = cmkr.install()
process_manifest = skbuild_kw.get("cmake_process_manifest_hook")
if process_manifest is not None:
if callable(process_manifest):
cmake_manifest = process_manifest(cmake_manifest)
else:
raise SKBuildError("The cmake_process_manifest_hook argument should be callable.")
_classify_installed_files(
cmake_manifest,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
skbuild_kw["cmake_install_dir"],
)
original_manifestin_data_files = []
if kw.get("include_package_data", False):
original_manifestin_data_files = parse_manifestin(os.path.join(os.getcwd(), "MANIFEST.in"))
for path in original_manifestin_data_files:
_classify_file(
path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files
)
if developer_mode:
# Copy packages
for package, package_file_list in package_data.items():
for package_file in package_file_list:
package_file = os.path.join(package_dir[package], package_file)
cmake_file = os.path.join(CMAKE_INSTALL_DIR(), package_file)
if os.path.exists(cmake_file):
_copy_file(cmake_file, package_file, hide_listing)
# Copy modules
for py_module in py_modules:
package_file = py_module + ".py"
cmake_file = os.path.join(CMAKE_INSTALL_DIR(), package_file)
if os.path.exists(cmake_file):
_copy_file(cmake_file, package_file, hide_listing)
else:
_consolidate_package_modules(cmake_source_dir, packages, package_dir, py_modules, package_data, hide_listing)
original_package_data = kw.get("package_data", {}).copy()
_consolidate_package_data_files(original_package_data, package_prefixes, hide_listing)
for data_file in original_manifestin_data_files:
dest_data_file = os.path.join(CMAKE_INSTALL_DIR(), data_file)
_copy_file(data_file, dest_data_file, hide_listing)
kw["package_data"] = package_data
kw["package_dir"] = {
package: (
os.path.join(CMAKE_INSTALL_DIR(), prefix)
if os.path.exists(os.path.join(CMAKE_INSTALL_DIR(), prefix))
else prefix
)
for prefix, package in package_prefixes
}
kw["scripts"] = [
os.path.join(CMAKE_INSTALL_DIR(), script) if mask else script for script, mask in new_scripts.items()
]
kw["data_files"] = [(parent_dir, list(file_set)) for parent_dir, file_set in data_files.items()]
if "zip_safe" not in kw:
kw["zip_safe"] = False
# Adapted from espdev/ITKPythonInstaller/setup.py.in
class BinaryDistribution(upstream_Distribution): # pylint: disable=missing-class-docstring
def has_ext_modules(self): # pylint: disable=no-self-use,missing-function-docstring
return has_cmakelists
kw["distclass"] = BinaryDistribution
print("")
return setuptools.setup(*args, **kw)
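# Hedged usage sketch (project name, package layout and CMake options below are
# illustrative, not part of this module): a downstream project's setup.py calls
# the wrapper defined above instead of setuptools.setup directly, e.g.
#
#     from skbuild import setup
#
#     setup(
#         name="hello-cpp",
#         version="0.1.0",
#         packages=["hello"],
#         cmake_install_dir="hello",
#         cmake_args=["-DSOME_FEATURE:BOOL=ON"],
#     )
#
# Arguments after a first "--" on the command line go to CMake and arguments
# after a second "--" go to the native build tool (see parse_args above).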
def _collect_package_prefixes(package_dir, packages):
"""
Collect the list of prefixes for all packages
The list is used to match paths in the install manifest to packages
specified in the setup.py script.
The list is sorted in decreasing order of prefix length so that paths are
matched with their immediate parent package, instead of any of that
package's ancestors.
For example, consider the project structure below. Assume that the
setup call was made with a package list featuring "top" and "top.bar", but
not "top.not_a_subpackage".
::
top/ -> top/
__init__.py -> top/__init__.py (parent: top)
foo.py -> top/foo.py (parent: top)
bar/ -> top/bar/ (parent: top)
__init__.py -> top/bar/__init__.py (parent: top.bar)
not_a_subpackage/ -> top/not_a_subpackage/ (parent: top)
data_0.txt -> top/not_a_subpackage/data_0.txt (parent: top)
data_1.txt -> top/not_a_subpackage/data_1.txt (parent: top)
The paths in the generated install manifest are matched to packages
according to the parents indicated on the right. Only packages that are
specified in the setup() call are considered. Because of the sort order,
the data files on the bottom would have been mapped to
"top.not_a_subpackage" instead of "top", proper -- had such a package been
specified.
"""
return list(
sorted(
((package_dir[package].replace(".", "/"), package) for package in packages),
key=lambda tup: len(tup[0]),
reverse=True,
)
)
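# Hedged example (package names are illustrative): with
#   package_dir = {'top': 'top', 'top.bar': 'top/bar'}
#   packages    = ['top', 'top.bar']
# the result is [('top/bar', 'top.bar'), ('top', 'top')]; the longest prefix
# comes first so files are matched to their immediate parent package.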
def _classify_installed_files(
install_paths,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
_cmake_install_dir,
):
assert not os.path.isabs(cmake_source_dir)
assert cmake_source_dir != "."
install_root = os.path.join(os.getcwd(), CMAKE_INSTALL_DIR())
for path in install_paths:
# if this installed file is not within the project root, complain and
# exit
if not to_platform_path(path).startswith(CMAKE_INSTALL_DIR()):
raise SKBuildError(
(
"\n CMake-installed files must be within the project root.\n"
" Project Root : {}\n"
" Violating File: {}\n"
).format(install_root, to_platform_path(path))
)
# peel off the 'skbuild' prefix
path = to_unix_path(os.path.relpath(path, CMAKE_INSTALL_DIR()))
_classify_file(
path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files
)
def _classify_file(path, package_data, package_prefixes, py_modules, new_py_modules, scripts, new_scripts, data_files):
found_package = False
found_module = False
found_script = False
path = to_unix_path(path)
# check to see if path is part of a package
for prefix, package in package_prefixes:
if path.startswith(prefix + "/"):
# peel off the package prefix
path = to_unix_path(os.path.relpath(path, prefix))
package_file_list = package_data.get(package, [])
package_file_list.append(path)
package_data[package] = package_file_list
found_package = True
break
if found_package:
return
# If control reaches this point, then this installed file is not part of
# a package.
# check if path is a module
for module in py_modules:
if path.replace("/", ".") == ".".join((module, "py")):
new_py_modules[module] = True
found_module = True
break
if found_module:
return
# If control reaches this point, then this installed file is not a
# module
# if the file is a script, mark the corresponding script
for script in scripts:
if path == script:
new_scripts[script] = True
found_script = True
break
if found_script:
return
# If control reaches this point, then this installed file is not a
# script
# If control reaches this point, then we have installed files that are
# not part of a package, not a module, nor a script. Without any other
# information, we can only treat it as a generic data file.
parent_dir = os.path.dirname(path)
file_set = data_files.get(parent_dir)
if file_set is None:
file_set = set()
data_files[parent_dir] = file_set
file_set.add(os.path.join(CMAKE_INSTALL_DIR(), path))
def _copy_file(src_file, dest_file, hide_listing=True):
"""Copy ``src_file`` to ``dest_file`` ensuring parent directory exists.
By default, message like `creating directory /path/to/package` and
`copying directory /src/path/to/package -> path/to/package` are displayed
on standard output. Setting ``hide_listing`` to False avoids message from
being displayed.
"""
# Create directory if needed
dest_dir = os.path.dirname(dest_file)
if dest_dir != "" and not os.path.exists(dest_dir):
if not hide_listing:
print("creating directory {}".format(dest_dir))
mkdir_p(dest_dir)
# Copy file
if not hide_listing:
print("copying {} -> {}".format(src_file, dest_file))
copyfile(src_file, dest_file)
copymode(src_file, dest_file)
def _consolidate_package_modules(cmake_source_dir, packages, package_dir, py_modules, package_data, hide_listing):
"""This function consolidates packages having modules located in
both the source tree and the CMake install tree into one location.
The one location is the CMake install tree
(see :func:`.constants.CMAKE_INSTALL_DIR()`).
    Why? This is a necessary evil because ``Setuptools`` keeps track of the
    package and module files to install using a dictionary of lists where
    the keys are package names (e.g. ``foo.bar``) and the values are lists of
    module files (e.g. ``['__init__.py', 'baz.py']``). Since this doesn't allow
    splitting the files associated with a given module across multiple
    locations, one location is selected, and files are copied over.
    How? It currently searches for modules across both locations using
    the :class:`.utils.PythonModuleFinder`. Then, with the help
    of :func:`_package_data_contain_module`, it identifies which
    ones are either already included in or missing from the distribution.
Once a module has been identified as ``missing``, it is both copied
into the :func:`.constants.CMAKE_INSTALL_DIR()` and added to the
``package_data`` dictionary so that it can be considered by
the upstream setup function.
"""
try:
# Search for python modules in both the current directory
# and cmake install tree.
modules = PythonModuleFinder(
packages, package_dir, py_modules, alternative_build_base=CMAKE_INSTALL_DIR()
).find_all_modules()
except DistutilsError as msg:
raise SystemExit("error: {}".format(str(msg)))
print("")
for entry in modules:
# Check if module file should be copied into the CMake install tree.
if _package_data_contain_module(entry, package_data):
continue
(package, _, src_module_file) = entry
# Copy missing module file
if os.path.exists(src_module_file):
dest_module_file = os.path.join(CMAKE_INSTALL_DIR(), src_module_file)
_copy_file(src_module_file, dest_module_file, hide_listing)
# Since the mapping in package_data expects the package to be associated
# with a list of files relative to the directory containing the package,
# the following section makes sure to strip the redundant part of the
# module file path.
# The redundant part should be stripped for both cmake_source_dir and
# the package.
package_parts = []
if cmake_source_dir:
package_parts = cmake_source_dir.split(os.path.sep)
package_parts += package.split(".")
stripped_module_file = strip_package(package_parts, src_module_file)
# Update list of files associated with the corresponding package
try:
package_data[package].append(stripped_module_file)
except KeyError:
package_data[package] = [stripped_module_file]
def _consolidate_package_data_files(original_package_data, package_prefixes, hide_listing):
"""This function copies package data files specified using the ``package_data`` keyword
into :func:`.constants.CMAKE_INSTALL_DIR()`.
::
setup(...,
packages=['mypkg'],
package_dir={'mypkg': 'src/mypkg'},
package_data={'mypkg': ['data/*.dat']},
)
Considering that (1) the packages associated with modules located in both the source tree and
the CMake install tree are consolidated into the CMake install tree, and (2) the consolidated
package path set in the ``package_dir`` dictionary and later used by setuptools to package
(or install) modules and data files is :func:`.constants.CMAKE_INSTALL_DIR()`, copying the data files
is required to ensure setuptools can find them when it uses the package directory.
"""
project_root = os.getcwd()
for prefix, package in package_prefixes:
if package not in original_package_data:
continue
raw_patterns = original_package_data[package]
for pattern in raw_patterns:
expanded_package_dir = os.path.join(project_root, prefix, pattern)
for src_data_file in glob(expanded_package_dir):
full_prefix_length = len(os.path.join(project_root, prefix)) + 1
data_file = src_data_file[full_prefix_length:]
dest_data_file = os.path.join(CMAKE_INSTALL_DIR(), prefix, data_file)
_copy_file(src_data_file, dest_data_file, hide_listing)
| 38.586377 | 124 | 0.656698 | [
"MIT"
] | pekkarr/scikit-build | skbuild/setuptools_wrap.py | 39,088 | Python |
# The code for this extension is based on https://github.com/ulrobix/sphinxcontrib-contentui
| 46.5 | 92 | 0.806452 | [
"Apache-2.0"
] | NivekNey/sparkling-water | doc/src/site/sphinx/extensions/contentui/__init__.py | 93 | Python |
# (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2020 Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible import context
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import become_loader
def test_ksu(mocker, parser, reset_cli_args):
options = parser.parse_args([])
context._init_global_context(options)
play_context = PlayContext()
default_cmd = "/bin/foo"
default_exe = "/bin/bash"
ksu_exe = 'ksu'
ksu_flags = ''
cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
assert cmd == default_cmd
success = 'BECOME-SUCCESS-.+?'
play_context.become = True
play_context.become_user = 'foo'
play_context.set_become_plugin(become_loader.get('ksu'))
play_context.become_method = 'ksu'
play_context.become_flags = ksu_flags
cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
assert (re.match("""%s %s %s -e %s -c 'echo %s; %s'""" % (ksu_exe, play_context.become_user, ksu_flags,
default_exe, success, default_cmd), cmd) is not None)
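    # For reference, the become wrapper asserted above looks roughly like
    # (the success token is random and the doubled space comes from the empty flags):
    #   ksu foo  -e /bin/bash -c 'echo BECOME-SUCCESS-<random>; /bin/foo'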
| 34.1 | 115 | 0.699413 | [
"MIT"
] | tr3ck3r/linklight | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/plugins/become/test_ksu.py | 1,364 | Python |
print "Hello World!"
print "Hello Again"
print "I like typing this."
print "This is fun."
print "Yay! Printing"
print "Id much rather you 'not'."
print 'I "said" do not touch this.'
| 22.75 | 35 | 0.697802 | [
"MIT"
] | llinmeng/PythonStudy | python_ex/01ex.py | 182 | Python |
# Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
from .common import CMakeException
from .generator import parse_generator_expressions
from .. import mlog
from ..mesonlib import version_compare
import typing as T
from pathlib import Path
from functools import lru_cache
import re
import json
import textwrap
class CMakeTraceLine:
def __init__(self, file_str: str, line: int, func: str, args: T.List[str]) -> None:
self.file = CMakeTraceLine._to_path(file_str)
self.line = line
self.func = func.lower()
self.args = args
@staticmethod
@lru_cache(maxsize=None)
def _to_path(file_str: str) -> Path:
return Path(file_str)
def __repr__(self) -> str:
s = 'CMake TRACE: {0}:{1} {2}({3})'
return s.format(self.file, self.line, self.func, self.args)
class CMakeCacheEntry(T.NamedTuple):
value: T.List[str]
type: str
class CMakeTarget:
def __init__(
self,
name: str,
target_type: str,
properties: T.Optional[T.Dict[str, T.List[str]]] = None,
imported: bool = False,
tline: T.Optional[CMakeTraceLine] = None
):
if properties is None:
properties = {}
self.name = name
self.type = target_type
self.properties = properties
self.imported = imported
self.tline = tline
self.depends = [] # type: T.List[str]
self.current_bin_dir = None # type: T.Optional[Path]
self.current_src_dir = None # type: T.Optional[Path]
def __repr__(self) -> str:
s = 'CMake TARGET:\n -- name: {}\n -- type: {}\n -- imported: {}\n -- properties: {{\n{} }}\n -- tline: {}'
propSTR = ''
for i in self.properties:
propSTR += " '{}': {}\n".format(i, self.properties[i])
return s.format(self.name, self.type, self.imported, propSTR, self.tline)
def strip_properties(self) -> None:
# Strip the strings in the properties
if not self.properties:
return
for key, val in self.properties.items():
self.properties[key] = [x.strip() for x in val]
assert all([';' not in x for x in self.properties[key]])
class CMakeGeneratorTarget(CMakeTarget):
def __init__(self, name: str) -> None:
super().__init__(name, 'CUSTOM', {})
self.outputs = [] # type: T.List[Path]
self.command = [] # type: T.List[T.List[str]]
self.working_dir = None # type: T.Optional[Path]
class CMakeTraceParser:
def __init__(self, cmake_version: str, build_dir: Path, permissive: bool = True) -> None:
self.vars: T.Dict[str, T.List[str]] = {}
self.vars_by_file: T.Dict[Path, T.Dict[str, T.List[str]]] = {}
self.targets: T.Dict[str, CMakeTarget] = {}
self.cache: T.Dict[str, CMakeCacheEntry] = {}
self.explicit_headers = set() # type: T.Set[Path]
        # T.List of targets that were added with add_custom_command to generate files
self.custom_targets = [] # type: T.List[CMakeGeneratorTarget]
self.permissive = permissive # type: bool
self.cmake_version = cmake_version # type: str
self.trace_file = 'cmake_trace.txt'
self.trace_file_path = build_dir / self.trace_file
self.trace_format = 'json-v1' if version_compare(cmake_version, '>=3.17') else 'human'
# State for delayed command execution. Delayed command execution is realised
# with a custom CMake file that overrides some functions and adds some
# introspection information to the trace.
self.delayed_commands = [] # type: T.List[str]
self.stored_commands = [] # type: T.List[CMakeTraceLine]
# All supported functions
self.functions = {
'set': self._cmake_set,
'unset': self._cmake_unset,
'add_executable': self._cmake_add_executable,
'add_library': self._cmake_add_library,
'add_custom_command': self._cmake_add_custom_command,
'add_custom_target': self._cmake_add_custom_target,
'set_property': self._cmake_set_property,
'set_target_properties': self._cmake_set_target_properties,
'target_compile_definitions': self._cmake_target_compile_definitions,
'target_compile_options': self._cmake_target_compile_options,
'target_include_directories': self._cmake_target_include_directories,
'target_link_libraries': self._cmake_target_link_libraries,
'target_link_options': self._cmake_target_link_options,
'add_dependencies': self._cmake_add_dependencies,
# Special functions defined in the preload script.
# These functions do nothing in the CMake code, but have special
# meaning here in the trace parser.
'meson_ps_execute_delayed_calls': self._meson_ps_execute_delayed_calls,
'meson_ps_reload_vars': self._meson_ps_reload_vars,
'meson_ps_disabled_function': self._meson_ps_disabled_function,
} # type: T.Dict[str, T.Callable[[CMakeTraceLine], None]]
if version_compare(self.cmake_version, '<3.17.0'):
mlog.deprecation(textwrap.dedent(f'''\
CMake support for versions <3.17 is deprecated since Meson 0.62.0.
|
| However, Meson was only able to find CMake {self.cmake_version}.
|
| Support for all CMake versions below 3.17.0 will be removed once
| newer CMake versions are more widely adopted. If you encounter
| any errors please try upgrading CMake to a newer version first.
'''), once=True)
def trace_args(self) -> T.List[str]:
arg_map = {
'human': ['--trace', '--trace-expand'],
'json-v1': ['--trace-expand', '--trace-format=json-v1'],
}
base_args = ['--no-warn-unused-cli']
if not self.requires_stderr():
base_args += [f'--trace-redirect={self.trace_file}']
return arg_map[self.trace_format] + base_args
def requires_stderr(self) -> bool:
return version_compare(self.cmake_version, '<3.16')
def parse(self, trace: T.Optional[str] = None) -> None:
# First load the trace (if required)
if not self.requires_stderr():
            if not self.trace_file_path.is_file():
raise CMakeException(f'CMake: Trace file "{self.trace_file_path!s}" not found')
trace = self.trace_file_path.read_text(errors='ignore', encoding='utf-8')
if not trace:
raise CMakeException('CMake: The CMake trace was not provided or is empty')
# Second parse the trace
lexer1 = None
if self.trace_format == 'human':
lexer1 = self._lex_trace_human(trace)
elif self.trace_format == 'json-v1':
lexer1 = self._lex_trace_json(trace)
else:
raise CMakeException(f'CMake: Internal error: Invalid trace format {self.trace_format}. Expected [human, json-v1]')
# Primary pass -- parse everything
for l in lexer1:
# store the function if its execution should be delayed
if l.func in self.delayed_commands:
self.stored_commands += [l]
continue
# "Execute" the CMake function if supported
fn = self.functions.get(l.func, None)
if fn:
fn(l)
# Postprocess
for tgt in self.targets.values():
tgt.strip_properties()
def get_first_cmake_var_of(self, var_list: T.List[str]) -> T.List[str]:
# Return the first found CMake variable in list var_list
for i in var_list:
if i in self.vars:
return self.vars[i]
return []
def get_cmake_var(self, var: str) -> T.List[str]:
# Return the value of the CMake variable var or an empty list if var does not exist
if var in self.vars:
return self.vars[var]
return []
def var_to_str(self, var: str) -> T.Optional[str]:
if var in self.vars and self.vars[var]:
return self.vars[var][0]
return None
def _str_to_bool(self, expr: T.Union[str, T.List[str]]) -> bool:
if not expr:
return False
if isinstance(expr, list):
expr_str = expr[0]
else:
expr_str = expr
expr_str = expr_str.upper()
return expr_str not in ['0', 'OFF', 'NO', 'FALSE', 'N', 'IGNORE'] and not expr_str.endswith('NOTFOUND')
def var_to_bool(self, var: str) -> bool:
return self._str_to_bool(self.vars.get(var, []))
def _gen_exception(self, function: str, error: str, tline: CMakeTraceLine) -> None:
# Generate an exception if the parser is not in permissive mode
if self.permissive:
mlog.debug(f'CMake trace warning: {function}() {error}\n{tline}')
return None
raise CMakeException(f'CMake: {function}() {error}\n{tline}')
def _cmake_set(self, tline: CMakeTraceLine) -> None:
"""Handler for the CMake set() function in all variaties.
comes in three flavors:
set(<var> <value> [PARENT_SCOPE])
set(<var> <value> CACHE <type> <docstring> [FORCE])
set(ENV{<var>} <value>)
        We don't support the ENV variant, and any uses of it will be ignored
        silently. The other two variants are supported, with some caveats:
- we don't properly handle scoping, so calls to set() inside a
function without PARENT_SCOPE set could incorrectly shadow the
outer scope.
- We don't honor the type of CACHE arguments
"""
# DOC: https://cmake.org/cmake/help/latest/command/set.html
cache_type = None
cache_force = 'FORCE' in tline.args
try:
cache_idx = tline.args.index('CACHE')
cache_type = tline.args[cache_idx + 1]
except (ValueError, IndexError):
pass
# 1st remove PARENT_SCOPE and CACHE from args
args = []
for i in tline.args:
if not i or i == 'PARENT_SCOPE':
continue
# Discard everything after the CACHE keyword
if i == 'CACHE':
break
args.append(i)
if len(args) < 1:
return self._gen_exception('set', 'requires at least one argument', tline)
# Now that we've removed extra arguments all that should be left is the
# variable identifier and the value, join the value back together to
# ensure spaces in the value are correctly handled. This assumes that
# variable names don't have spaces. Please don't do that...
identifier = args.pop(0)
value = ' '.join(args)
# Write to the CMake cache instead
if cache_type:
# Honor how the CMake FORCE parameter works
if identifier not in self.cache or cache_force:
self.cache[identifier] = CMakeCacheEntry(value.split(';'), cache_type)
if not value:
# Same as unset
if identifier in self.vars:
del self.vars[identifier]
else:
self.vars[identifier] = value.split(';')
self.vars_by_file.setdefault(tline.file, {})[identifier] = value.split(';')
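    # Hedged example (the exact trace tokens depend on CMake's expansion): a
    # traced call such as
    #   set(FOO "bar;baz" CACHE STRING "docs" FORCE)
    # ends up with self.vars['FOO'] == ['bar', 'baz'] and
    # self.cache['FOO'] == CMakeCacheEntry(['bar', 'baz'], 'STRING').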
def _cmake_unset(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/unset.html
if len(tline.args) < 1:
return self._gen_exception('unset', 'requires at least one argument', tline)
if tline.args[0] in self.vars:
del self.vars[tline.args[0]]
def _cmake_add_executable(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_executable.html
args = list(tline.args) # Make a working copy
# Make sure the exe is imported
is_imported = True
if 'IMPORTED' not in args:
return self._gen_exception('add_executable', 'non imported executables are not supported', tline)
args.remove('IMPORTED')
if len(args) < 1:
return self._gen_exception('add_executable', 'requires at least 1 argument', tline)
self.targets[args[0]] = CMakeTarget(args[0], 'EXECUTABLE', {}, tline=tline, imported=is_imported)
def _cmake_add_library(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_library.html
args = list(tline.args) # Make a working copy
# Make sure the lib is imported
if 'INTERFACE' in args:
args.remove('INTERFACE')
if len(args) < 1:
return self._gen_exception('add_library', 'interface library name not specified', tline)
self.targets[args[0]] = CMakeTarget(args[0], 'INTERFACE', {}, tline=tline, imported='IMPORTED' in args)
elif 'IMPORTED' in args:
args.remove('IMPORTED')
# Now, only look at the first two arguments (target_name and target_type) and ignore the rest
if len(args) < 2:
return self._gen_exception('add_library', 'requires at least 2 arguments', tline)
self.targets[args[0]] = CMakeTarget(args[0], args[1], {}, tline=tline, imported=True)
elif 'ALIAS' in args:
args.remove('ALIAS')
# Now, only look at the first two arguments (target_name and target_ref) and ignore the rest
if len(args) < 2:
return self._gen_exception('add_library', 'requires at least 2 arguments', tline)
# Simulate the ALIAS with INTERFACE_LINK_LIBRARIES
self.targets[args[0]] = CMakeTarget(args[0], 'ALIAS', {'INTERFACE_LINK_LIBRARIES': [args[1]]}, tline=tline)
elif 'OBJECT' in args:
return self._gen_exception('add_library', 'OBJECT libraries are not supported', tline)
else:
self.targets[args[0]] = CMakeTarget(args[0], 'NORMAL', {}, tline=tline)
def _cmake_add_custom_command(self, tline: CMakeTraceLine, name: T.Optional[str] = None) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_custom_command.html
args = self._flatten_args(list(tline.args)) # Commands can be passed as ';' separated lists
if not args:
return self._gen_exception('add_custom_command', 'requires at least 1 argument', tline)
# Skip the second function signature
if args[0] == 'TARGET':
return self._gen_exception('add_custom_command', 'TARGET syntax is currently not supported', tline)
magic_keys = ['OUTPUT', 'COMMAND', 'MAIN_DEPENDENCY', 'DEPENDS', 'BYPRODUCTS',
'IMPLICIT_DEPENDS', 'WORKING_DIRECTORY', 'COMMENT', 'DEPFILE',
'JOB_POOL', 'VERBATIM', 'APPEND', 'USES_TERMINAL', 'COMMAND_EXPAND_LISTS']
target = CMakeGeneratorTarget(name)
def handle_output(key: str, target: CMakeGeneratorTarget) -> None:
target.outputs += [Path(key)]
def handle_command(key: str, target: CMakeGeneratorTarget) -> None:
if key == 'ARGS':
return
target.command[-1] += [key]
def handle_depends(key: str, target: CMakeGeneratorTarget) -> None:
target.depends += [key]
working_dir = None
def handle_working_dir(key: str, target: CMakeGeneratorTarget) -> None:
nonlocal working_dir
if working_dir is None:
working_dir = key
else:
working_dir += ' '
working_dir += key
fn = None
for i in args:
if i in magic_keys:
if i == 'OUTPUT':
fn = handle_output
elif i == 'DEPENDS':
fn = handle_depends
elif i == 'WORKING_DIRECTORY':
fn = handle_working_dir
elif i == 'COMMAND':
fn = handle_command
target.command += [[]]
else:
fn = None
continue
if fn is not None:
fn(i, target)
cbinary_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_BINARY_DIR')
csource_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_SOURCE_DIR')
target.working_dir = Path(working_dir) if working_dir else None
target.current_bin_dir = Path(cbinary_dir) if cbinary_dir else None
target.current_src_dir = Path(csource_dir) if csource_dir else None
target.outputs = [Path(x) for x in self._guess_files([str(y) for y in target.outputs])]
target.depends = self._guess_files(target.depends)
target.command = [self._guess_files(x) for x in target.command]
self.custom_targets += [target]
if name:
self.targets[name] = target
def _cmake_add_custom_target(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_custom_target.html
        # Only the first parameter (the target name) is interesting here
if len(tline.args) < 1:
return self._gen_exception('add_custom_target', 'requires at least one argument', tline)
# It's pretty much the same as a custom command
self._cmake_add_custom_command(tline, tline.args[0])
def _cmake_set_property(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/set_property.html
args = list(tline.args)
scope = args.pop(0)
append = False
targets = []
while args:
curr = args.pop(0)
# XXX: APPEND_STRING is specifically *not* supposed to create a
# list, is treating them as aliases really okay?
if curr == 'APPEND' or curr == 'APPEND_STRING':
append = True
continue
if curr == 'PROPERTY':
break
targets += curr.split(';')
if not args:
            return self._gen_exception('set_property', 'failed to parse argument list', tline)
if len(args) == 1:
# Tries to set property to nothing so nothing has to be done
return
identifier = args.pop(0)
if self.trace_format == 'human':
value = ' '.join(args).split(';')
else:
value = [y for x in args for y in x.split(';')]
if not value:
return
def do_target(t: str) -> None:
if t not in self.targets:
return self._gen_exception('set_property', f'TARGET {t} not found', tline)
tgt = self.targets[t]
if identifier not in tgt.properties:
tgt.properties[identifier] = []
if append:
tgt.properties[identifier] += value
else:
tgt.properties[identifier] = value
def do_source(src: str) -> None:
if identifier != 'HEADER_FILE_ONLY' or not self._str_to_bool(value):
return
current_src_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_SOURCE_DIR')
if not current_src_dir:
mlog.warning(textwrap.dedent('''\
CMake trace: set_property(SOURCE) called before the preload script was loaded.
Unable to determine CMAKE_CURRENT_SOURCE_DIR. This can lead to build errors.
'''))
current_src_dir = '.'
cur_p = Path(current_src_dir)
src_p = Path(src)
if not src_p.is_absolute():
src_p = cur_p / src_p
self.explicit_headers.add(src_p)
if scope == 'TARGET':
for i in targets:
do_target(i)
elif scope == 'SOURCE':
files = self._guess_files(targets)
for i in files:
do_source(i)
def _cmake_set_target_properties(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/set_target_properties.html
args = list(tline.args)
targets = []
while args:
curr = args.pop(0)
if curr == 'PROPERTIES':
break
targets.append(curr)
        # Now we need to try to reconstitute the original quoted format of the
# arguments, as a property value could have spaces in it. Unlike
# set_property() this is not context free. There are two approaches I
# can think of, both have drawbacks:
#
# 1. Assume that the property will be capitalized ([A-Z_]), this is
# convention but cmake doesn't require it.
# 2. Maintain a copy of the list here: https://cmake.org/cmake/help/latest/manual/cmake-properties.7.html#target-properties
#
# Neither of these is awesome for obvious reasons. I'm going to try
# option 1 first and fall back to 2, as 1 requires less code and less
        # synchronization for cmake changes.
#
# With the JSON output format, introduced in CMake 3.17, spaces are
        # handled properly and we don't need either of the options above.
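        # Hedged example (property names and values are illustrative): for a
        # human-format trace, remaining args such as
        #   ['OUTPUT_NAME', 'my', 'lib', 'SUFFIX', '.so']
        # are reassembled into [('OUTPUT_NAME', ['my lib']), ('SUFFIX', ['.so'])].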
arglist = [] # type: T.List[T.Tuple[str, T.List[str]]]
if self.trace_format == 'human':
name = args.pop(0)
values = [] # type: T.List[str]
prop_regex = re.compile(r'^[A-Z_]+$')
for a in args:
if prop_regex.match(a):
if values:
arglist.append((name, ' '.join(values).split(';')))
name = a
values = []
else:
values.append(a)
if values:
arglist.append((name, ' '.join(values).split(';')))
else:
arglist = [(x[0], x[1].split(';')) for x in zip(args[::2], args[1::2])]
for name, value in arglist:
for i in targets:
if i not in self.targets:
return self._gen_exception('set_target_properties', f'TARGET {i} not found', tline)
self.targets[i].properties[name] = value
def _cmake_add_dependencies(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_dependencies.html
args = list(tline.args)
if len(args) < 2:
return self._gen_exception('add_dependencies', 'takes at least 2 arguments', tline)
target = self.targets.get(args[0])
if not target:
return self._gen_exception('add_dependencies', 'target not found', tline)
for i in args[1:]:
target.depends += i.split(';')
def _cmake_target_compile_definitions(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_compile_definitions.html
self._parse_common_target_options('target_compile_definitions', 'COMPILE_DEFINITIONS', 'INTERFACE_COMPILE_DEFINITIONS', tline)
def _cmake_target_compile_options(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_compile_options.html
self._parse_common_target_options('target_compile_options', 'COMPILE_OPTIONS', 'INTERFACE_COMPILE_OPTIONS', tline)
def _cmake_target_include_directories(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_include_directories.html
self._parse_common_target_options('target_include_directories', 'INCLUDE_DIRECTORIES', 'INTERFACE_INCLUDE_DIRECTORIES', tline, ignore=['SYSTEM', 'BEFORE'], paths=True)
def _cmake_target_link_options(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_link_options.html
self._parse_common_target_options('target_link_options', 'LINK_OPTIONS', 'INTERFACE_LINK_OPTIONS', tline)
def _cmake_target_link_libraries(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_link_libraries.html
        self._parse_common_target_options('target_link_libraries', 'LINK_LIBRARIES', 'INTERFACE_LINK_LIBRARIES', tline)
def _parse_common_target_options(self, func: str, private_prop: str, interface_prop: str, tline: CMakeTraceLine, ignore: T.Optional[T.List[str]] = None, paths: bool = False) -> None:
if ignore is None:
ignore = ['BEFORE']
args = list(tline.args)
if len(args) < 1:
return self._gen_exception(func, 'requires at least one argument', tline)
target = args[0]
if target not in self.targets:
return self._gen_exception(func, f'TARGET {target} not found', tline)
interface = []
private = []
mode = 'PUBLIC'
for i in args[1:]:
if i in ignore:
continue
if i in ['INTERFACE', 'LINK_INTERFACE_LIBRARIES', 'PUBLIC', 'PRIVATE', 'LINK_PUBLIC', 'LINK_PRIVATE']:
mode = i
continue
if mode in ['INTERFACE', 'LINK_INTERFACE_LIBRARIES', 'PUBLIC', 'LINK_PUBLIC']:
interface += i.split(';')
if mode in ['PUBLIC', 'PRIVATE', 'LINK_PRIVATE']:
private += i.split(';')
if paths:
interface = self._guess_files(interface)
private = self._guess_files(private)
interface = [x for x in interface if x]
private = [x for x in private if x]
for j in [(private_prop, private), (interface_prop, interface)]:
if not j[0] in self.targets[target].properties:
self.targets[target].properties[j[0]] = []
self.targets[target].properties[j[0]] += j[1]
def _meson_ps_execute_delayed_calls(self, tline: CMakeTraceLine) -> None:
for l in self.stored_commands:
fn = self.functions.get(l.func, None)
if fn:
fn(l)
# clear the stored commands
self.stored_commands = []
def _meson_ps_reload_vars(self, tline: CMakeTraceLine) -> None:
self.delayed_commands = self.get_cmake_var('MESON_PS_DELAYED_CALLS')
def _meson_ps_disabled_function(self, tline: CMakeTraceLine) -> None:
args = list(tline.args)
if not args:
mlog.error('Invalid preload.cmake script! At least one argument to `meson_ps_disabled_function` is expected')
return
mlog.warning(f'The CMake function "{args[0]}" was disabled to avoid compatibility issues with Meson.')
def _lex_trace_human(self, trace: str) -> T.Generator[CMakeTraceLine, None, None]:
# The trace format is: '<file>(<line>): <func>(<args -- can contain \n> )\n'
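        # Hedged example of one matching line (path and arguments are illustrative):
        #   /home/user/project/CMakeLists.txt(42):  add_library(foo SHARED foo.c )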
reg_tline = re.compile(r'\s*(.*\.(cmake|txt))\(([0-9]+)\):\s*(\w+)\(([\s\S]*?) ?\)\s*\n', re.MULTILINE)
reg_other = re.compile(r'[^\n]*\n')
loc = 0
while loc < len(trace):
mo_file_line = reg_tline.match(trace, loc)
if not mo_file_line:
skip_match = reg_other.match(trace, loc)
if not skip_match:
print(trace[loc:])
raise CMakeException('Failed to parse CMake trace')
loc = skip_match.end()
continue
loc = mo_file_line.end()
file = mo_file_line.group(1)
line = mo_file_line.group(3)
func = mo_file_line.group(4)
args = mo_file_line.group(5)
args = parse_generator_expressions(args)
argl = args.split(' ')
argl = list(map(lambda x: x.strip(), argl))
yield CMakeTraceLine(file, int(line), func, argl)
def _lex_trace_json(self, trace: str) -> T.Generator[CMakeTraceLine, None, None]:
lines = trace.splitlines(keepends=False)
lines.pop(0) # The first line is the version
for i in lines:
data = json.loads(i)
assert isinstance(data['file'], str)
assert isinstance(data['line'], int)
assert isinstance(data['cmd'], str)
assert isinstance(data['args'], list)
args = data['args']
for j in args:
assert isinstance(j, str)
args = [parse_generator_expressions(x) for x in args]
yield CMakeTraceLine(data['file'], data['line'], data['cmd'], args)
def _flatten_args(self, args: T.List[str]) -> T.List[str]:
# Split lists in arguments
res = [] # type: T.List[str]
for i in args:
res += i.split(';')
return res
def _guess_files(self, broken_list: T.List[str]) -> T.List[str]:
# Nothing has to be done for newer formats
if self.trace_format != 'human':
return broken_list
# Try joining file paths that contain spaces
reg_start = re.compile(r'^([A-Za-z]:)?/(.*/)*[^./]+$')
reg_end = re.compile(r'^.*\.[a-zA-Z]+$')
fixed_list = [] # type: T.List[str]
curr_str = None # type: T.Optional[str]
path_found = False # type: bool
for i in broken_list:
if curr_str is None:
curr_str = i
path_found = False
elif Path(curr_str).is_file():
# Abort concatenation if curr_str is an existing file
fixed_list += [curr_str]
curr_str = i
path_found = False
elif not reg_start.match(curr_str):
# Abort concatenation if curr_str no longer matches the regex
fixed_list += [curr_str]
curr_str = i
path_found = False
elif reg_end.match(i):
# File detected
curr_str = f'{curr_str} {i}'
fixed_list += [curr_str]
curr_str = None
path_found = False
elif Path(f'{curr_str} {i}').exists():
# Path detected
curr_str = f'{curr_str} {i}'
path_found = True
elif path_found:
# Add path to fixed_list after ensuring the whole path is in curr_str
fixed_list += [curr_str]
curr_str = i
path_found = False
else:
curr_str = f'{curr_str} {i}'
path_found = False
if curr_str:
fixed_list += [curr_str]
return fixed_list
| 41.014304 | 186 | 0.590298 | [
"Apache-2.0"
] | Linux-Defender/meson | mesonbuild/cmake/traceparser.py | 31,540 | Python |
# -*- coding: utf-8 -*-
'''
A Runner module interface on top of the salt-ssh Python API.
This allows for programmatic use from salt-api, the Reactor, Orchestrate, etc.
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Libs
import salt.client.ssh.client
def cmd(
tgt,
fun,
arg=(),
timeout=None,
expr_form='glob',
kwarg=None):
'''
Execute a single command via the salt-ssh subsystem and return all
routines at once
    .. versionadded:: 2015.2
A wrapper around the :py:meth:`SSHClient.cmd
<salt.client.ssh.client.SSHClient.cmd>` method.
'''
client = salt.client.ssh.client.SSHClient(mopts=__opts__)
return client.cmd(
tgt,
fun,
arg,
timeout,
expr_form,
kwarg)
| 21.175 | 78 | 0.602125 | [
"Apache-2.0"
] | 0xf10e/salt | salt/runners/ssh.py | 847 | Python |
from __future__ import print_function
from flask import Flask, Response
from pyzbar import pyzbar
from picamera.array import PiRGBArray
from picamera import PiCamera
from datetime import datetime
import numpy as np
import cv2
import time
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)
app = Flask(__name__)
@app.route('/stream')
def stream():
return Response(gen(),
mimetype='multipart/x-mixed-replace; boundary=frame')
def gen():
while True:
frame = get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
def get_frame():
camera.capture(rawCapture, format="bgr", use_video_port=True)
frame = rawCapture.array
decoded_objs = decode(frame)
frame = display(frame, decoded_objs)
ret, jpeg = cv2.imencode('.jpg', frame)
rawCapture.truncate(0)
return jpeg.tobytes()
def decode(frame):
decoded_objs = pyzbar.decode(frame, scan_locations=True)
for obj in decoded_objs:
print(datetime.now().strftime('%H:%M:%S.%f'))
print('Type: ', obj.type)
print('Data: ', obj.data)
return decoded_objs
def display(frame, decoded_objs):
for decoded_obj in decoded_objs:
left, top, width, height = decoded_obj.rect
frame = cv2.rectangle(frame,
(left, top),
(left + width, height + top),
(0, 255, 255), 2)
return frame
if __name__ == '__main__':
app.run(host="0.0.0.0", debug=False, threaded=True)
| 26.539683 | 73 | 0.623206 | [
"MIT"
] | shihu/qr-reader | main.py | 1,672 | Python |
#!/usr/bin/python -i
import Block
import rlcompleter, readline
readline.parse_and_bind("tab: complete")
device = Block.Block("86:00.0",2,"libcomanche-blknvme.so")
buffer = device.allocate_io_buffer(4096,32,-1)
info = device.get_volume_info()
info
| 19.307692 | 58 | 0.756972 | [
"Apache-2.0"
] | Bhaskers-Blu-Org1/comanche | src/components/api/wrappers/test.py | 251 | Python |
import json
import boto3
def lambda_handler(event, context):
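    # Forward the incoming chat text to the Lex bot and wrap its reply in the
    # message structure expected by the calling frontend.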
print(event)
lex = boto3.client('lex-runtime')
lex_resp = lex.post_text(
botName = 'dining_concierge_bot',
botAlias = 'Test',
userId = 'user01',
inputText = event['messages'][0]['unstructured']['text'],
activeContexts=[]
)
response = {
"messages":
[
{"type": "unstructured",
"unstructured":
{
"text": lex_resp['message']
}
}
]
}
return response
| 23.961538 | 65 | 0.457464 | [
"MIT"
] | nikhilkhaneja/Dining-Concierge-using-AWS-1 | Lambda functions/LF0.py | 623 | Python |
# coding=utf-8
import os, clr
os.chdir(os.path.dirname(__file__))
clr.AddReference('System.Drawing')
clr.AddReference('System.Windows.Forms')
from System import Drawing, Array, ComponentModel, Diagnostics, IO
from System.Windows import Forms
import System.Object as object
import System.String as string
from System.Windows.Forms import MessageBox
#----------------------------------------------------------------------------
from collections import OrderedDict
import logging
logging.basicConfig(filename='./message.log', level=logging.DEBUG, filemode='w', format='%(message)s')
import re
import ScriptEnv
ScriptEnv.Initialize("Ansoft.ElectronicsDesktop")
oDesktop.RestoreWindow()
oDesktop.ClearMessages("", "", 2)
oProject = oDesktop.GetActiveProject()
oDesign = oProject.GetActiveDesign()
oEditor = oDesign.GetActiveEditor()
lines = oEditor.FindObjects('type', 'line')
def getLayerLineInfo():
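    # Group every line object by stackup layer and net, collecting (line name, line width) pairs.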
data = OrderedDict()
for layer in oEditor.GetStackupLayerNames():
objs = oEditor.FindObjects('layer', layer)
data[layer] = list(set(lines).intersection(set(objs)))
result = OrderedDict()
for layer in data:
if not bool(data[layer]):
continue
result[layer] = {}
for line in data[layer]:
net = oEditor.GetPropertyValue('BaseElementTab', line, 'Net')
line_width = oEditor.GetPropertyValue('BaseElementTab', line, 'LineWidth')
try:
result[layer][net] +=[(line, line_width)]
except:
result[layer][net] = [(line, line_width)]
return result
def changeLineWidth(line, width):
oEditor.ChangeProperty(
[
"NAME:AllTabs",
[
"NAME:BaseElementTab",
[
"NAME:PropServers",
line
],
[
"NAME:ChangedProps",
[
"NAME:LineWidth",
"Value:=" , width
]
]
]
])
#----------------------------------------------------------------------------
class MyForm(Forms.Form):
def __init__(self):
self.label1 = Forms.Label()
self.label2 = Forms.Label()
self.listBox_selection = Forms.ListBox()
self.comboBox_layer = Forms.ComboBox()
self.textBox_net = Forms.TextBox()
self.label3 = Forms.Label()
self.textBox_linewidth = Forms.TextBox()
self.button_change = Forms.Button()
self.label4 = Forms.Label()
self.SuspendLayout()
# label1
self.label1.AutoSize = True
self.label1.Location = Drawing.Point(13, 10)
self.label1.Name = "label1"
self.label1.Size = Drawing.Size(50, 19)
self.label1.TabIndex = 0
self.label1.Text = "Layer:"
# label2
self.label2.AutoSize = True
self.label2.Location = Drawing.Point(13, 72)
self.label2.Name = "label2"
self.label2.Size = Drawing.Size(103, 19)
self.label2.TabIndex = 1
self.label2.Text = "Net Keyword:"
self.label2.Click += self.label2_Click
# listBox_selection
self.listBox_selection.FormattingEnabled = True
self.listBox_selection.ItemHeight = 19
self.listBox_selection.Location = Drawing.Point(174, 32)
self.listBox_selection.Name = "listBox_selection"
self.listBox_selection.SelectionMode = Forms.SelectionMode.MultiExtended
self.listBox_selection.Size = Drawing.Size(225, 308)
self.listBox_selection.TabIndex = 2
self.listBox_selection.SelectedIndexChanged += self.listBox_selection_SelectedIndexChanged
# comboBox_layer
self.comboBox_layer.FormattingEnabled = True
self.comboBox_layer.Location = Drawing.Point(13, 32)
self.comboBox_layer.Name = "comboBox_layer"
self.comboBox_layer.Size = Drawing.Size(151, 27)
self.comboBox_layer.TabIndex = 3
self.comboBox_layer.SelectedIndexChanged += self.comboBox_layer_SelectedIndexChanged
# textBox_net
self.textBox_net.Location = Drawing.Point(13, 94)
self.textBox_net.Name = "textBox_net"
self.textBox_net.Size = Drawing.Size(151, 27)
self.textBox_net.TabIndex = 4
self.textBox_net.Text = ".*"
self.textBox_net.TextChanged += self.textBox_net_TextChanged
# label3
self.label3.AutoSize = True
self.label3.Location = Drawing.Point(13, 207)
self.label3.Name = "label3"
self.label3.Size = Drawing.Size(88, 19)
self.label3.TabIndex = 5
self.label3.Text = "Line Width:"
# textBox_linewidth
self.textBox_linewidth.Location = Drawing.Point(13, 229)
self.textBox_linewidth.Name = "textBox_linewidth"
self.textBox_linewidth.Size = Drawing.Size(151, 27)
self.textBox_linewidth.TabIndex = 6
# button_change
self.button_change.Font = Drawing.Font("Microsoft JhengHei UI", 12, Drawing.FontStyle.Bold, Drawing.GraphicsUnit.Point)
self.button_change.Location = Drawing.Point(13, 278)
self.button_change.Name = "button_change"
self.button_change.Size = Drawing.Size(151, 62)
self.button_change.TabIndex = 7
self.button_change.Text = "CHANGE"
self.button_change.UseVisualStyleBackColor = True
self.button_change.Click += self.button_change_Click
# label4
self.label4.AutoSize = True
self.label4.Location = Drawing.Point(174, 10)
self.label4.Name = "label4"
self.label4.Size = Drawing.Size(104, 19)
self.label4.TabIndex = 8
self.label4.Text = "Net Selection:"
# Form1
self.AutoScaleDimensions = Drawing.SizeF(9, 19)
self.AutoScaleMode = Forms.AutoScaleMode.Font
self.ClientSize = Drawing.Size(412, 353)
self.Controls.Add(self.label4)
self.Controls.Add(self.button_change)
self.Controls.Add(self.textBox_linewidth)
self.Controls.Add(self.label3)
self.Controls.Add(self.textBox_net)
self.Controls.Add(self.comboBox_layer)
self.Controls.Add(self.listBox_selection)
self.Controls.Add(self.label2)
self.Controls.Add(self.label1)
self.FormBorderStyle = Forms.FormBorderStyle.FixedSingle
self.MaximizeBox = False
self.MinimizeBox = False
self.MinimumSize = Drawing.Size(400, 400)
self.Name = "Form1"
self.Padding = Forms.Padding(10)
self.SizeGripStyle = Forms.SizeGripStyle.Show
self.StartPosition = Forms.FormStartPosition.CenterScreen
self.Text = "Line Width Editor"
self.TopMost = True
self.Load += self.Form1_Load
self.ResumeLayout(False)
self.PerformLayout()
def refreshListBox(self):
self.listBox_selection.Items.Clear()
for net in self.info[self.comboBox_layer.Text]:
if re.search(self.textBox_net.Text, net):
width = self.info[self.comboBox_layer.Text][net][0][1]
self.listBox_selection.Items.Add('{} - {}'.format(net, width))
def textBox_net_TextChanged(self, sender, e):
self.refreshListBox()
def label2_Click(self, sender, e):
pass
def listBox_selection_SelectedIndexChanged(self, sender, e):
pass
def comboBox_layer_SelectedIndexChanged(self, sender, e):
self.refreshListBox()
def button_change_Click(self, sender, e):
try:
new_width = self.textBox_linewidth.Text
for net_width in self.listBox_selection.SelectedItems:
net = net_width.split()[0]
for n, (line, width) in enumerate(self.info[self.comboBox_layer.Text][net]):
changeLineWidth(line, new_width)
self.info[self.comboBox_layer.Text][net][n] = (line, new_width)
self.refreshListBox()
except:
logging.exception('Error')
MessageBox.Show('Invalid Input!')
self.refreshListBox()
def Form1_Load(self, sender, e):
self.info = getLayerLineInfo()
for layer in self.info:
self.comboBox_layer.Items.Add(layer)
self.comboBox_layer.SelectedIndex = 0
if __name__ == '__main__':
form = MyForm()
form.ShowDialog()
form = MyForm()
form.Dispose()
AddWarningMessage('Good Bye!')
#form.Show()
#oDesktop.PauseScript()
| 38.375546 | 128 | 0.600819 | [
"MIT"
] | hefeifan98/HowtoSim_Script | 2021/changeLineWidth.py | 8,788 | Python |
from src.NN import NetWrapper
from src.games.Tictactoe import Tictactoe
from src.Player import *
from src.MCTS import MCTS
import yaml
with open("config.yaml", 'r') as f:
config = yaml.safe_load(f)
game = Tictactoe(**config['GAME'])
nn = NetWrapper(game, **config['NN'])
nn.load_model("models/the_bestest_of_models.pt")
nn1 = NetWrapper(game, **config['NN'])
nn1.load_model()
mcts = MCTS(**config['MCTS'])
play_game(game, p1 = AlphaZeroPlayer(nn1, mcts), p2 = HumanPlayer(), print_b = True)
#player_vs_player(game, p1 = AlphaZeroPlayer(nn, mcts), p2 = AlphaZeroPlayer(nn1, mcts), n_games = 100, treshold = 0.5, print_b = False)
| 30.619048 | 139 | 0.710731 | [
"MIT"
] | gemasphi/alpha-zero-torch | test.py | 643 | Python |
from decimal import Decimal
import setoptconf as soc
GOOD_NAMES = ("foo", "foo_bar", "foo123", "foo_bar_baz")
BAD_NAMES = ("_foo", "1foo", "FOO", "foo_", "foo__bar", "foo-bar")
def test_name():
for name in GOOD_NAMES:
yield check_good_name, name
for name in BAD_NAMES:
yield check_bad_name, name
def check_good_name(name):
setting = soc.StringSetting(name)
def check_bad_name(name):
try:
setting = soc.StringSetting(name)
except soc.NamingError:
pass
else:
assert False, "Invalid name allowed: %s" % name
def test_list_setting():
setting = soc.ListSetting("foo", soc.String)
assert setting.name == "foo"
setting.value = ["bar", "baz"]
assert setting.value == ["bar", "baz"]
def test_choice_setting():
setting = soc.ChoiceSetting("foo", ["bar", "baz"], soc.String)
assert setting.name == "foo"
setting.value = "baz"
assert setting.value == "baz"
| 19.612245 | 66 | 0.636837 | [
"MIT"
] | carlio/setoptconf-tmp | test/test_settings.py | 961 | Python |
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nndct_shared.base import NNDCT_CONSTANT, NNDCT_OP
from nndct_shared.nndct_graph import Tensor
from .code_template import CodeTemplate
class OpDescriptor(object):
@staticmethod
def input(ctx, node, output_str):
return "{} = args[{}]".format(output_str, int(node.name.split('_')[-1]))
@staticmethod
def rsub(ctx, node, output_str):
other = node.node_config('other')
if isinstance(other, Tensor):
other = ctx.get_output_tensor_name(other)
return "{output} = {other} - {input}".format(
output=output_str,
other=other,
input=ctx._to_list_str(ctx._get_module_input(node)))
@staticmethod
def strided_slice(ctx, node, output_str):
starts = node.node_config('start')
ends = node.node_config('end')
steps = node.node_config('step')
break_symbol = ':'
symbols = ""
start_symbol = []
end_symbol = []
step_symbol = []
for i in range(len(starts)):
start_symbol.append(ctx.infer_attr_value(starts[i]))
end_symbol.append(ctx.infer_attr_value(ends[i]))
step_symbol.append(ctx.infer_attr_value(steps[i]))
for i in range(len(starts)):
slice_symbol = break_symbol.join([start_symbol[i], end_symbol[i], step_symbol[i]])
if i > 0:
symbols += "," + slice_symbol
else:
symbols = slice_symbol
# for i in range(len(starts)):
# start_symbol = str(starts[i]) if starts[i] > 0 else ''
# end_symbol = str(ends[i]) if ends[i] < NNDCT_CONSTANT.INT_MAX else ''
# step_symbol = ':' + str(steps[i]) if steps[i] > 1 else ''
# slice_symbol = start_symbol + break_symbol + end_symbol + step_symbol
# if i > 0:
# symbols += "," + slice_symbol
# else:
# symbols = slice_symbol
input_str = ctx.infer_attr_value(node.node_config('input'))
return "{output} = {input_tensor}[{symbols}]".format(
output=output_str,
input_tensor=input_str,
symbols=symbols)
@staticmethod
def slice_tensor_inplace_copy(ctx, node, output_str):
slice_tensor, input = ctx._get_module_input(node)
dim = node.node_config('dim')
index = node.node_config('index')
symbols = str(index)
for i in range(dim):
symbols = ','.join([':', symbols])
return "{slice_tensor}[{symbols}] = {input_tensor}".format(
slice_tensor=slice_tensor, symbols=symbols, input_tensor=input)
@staticmethod
def _sequence(ctx, node, output_str):
inputs = node.op.get_config('input')
for idx, ip in enumerate(inputs):
if isinstance(ip, Tensor):
inputs[idx] = ctx.get_output_tensor_name(ip)
return "{output} = {op_name}([{inputs}])".format(
output=output_str,
op_name=node.op.type,
inputs=ctx._to_list_str(inputs))
@staticmethod
def list(ctx, node, output_str):
return OpDescriptor._sequence(ctx, node, output_str)
@staticmethod
def index(ctx, node, output_str):
indices = ""
for i, index in enumerate(node.node_config('index')):
if isinstance(index, Tensor):
symbol = ctx.get_output_tensor_name(index)
elif index is None:
symbol = ":"
if i > 0:
indices += "," + symbol
else:
indices = symbol
input = node.node_config('input')
input_tensor = ctx.get_output_tensor_name(input)
return "{output} = {input_tensor}[{symbols}]".format(
output=output_str, input_tensor=input_tensor, symbols=indices)
@staticmethod
def strided_slice_inplace_copy(ctx, node, output_str):
destination = node.node_config('destination')
source = node.node_config('source')
starts = node.node_config('start')
ends = node.node_config('end')
steps = node.node_config('step')
break_symbol = ':'
symbols = ""
start_symbol = []
end_symbol = []
step_symbol = []
for i in range(len(starts)):
start_symbol.append(ctx.infer_attr_value(starts[i]))
end_symbol.append(ctx.infer_attr_value(ends[i]))
step_symbol.append(ctx.infer_attr_value(steps[i]))
for i in range(len(starts)):
if starts[i] == ends[i]:
slice_symbol = start_symbol[i]
else:
slice_symbol = break_symbol.join([start_symbol[i], end_symbol[i], step_symbol[i]])
if i > 0:
symbols += "," + slice_symbol
else:
symbols = slice_symbol
destination_str = ctx.infer_attr_value(destination)
source_str = ctx.infer_attr_value(source)
return "{output}[{symbols}] = {input_tensor}".format(
output=destination_str,
input_tensor=source_str, symbols=symbols)
@staticmethod
def index_put_inplace(ctx, node, output_str):
# destination, _, source = ctx._get_module_input(node)
destination = node.node_config('input')
source = node.node_config('values')
indices = node.node_config('indices')
indices_symbol = ''
sep_symbol = ','
break_symbol = ':'
for i, index in enumerate(indices):
index = break_symbol if index is None else ctx.get_output_tensor_name(index)
if i > 0:
indices_symbol += sep_symbol + index
else:
indices_symbol = index
destination_str = ctx.infer_attr_value(destination)
source_str = ctx.infer_attr_value(source)
ctx.set_name_alias_for_output(output_str, destination_str)
return "{output}[{symbols}] = {input_tensor}".format(
output=destination_str,
input_tensor=source_str, symbols=indices_symbol)
#@staticmethod
#def loop(ctx, node, output_str):
# loop_pattern = None
# if node.node_config("is_while_loop"):
# raise NotImplementedError()
# else:
# loop_pattern = CodeTemplate("""$loop_outputs = $loop_vars
# for $iter_var in range(0, $max_trip_count):
# $block_inputs = $loop_outputs
# $body
# $loop_outputs = $body_ret
# """)
# loop_outputs = output_str
# loop_vars = node.node_config("initial_loop_vars")
# assert len(loop_vars) == len(ctx._get_module_output(node))
#
# def loop_var_to_str(var):
# if isinstance(var, list):
# start_str = '['
# end_str = ']'
# var_lst = []
# for ele in var:
# var_lst.append(loop_var_to_str(ele))
# return start_str + ",".join(var_lst) + end_str
# else:
# return ctx.get_output_tensor_name(var)
#
# loop_vars_str = ",".join([loop_var_to_str(var) for var in loop_vars])
#
# body_str = ""
# block_inputs_idx = 0
# iter_var_str = ''
# block_inputs = []
# max_trip_count = node.node_config("max_trip_count")
# if isinstance(max_trip_count, Tensor):
# max_trip_count = ctx.get_output_tensor_name(max_trip_count)
#
# for inner_node in node.blocks[0].nodes:
# if inner_node.op.type == NNDCT_OP.INPUT:
# output_str = ctx._to_list_str(ctx._get_module_output(inner_node))
# if block_inputs_idx == 0:
# iter_var_str = output_str
# else:
# if isinstance(ctx._get_module_output(inner_node), list) and len(ctx._get_module_output(inner_node)) > 1:
# output_str = f"({output_str})"
# block_inputs.append(output_str)
# block_inputs_idx += 1
# else:
# forward_str, output_str = ctx._get_forward_str(inner_node)
# body_str += forward_str + '\n'
#
# block_inputs_str = ",".join(block_inputs)
#
# def get_ret_val_str(ret_val):
# if isinstance(ret_val, list):
# ret_val_str = ""
# head_str = "["
# tail_str = "]"
# for val in ret_val:
# ret_val_str += get_ret_val_str(val) + ","
# return head_str + ret_val_str + tail_str
# elif isinstance(ret_val, Tensor):
# return ctx.get_output_tensor_name(ret_val)
#
# body_ret_str = ",".join([get_ret_val_str(ret_val) for ret_val in node.blocks[0].return_struct[1:]])
#
# return loop_pattern.substitute(loop_outputs=loop_outputs,
# loop_vars=loop_vars_str,
# iter_var=iter_var_str,
# max_trip_count=max_trip_count,
# block_inputs=block_inputs_str,
# body=body_str,
# body_ret=body_ret_str)
@staticmethod
def loop(ctx, node, output_str):
loop_outputs = output_str
loop_vars = node.node_config("initial_loop_vars")
loop_vars_str = ctx.infer_attr_value(loop_vars[0] if len(loop_vars) == 1 else loop_vars)
assert len(loop_vars) == len(ctx._get_module_output(node))
init_condition_str = ctx.infer_attr_value(node.node_config("initial_condition"))
body_str = ""
block_inputs_idx = 0
iter_var_str = ''
block_inputs = []
iter_start_str = str(0)
max_trip_count = node.node_config("max_trip_count")
max_trip_count_str = ctx.infer_attr_value(max_trip_count)
for inner_node in node.blocks[0].nodes:
if inner_node.op.type == NNDCT_OP.INPUT:
output_str = ctx._to_list_str(ctx._get_module_output(inner_node))
if block_inputs_idx == 0:
iter_var_str = output_str
else:
if isinstance(ctx._get_module_output(inner_node), list) and len(ctx._get_module_output(inner_node)) > 1:
output_str = f"({output_str})"
block_inputs.append(output_str)
block_inputs_idx += 1
elif inner_node.op.type == NNDCT_OP.DERIVE_LOOP_INDEX:
iter_start_str = str(inner_node.node_config("start"))
output_str = ctx._to_list_str(ctx._get_module_output(inner_node))
iter_var_str = output_str
else:
forward_str, output_str = ctx._get_forward_str(inner_node)
body_str += forward_str + '\n'
block_inputs_str = ",".join(block_inputs)
body_ret_str = ",".join([ctx.infer_attr_value(ret_val) for ret_val in node.blocks[0].return_struct[1:]])
iter_end_str = "+".join([max_trip_count_str, iter_start_str])
iter_conditon_str = ctx.infer_attr_value(node.blocks[0].return_struct[0])
loop_pattern = None
if node.node_config("is_while_loop"):
loop_pattern = CodeTemplate("""\
$loop_outputs = $loop_vars
condition = $initial_condition
while condition:
$block_inputs = $loop_outputs
$body
$loop_outputs = $body_ret
condition = $iter_condition
""")
return loop_pattern.substitute(loop_outputs=loop_outputs,
loop_vars=loop_vars_str,
initial_condition=init_condition_str,
block_inputs=block_inputs_str,
body = body_str,
body_ret = body_ret_str,
iter_condition=iter_conditon_str)
else:
loop_pattern = CodeTemplate("""\
$loop_outputs = $loop_vars
for $iter_var in range($iter_start, $iter_end):
$block_inputs = $loop_outputs
$body
$loop_outputs = $body_ret
""")
return loop_pattern.substitute(loop_outputs=loop_outputs,
loop_vars=loop_vars_str,
iter_var=iter_var_str,
iter_start=iter_start_str,
iter_end=iter_end_str,
block_inputs=block_inputs_str,
body=body_str,
body_ret=body_ret_str)
@staticmethod
def list_add(ctx, node, output_str):
inputs = node.node_config("input")
others = node.node_config("other")
input_str = ""
if isinstance(inputs, list):
input_str += "["
for inp in inputs:
input_str += ctx.get_output_tensor_name(inp)
input_str += "]"
else:
input_str += ctx.get_output_tensor_name(inputs)
others_str = ""
if isinstance(others, list):
others_str += "["
for other in others:
others_str += ctx.get_output_tensor_name(other)
others_str += "]"
else:
others_str += ctx.get_output_tensor_name(others)
return f"{output_str} = {input_str} + {others_str}"
@staticmethod
def floor_div(ctx, node, output_str):
inputs = node.node_config("input")
others = node.node_config("other")
return f"{output_str} = {ctx.get_output_tensor_name(inputs)} // {ctx.get_output_tensor_name(others)}"
@staticmethod
def sequence_unpack(ctx, node, output_str):
if len(node.out_tensors) == 1:
return f"{output_str}, = {ctx._to_list_str(ctx._get_module_input(node))}"
else:
return f"{output_str} = {ctx._to_list_str(ctx._get_module_input(node))}"
@staticmethod
def slice(ctx, node, output_str):
start = node.node_config('start')
end = node.node_config('end')
step = node.node_config('step')
dim = node.node_config('dim')
break_symbol = ':'
symbols = ""
starts = []
ends = []
steps = []
for i in range(dim + 1):
if i != dim:
starts.append(str(0))
ends.append(str(NNDCT_CONSTANT.INT_MAX))
steps.append(str(1))
else:
starts.append(ctx.infer_attr_value(start))
ends.append(ctx.infer_attr_value(end))
steps.append(ctx.infer_attr_value(step))
for i in range(dim + 1):
slice_symbol = break_symbol.join([starts[i], ends[i], steps[i]])
if i > 0:
symbols += "," + slice_symbol
else:
symbols = slice_symbol
input_str = ctx.infer_attr_value(node.node_config("input"))
return "{output} = {input_tensor}[{symbols}]".format(
output=output_str,
input_tensor=input_str,
symbols=symbols)
@staticmethod
def length(ctx, node, output_str):
return "{output} = len({input})".format(output=output_str, input=ctx._to_list_str(ctx._get_module_input(node)))
@staticmethod
def If(ctx, node, output_str):
if_pattern = CodeTemplate("""\
if ($condition):
$block_0_body
$if_out = $ret_0
else:
$block_1_body
$if_out = $ret_1
""")
if_out_str = output_str
condition_str = ctx.infer_attr_value(node.node_config("condition"))
assert len(node.blocks) == 2
blocks = [""] * 2
block_ret = [""] * 2
for i, block in enumerate(node.blocks):
for inner_node in block.nodes:
forward_str, output_str = ctx._get_forward_str(inner_node)
blocks[i] += forward_str + '\n'
block_ret[i] = ",".join([ctx.infer_attr_value(ret_val) for ret_val in block.return_struct])
block_0_body, block_1_body = blocks
ret_0_str, ret_1_str = block_ret
return if_pattern.substitute(condition=condition_str,
block_0_body=block_0_body,
block_1_body=block_1_body,
if_out=if_out_str,
ret_0=ret_0_str,
ret_1=ret_1_str
)
@staticmethod
def lt(ctx, node, output_str):
input_str = ctx.infer_attr_value(node.node_config("input"))
other_str = ctx.infer_attr_value(node.node_config("other"))
return "{output} = {input} < {other}".format(output=output_str, input=input_str, other=other_str)
@staticmethod
def eq(ctx, node, output_str):
input_str = ctx.infer_attr_value(node.node_config("input"))
other_str = ctx.infer_attr_value(node.node_config("other"))
return "{output} = {input} == {other}".format(output=output_str, input=input_str, other=other_str)
@staticmethod
def default(ctx, node, output_str):
return "{output} = {op_name}({inputs})".format(
output=output_str,
op_name=node.op.type,
inputs=ctx._to_list_str(ctx._get_module_input(node)))
MISC_OP_DISCR_MAP = {
NNDCT_OP.INPUT: OpDescriptor.input,
NNDCT_OP.RSUB: OpDescriptor.rsub,
NNDCT_OP.STRIDED_SLICE: OpDescriptor.strided_slice,
NNDCT_OP.SLICE_TENSOR_INPLACE_COPY: OpDescriptor.slice_tensor_inplace_copy,
NNDCT_OP.INDEX: OpDescriptor.index,
NNDCT_OP.INT: OpDescriptor.default,
NNDCT_OP.STRIDED_SLICE_INPLACE_COPY: OpDescriptor.strided_slice_inplace_copy,
NNDCT_OP.INDEX_INPUT_INPLACE: OpDescriptor.index_put_inplace,
NNDCT_OP.LOOP: OpDescriptor.loop,
NNDCT_OP.LIST_ADD: OpDescriptor.list_add,
NNDCT_OP.FLOOR_DIV: OpDescriptor.floor_div,
NNDCT_OP.TUPLE_UNPACK: OpDescriptor.sequence_unpack,
NNDCT_OP.SLICE: OpDescriptor.slice,
NNDCT_OP.LENGTH: OpDescriptor.length,
NNDCT_OP.IF: OpDescriptor.If,
NNDCT_OP.SCALAR_LESS_THAN: OpDescriptor.lt,
NNDCT_OP.SCALAR_EQUAL: OpDescriptor.eq
}
| 35.467213 | 115 | 0.633175 | [
"Apache-2.0"
] | bryanloz-xilinx/Vitis-AI | tools/RNN/rnn_quantizer/pytorch_binding/pytorch_nndct/export/op_descriptor.py | 17,308 | Python |
# -*- coding: utf-8 -*-
import os
import tempfile
import pytest
@pytest.fixture(scope="module")
def master(request, salt_factories):
return salt_factories.spawn_master(request, "master-1")
@pytest.fixture(scope="module")
def minion(request, salt_factories, master):
return salt_factories.spawn_minion(request, "minion-1", master_id="master-1")
@pytest.fixture
def minion_3(request, salt_factories, master):
return salt_factories.spawn_minion(request, "minion-3", master_id="master-1")
@pytest.fixture
def salt_run(salt_factories, master):
return salt_factories.get_salt_run_cli(master.config["id"])
@pytest.fixture
def salt_cp(salt_factories, master):
return salt_factories.get_salt_cp_cli(master.config["id"])
@pytest.fixture
def salt_key(salt_factories, master):
return salt_factories.get_salt_key_cli(master.config["id"])
def test_master(master):
assert master.is_alive()
def test_salt_run(master, salt_run):
max_open_files_config_value = master.config["max_open_files"]
ret = salt_run.run("config.get", "max_open_files")
assert ret.exitcode == 0, ret
assert ret.json == max_open_files_config_value
def test_salt_cp(master, minion, salt_cp, tempfiles):
"""
Test copying a file from the master to the minion
"""
tfile = tempfile.NamedTemporaryFile(delete=True)
tfile.close()
dest = tfile.name
try:
contents = "id: foo"
sls = tempfiles.makeslsfile(contents)
assert master.is_alive()
assert minion.is_alive()
ret = salt_cp.run("minion-1", sls, dest)
assert ret.exitcode == 0, ret
assert ret.json == {"minion-1": {dest: True}}, ret
assert os.path.exists(dest)
with open(dest) as rfh:
assert rfh.read() == contents
finally:
if os.path.exists(dest):
os.unlink(dest)
tfile = tempfile.NamedTemporaryFile(delete=True)
tfile.close()
dest = tfile.name
try:
contents = "id: foo"
sls = tempfiles.makeslsfile(contents)
assert master.is_alive()
assert minion.is_alive()
ret = salt_cp.run(sls, dest, minion_tgt="minion-1")
assert ret.exitcode == 0, ret
assert ret.json == {dest: True}, ret
assert os.path.exists(dest)
with open(dest) as rfh:
assert rfh.read() == contents
finally:
if os.path.exists(dest):
os.unlink(dest)
def test_salt_cp_no_match(master, minion, salt_cp, tempfiles):
assert master.is_alive()
assert minion.is_alive()
tfile = tempfile.NamedTemporaryFile(delete=True)
tfile.close()
dest = tfile.name
try:
contents = "id: foo"
sls = tempfiles.makeslsfile(contents)
assert master.is_alive()
assert minion.is_alive()
ret = salt_cp.run(sls, dest, minion_tgt="minion-2")
assert ret.exitcode == 0, ret
assert not ret.json, ret
assert not os.path.exists(dest)
finally:
if os.path.exists(dest):
os.unlink(dest)
def test_salt_key(master, minion, minion_3, salt_key):
ret = salt_key.run("--list-all")
assert ret.exitcode == 0, ret
assert ret.json == {
"minions": ["minion-1", "minion-3"],
"minions_pre": [],
"minions_denied": [],
"minions_rejected": [],
}, ret
| 27.858333 | 81 | 0.647921 | [
"Apache-2.0"
] | cmcmarrow/pytest-salt-factories | tests/integration/factories/master/test_master.py | 3,343 | Python |
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from os import listdir
from os.path import isfile, join
import numpy as np
import soundfile as sf
from scipy import io
import scipy.signal as sp
from src.features import gtgram
ROOT = Path(__file__).resolve().parents[2]
# set the path to the sound files
SOUND_FILES = ROOT / 'data/raw/sound_samples/'
# create a list of the sound files
SOUND_FILES = list(SOUND_FILES.glob('**/*.wav'))
# Define up to which frequency the data should be generated
def create_data(freq_bands=24, participant_number=19, snr=0.2, normalize=False, azimuth=12, time_window=0.1, max_freq=20000):
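    # Build (or load from a cached .npy file) gammatone-band spectra of all sound samples
    # filtered with this participant's left/right HRIRs at the given azimuth and SNR.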
str_r = 'data/processed_' + str(max_freq) + 'Hz/binaural_right_0_gammatone_' + str(time_window) + '_window_{0:03d}'.format(participant_number) + '_cipic_' + str(
int(snr * 100)) + '_srn_' + str(freq_bands) + '_channels_' + str((azimuth - 12) * 10) + '_azi_' + str(normalize) + '_norm.npy'
str_l = 'data/processed_' + str(max_freq) + 'Hz/binaural_left_0_gammatone_' + str(time_window) + '_window_{0:03d}'.format(participant_number) + '_cipic_' + str(
int(snr * 100)) + '_srn_' + str(freq_bands) + '_channels_' + str((azimuth - 12) * 10) + '_azi_' + str(normalize) + '_norm.npy'
path_data_r = ROOT / str_r
path_data_l = ROOT / str_l
# check if we can load the data from a file
if path_data_r.is_file() and path_data_l.is_file():
print('Data set found. Loading from file : ' + str_r)
return np.load(path_data_r), np.load(path_data_l)
else:
print('Creating data set : ' + str_l)
# read the HRIR data
hrtf_path = (
ROOT / 'data/raw/hrtfs/hrir_{0:03d}.mat'.format(participant_number)).resolve()
hrir_mat = io.loadmat(hrtf_path.as_posix())
# get the data for the left ear
hrir_l = hrir_mat['hrir_l']
# get the data for the right ear
hrir_r = hrir_mat['hrir_r']
# use always all elevations -> 50
psd_all_i = np.zeros((len(SOUND_FILES), 50, freq_bands))
psd_all_c = np.zeros((len(SOUND_FILES), 50, freq_bands))
# temporal_means = np.zeros((hrir_elevs.shape[0],87))
for i in range(psd_all_i.shape[0]):
for i_elevs in range(psd_all_i.shape[1]):
# read the hrir for a specific location
hrir_elevs = np.squeeze(hrir_l[azimuth, i_elevs, :])
# load a sound sample
signal = sf.read(SOUND_FILES[i].as_posix())[0]
# add noise to the signal
signal_elevs = (1 - snr) * sp.lfilter(hrir_elevs, 1, signal) + \
snr * (signal + np.random.random(signal.shape[0]) * snr)
###### TAKE THE ENTIRE SIGNAL #######
# window_means = get_spectrum(signal_elevs,nperseg=welch_nperseg)
#####################################
# read the hrir for a specific location
hrir_elevs = np.squeeze(hrir_r[azimuth, i_elevs, :])
# add noise to the signal
signal_elevs_c = (1 - snr) * sp.lfilter(hrir_elevs, 1, signal) + \
snr * (signal + np.random.random(signal.shape[0]) * snr)
# Default gammatone-based spectrogram parameters
twin = time_window
thop = twin / 2
fmin = 20
fs = 44100
###### Apply Gammatone Filter Bank ##############
y = gtgram.gtgram(signal_elevs, fs, twin,
thop, freq_bands, fmin, max_freq)
y = (20 * np.log10(y + 1))
window_means = np.mean(y, axis=1)
psd_all_i[i, i_elevs, :] = window_means
y = gtgram.gtgram(signal_elevs_c, fs,
twin, thop, freq_bands, fmin, max_freq)
y = (20 * np.log10(y + 1))
window_means = np.mean(y, axis=1)
psd_all_c[i, i_elevs, :] = window_means
#################################################
np.save(path_data_r.absolute(), psd_all_c)
np.save(path_data_l.absolute(), psd_all_i)
return psd_all_c, psd_all_i
def main():
""" This script creates HRTF filtered sound samples of the sounds given in the folder SOUND_FILES.
This is done for each participant's HRTF specified in participant_numbers.
ALL ELEVATIONS (50) are taken to filter the data.
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
########################################################################
######################## Set parameters ################################
########################################################################
normalize = False # paramter is not considered
time_window = 0.1 # time window for spectrogram in sec
# Parameter to test
snrs = np.arange(0, 1.1, 0.1) # Signal to noise ratio
# snrs = np.array([0.2]) # Signal to noise ratio
# snrs = np.array([0.2]) # Signal to noise ratio
# freq_bandss = np.array([32, 64, 128]) # Frequency bands in resulting data
freq_bandss = np.array([128]) # Frequency bands in resulting data
# azimuths = np.arange(0, 25, 1) # which azimuths to create
azimuths = np.array([12]) # which azimuths to create
participant_numbers = np.array([1, 2, 3, 8, 9, 10, 11,
12, 15, 17, 18, 19, 20,
21, 27, 28, 33, 40, 44,
48, 50, 51, 58, 59, 60,
61, 65, 119, 124, 126,
127, 131, 133, 134, 135,
137, 147, 148, 152, 153,
154, 155, 156, 158, 162,
163, 165])
# define max frequency for gammatone filter bank
max_freqs = np.array([16000, 20000])
# participant_numbers = participant_numbers[::-1]
# snrs = snrs[::-1]
# freq_bandss = freq_bandss[::-1]
########################################################################
########################################################################
# walk over all parameter combinations
for _, participant_number in enumerate(participant_numbers):
for _, snr in enumerate(snrs):
for _, freq_bands in enumerate(freq_bandss):
for _, azimuth in enumerate(azimuths):
for _, max_freq in enumerate(max_freqs):
psd_all_c, psd_all_i = create_data(freq_bands, participant_number, snr, normalize, azimuth, time_window, max_freq=max_freq)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
| 43.968354 | 165 | 0.539513 | [
"MIT"
] | oesst/HRTF_neural_model | src/data/generateData.py | 6,947 | Python |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar, Union
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_cancel_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
**kwargs
)
def build_start_os_upgrade_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
**kwargs
)
def build_start_extension_upgrade_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
**kwargs
)
def build_get_latest_request(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class VirtualMachineScaleSetRollingUpgradesOperations(object):
"""VirtualMachineScaleSetRollingUpgradesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _cancel_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
request = build_cancel_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._cancel_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_cancel_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel"} # type: ignore
@distributed_trace
def begin_cancel( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Cancels the current virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._cancel_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cancel.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel"} # type: ignore
def _start_os_upgrade_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
request = build_start_os_upgrade_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._start_os_upgrade_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_os_upgrade_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade"} # type: ignore
@distributed_trace
def begin_start_os_upgrade( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Starts a rolling upgrade to move all virtual machine scale set instances to the latest
available Platform Image OS version. Instances which are already running the latest available
OS version are not affected.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_os_upgrade_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_os_upgrade.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade"} # type: ignore
def _start_extension_upgrade_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
request = build_start_extension_upgrade_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._start_extension_upgrade_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_extension_upgrade_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade"} # type: ignore
@distributed_trace
def begin_start_extension_upgrade( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Starts a rolling upgrade to move all extensions for all virtual machine scale set instances to
the latest available extension version. Instances which are already running the latest
extension versions are not affected.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_extension_upgrade_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_extension_upgrade.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade"} # type: ignore
@distributed_trace
def get_latest(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> "_models.RollingUpgradeStatusInfo":
"""Gets the status of the latest virtual machine scale set rolling upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RollingUpgradeStatusInfo, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_07_01.models.RollingUpgradeStatusInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RollingUpgradeStatusInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
request = build_get_latest_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_latest.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RollingUpgradeStatusInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_latest.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest"} # type: ignore
| 44.154545 | 234 | 0.680255 | [
"MIT"
] | AikoBB/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/operations/_virtual_machine_scale_set_rolling_upgrades_operations.py | 24,285 | Python |
# MIT License
# Copyright (c) 2022 Muhammed
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Telegram Link : https://telegram.dog/Mo_Tech_Group
# Repo Link : https://github.com/PR0FESS0R-99/LuciferMoringstar-Robot
# License Link : https://github.com/PR0FESS0R-99/LuciferMoringstar-Robot/blob/LuciferMoringstar-Robot/LICENSE
from pyrogram import Client as lucifermoringstar_robot, filters, enums
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from LuciferMoringstar_Robot import ADMINS, CREATOR_USERNAME
@lucifermoringstar_robot.on_message((filters.group | filters.private) & filters.command('leave') & filters.user(ADMINS))
async def leave_bot(bot, update):
if len(update.command) == 1:
return await update.reply_text("𝙶𝙸𝚅𝙴 𝙼𝙴 𝙰 𝙶𝚁𝙾𝚄𝙿 𝙸𝙳")
chat = update.command[1]
    try:
        chat = int(chat)
    except ValueError:
        # Not a numeric id; keep the value as a username/invite string.
        pass
try:
pr0fess0r_99 = [[ InlineKeyboardButton('𝙶𝙴𝚃 𝚂𝚄𝙿𝙿𝙾𝚁𝚃', url=f'https://t.me/{CREATOR_USERNAME}') ]]
pr0fess0r_99 = InlineKeyboardMarkup(pr0fess0r_99)
await bot.send_message(chat_id=chat, text="𝙷𝙴𝙻𝙻𝙾 𝙵𝚁𝙸𝙴𝙽𝙳𝚂,\n𝙼𝚈 𝙼𝙰𝚂𝚃𝙴𝚁 𝙷𝙰𝚂 𝚃𝙾𝙻𝙳 𝙼𝙴 𝚃𝙾 𝙻𝙴𝙰𝚅𝙴 𝙵𝚁𝙾𝙼 𝙶𝚁𝙾𝚄𝙿. 𝚂𝙾 𝙸 𝙶𝙾 😛. 𝙸𝙵 𝚈𝙾𝚄 𝚆𝙰𝙽𝙽𝙰 𝙰𝙳𝙳 𝙼𝙴 𝙰𝙶𝙰𝙸𝙽 𝙲𝙾𝙽𝚃𝙰𝙲𝚃 𝙼𝙴", reply_markup=pr0fess0r_99)
await bot.leave_chat(chat)
await update.reply(f"𝙻𝙴𝙵𝚃 𝚃𝙷𝙴 𝙲𝙷𝙰𝚃 `{chat}`")
except Exception as e:
await update.reply(f'𝙴𝚁𝚁𝙾𝚁 - {e}')
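# Illustrative only: the handler above is driven by an admin command of the form
# below; the chat id is a placeholder:
#
#   /leave -1001234567890
#
# The bot posts its farewell message in that chat, leaves it, and replies to the
# admin with a confirmation (or with the error text if either step fails).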
| 50 | 186 | 0.750833 | [
"MIT"
] | PR0FESS0R-99/LuciferMoringstar-Robot | LuciferMoringstar_Robot/admins/chat.py | 2,760 | Python |
The next greater element of some element x in an array is the first greater element that is to the right of x in the same array.
You are given two distinct 0-indexed integer arrays nums1 and nums2, where nums1 is a subset of nums2.
For each 0 <= i < nums1.length, find the index j such that nums1[i] == nums2[j] and determine the next greater element of nums2[j] in nums2. If there is no next greater element, then the answer for this query is -1.
Return an array ans of length nums1.length such that ans[i] is the next greater element as described above.
Example 1:
Input: nums1 = [4,1,2], nums2 = [1,3,4,2]
Output: [-1,3,-1]
Explanation: The next greater element for each value of nums1 is as follows:
- 4 appears in nums2 = [1,3,4,2]. There is no greater element to its right, so the answer is -1.
- 1 appears in nums2 = [1,3,4,2]. The first greater element to its right is 3.
- 2 appears in nums2 = [1,3,4,2]. There is no greater element to its right, so the answer is -1.
Example 2:
Input: nums1 = [2,4], nums2 = [1,2,3,4]
Output: [3,-1]
Explanation: The next greater element for each value of nums1 is as follows:
- 2 appears in nums2 = [1,2,3,4]. The first greater element to its right is 3.
- 4 appears in nums2 = [1,2,3,4]. There is no greater element to its right, so the answer is -1.
Constraints:
1 <= nums1.length <= nums2.length <= 1000
0 <= nums1[i], nums2[i] <= 10^4
All integers in nums1 and nums2 are unique.
All the integers of nums1 also appear in nums2.
Solution:
from collections import defaultdict
from typing import List
class Solution:
    def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
        # Map each value of nums2 to its next greater element (default -1),
        # built with a monotonic decreasing stack in one left-to-right pass.
        ans = defaultdict(lambda: -1)
        stack = []
        for i in range(len(nums2)):
            # Every smaller value still on the stack has nums2[i] as its answer.
            while stack and stack[-1] < nums2[i]:
                ans[stack.pop()] = nums2[i]
            stack.append(nums2[i])
        # Rewrite nums1 in place with the looked-up answers and return it.
        for i in range(len(nums1)):
            nums1[i] = ans[nums1[i]]
        return nums1
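For a quick local sanity check outside the LeetCode judge (where List and defaultdict are normally pre-imported), the two examples above can be replayed directly; the expected outputs come straight from the problem statement:
if __name__ == "__main__":
    s = Solution()
    print(s.nextGreaterElement([4, 1, 2], [1, 3, 4, 2]))  # [-1, 3, -1]
    print(s.nextGreaterElement([2, 4], [1, 2, 3, 4]))     # [3, -1]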
| 39.5 | 215 | 0.670359 | [
"MIT"
] | vijay2020pc/100-days-of-code | leetcode-CP/Problem solving/496. Next Greater Element I.py | 1,896 | Python |
#!/usr/bin/env python
import common
import json
import docker_utils
nginx_sites_available = '/etc/nginx/sites-available'
CERT_DIR = '/root/certs'
import subprocess
def create_certificates(domains):
format_args = {'cert_dir': CERT_DIR}
import os.path
if not os.path.isfile(os.path.join(CERT_DIR, 'acmeCA.key.deleteme')):
commands = """openssl rsa -in %(cert_dir)s/acmeCA.key -out %(cert_dir)s/acmeCA.key.deleteme""" % format_args
for command in [cmd for cmd in commands.split("\n") if cmd]:
subprocess.call([arg for arg in command.split(" ") if arg])
for domain in domains:
create_certificate(domain)
def create_certificate(domain):
format_args = {'domain': domain,
'cert_dir': CERT_DIR}
import os.path
if os.path.isfile('%(cert_dir)s/%(domain)s.key' % format_args):
return
commands = """
openssl genrsa -out %(cert_dir)s/%(domain)s.key 2048
openssl req -new -key %(cert_dir)s/%(domain)s.key -out %(cert_dir)s/%(domain)s.csr -subj /C=DE/ST=Niedersachsen/L=Osnabrueck/O=OPS/CN=%(domain)s
openssl x509 -req -in %(cert_dir)s/%(domain)s.csr -CA %(cert_dir)s/acmeCA.pem -CAkey %(cert_dir)s/acmeCA.key.deleteme -CAcreateserial -out %(cert_dir)s/%(domain)s.crt -days 500
rm %(cert_dir)s/%(domain)s.csr
""" % format_args
for command in [cmd for cmd in commands.split("\n") if cmd]:
        print(command.split(" "))
subprocess.call([arg for arg in command.split(" ") if arg])
# create_certificates([host.domains[0] for host in common.get_vhost_config()])
def update_vhosts_config(applications):
jsonFile = open('/root/config/nginx_vhosts.json', "r")
data = json.load(jsonFile)
jsonFile.close()
for app in applications:
docker_container_config = docker_utils.get_config(app.docker_container_name)
vhost_config = data[app.vhost_name]
vhost_config['port'] = docker_container_config.port if not app.docker_container_port else app.docker_container_port
vhost_config['ip_addr'] = docker_container_config.ip_addr
jsonFile = open('/root/config/nginx_vhosts.json', "w+")
jsonFile.write(json.dumps(data, indent=4, sort_keys=True))
jsonFile.close()
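# Illustrative only: update_vhosts_config() assumes /root/config/nginx_vhosts.json
# already holds one entry per vhost name, each a dict whose 'port' and 'ip_addr'
# fields it refreshes from the running container. A placeholder shape (names and
# values invented for illustration):
#
#   {
#       "example.com": {"port": 8080, "ip_addr": "172.17.0.2"}
#   }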
def update_vhosts(vhosts):
for vhost in vhosts:
host = vhost.host
port = vhost.port
ip_addr = vhost.ip_addr
domains = vhost.domains
flags = vhost.flags
location_tmpl = """
location %(path)s {
proxy_pass http://upstream_%(upstream)s%(upstream_path)s;
proxy_http_version 1.1;
%(redirect_rule)s
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_set_header Host %(host)s;
%(set_script_name)s
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port $server_port;
%(misc)s
}
"""
location_tmpl_params = {
'redirect_rule': 'proxy_redirect off;' if flags.get('disableRedirect') else ''
}
def render_location(location_dict):
location_dict['host'] = location_dict.get('host', '$host')
location_dict['set_script_name'] = location_dict.get('set_script_name', '')
location_dict['misc'] = location_dict.get('misc', '')
location_dict['upstream_path'] = location_dict.get('upstream_path', '')
            params = dict(list(location_dict.items()) + list(location_tmpl_params.items()))
# print params
return location_tmpl % params
location_parameters = { 'upstream': domains[0], 'path': '/', 'host': flags.get('forceHost', '$host'),
'upstream_path': flags.get('upstream_path', '')}
if 'htpasswd_file' in flags:
location_parameters['misc'] = 'auth_basic "Restricted"; auth_basic_user_file %s;' % (flags['htpasswd_file'])
if 'location_extra' in flags:
location_parameters['misc'] = location_parameters['misc'] if 'misc' in location_parameters else ''
location_parameters['misc'] += flags['location_extra']
location = render_location(location_parameters)
location_ssl = location
upstreams = [{
'local_port': port,
'local_address': ip_addr,
'name': domains[0]
}]
if flags.get('sslToPort'):
upstream_name = "%s_ssl " % domains[0]
location_ssl = render_location({ 'upstream': upstream_name, 'path': '/', 'host': flags.get('forceHost', '$host')})
upstreams.append({
'local_port': flags.get('sslToPort'),
'local_address': ip_addr,
'name': upstream_name
})
if flags.get('httpsToHttpPaths'):
for path in flags.get('httpsToHttpPaths').split(','):
location_ssl += "\n" + render_location({ 'upstream': domains[0], 'path': '/%s' % path, 'host': flags.get('forceHost', '$host') })
other_locations = [{ 'upstream': domains[0], 'path': '@failover', 'host': flags.get('forceHost', '$host')}]
other_locations_https = []
path_idx = 0
for path, path_config in vhost.paths.items():
upstream_name = "%s_%s " % (domains[0], path_idx)
upstreams.append({
'local_port': path_config['port'],
'local_address': vm_map[path_config['host']]['local_address'],
'name': upstream_name
})
if path_config['secure']:
other_locations_https.append({ 'upstream': upstream_name, 'path': '/%s' % path,
'misc': '''
''',
'set_script_name': ('proxy_set_header SCRIPT_NAME /%s;' % path.rstrip('/')) if path_config.get('setScriptName') else '',
'host': flags.get('forceHost', '$host')})
else:
other_locations.append({ 'upstream': upstream_name, 'path': '/%s' % path,
'misc': '''
error_page 500 = @failover;
proxy_intercept_errors on;
''',
'set_script_name': ('proxy_set_header SCRIPT_NAME /%s;' % path.rstrip('/')) if path_config.get('setScriptName') else '',
'host': flags.get('forceHost', '$host')})
path_idx += 1
upstream_tmpl = 'upstream upstream_%(name)s { server %(local_address)s:%(local_port)s; }'
rewrites = ''
extra_directives = ''
if flags.get('block_robots'):
extra_directives += '''
location = /robots.txt {
alias /var/www/robots_deny.txt;
}
'''
if flags.get('allow_robots'):
extra_directives += '''
location = /robots.txt {
alias /var/www/robots_allow.txt;
}
'''
if 'server_config_extra' in flags:
extra_directives += flags['server_config_extra']
if flags.get('aliases'):
aliases = flags.get('aliases').split("\n")
for alias in aliases:
extra_directives += '''
location /%s {
alias %s;
}
''' % tuple(alias.strip().split('->'))
if vhost.rewrites:
rewrites += vhost.rewrites
location_http = location if flags.get('allow_http') else 'return 301 https://$host$request_uri;'
if flags.get('httpPaths'):
for path in flags.get('httpPaths').split(','):
location_http = "\n" + render_location({ 'upstream': domains[0], 'path': '/%s' % path, 'host': flags.get('forceHost', '$host') }) + "\n" + ''' location / { return 301 https://$host$request_uri; }
'''
format_args = {
'upstreams': "\n".join([upstream_tmpl % up for up in upstreams]),
'public_port': port,
'other_locations': "\n".join([render_location(location_dict) for location_dict in other_locations]),
'other_locations_https': "\n".join([render_location(location_dict) for location_dict in other_locations_https]),
'extra_directives': extra_directives,
'domain': domains[0],
'server_names': ' '.join(domains) if not flags.get('rewriteDomains') else domains[0],
'location': location_ssl,
'rewrites': rewrites,
'upload_limit': flags.get('uploadLimit', '20M'),
'location_http': location_http,
'cert_dir': CERT_DIR}
config = """
%(upstreams)s
server {
listen 80;
server_name %(server_names)s;
client_max_body_size %(upload_limit)s;
%(rewrites)s
%(location_http)s
%(other_locations)s
%(extra_directives)s
}
""" % format_args
if not flags.get('noSsl'):
config += """
server {
listen 443 ssl;
server_name %(server_names)s;
client_max_body_size %(upload_limit)s;
ssl on;
ssl_certificate %(cert_dir)s/%(domain)s.cer;
ssl_certificate_key %(cert_dir)s/%(domain)s.key;
ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:AES128-GCM-SHA256:RC4:HIGH:!MD5:!aNULL:!EDH:!CAMELLIA;
ssl_protocols TLSv1.2 TLSv1.1 TLSv1;
ssl_prefer_server_ciphers on;
%(location)s
%(other_locations_https)s
%(extra_directives)s
}
""" % format_args
if flags.get('rewriteDomains'):
for domain in domains[1:]:
config += """
server {
listen 80;
server_name %(domain1)s;
return 301 http://%(domain2)s$request_uri;
}
""" % {'domain1': domain, 'domain2': domains[0]}
f = open('%s/%s' % (nginx_sites_available, domains[0]), 'w')
f.write(config)
f.close()
'''
proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
'''
update_vhosts_config(common.get_applications_config())
update_vhosts(common.get_vhost_config())
| 38.708185 | 341 | 0.558886 | [
"Unlicense"
] | bcoding/docker-host-scripts | py/update_nginx_vhosts.py | 10,877 | Python |
# -*- coding: utf-8 -*-
"""Main IPython class."""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <[email protected]>
# Copyright (C) 2001-2007 Fernando Perez. <[email protected]>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import abc
import ast
import atexit
import builtins as builtin_mod
import dis
import functools
import inspect
import os
import re
import runpy
import subprocess
import sys
import tempfile
import traceback
import types
import warnings
from ast import stmt
from io import open as io_open
from logging import error
from pathlib import Path
from typing import Callable
from typing import List as ListType
from typing import Optional, Tuple
from warnings import warn
from pickleshare import PickleShareDB
from tempfile import TemporaryDirectory
from traitlets import (
Any,
Bool,
CaselessStrEnum,
Dict,
Enum,
Instance,
Integer,
List,
Type,
Unicode,
default,
observe,
validate,
)
from traitlets.config.configurable import SingletonConfigurable
from traitlets.utils.importstring import import_item
import IPython.core.hooks
from IPython.core import magic, oinspect, page, prefilter, ultratb
from IPython.core.alias import Alias, AliasManager
from IPython.core.autocall import ExitAutocall
from IPython.core.builtin_trap import BuiltinTrap
from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
from IPython.core.debugger import InterruptiblePdb
from IPython.core.display_trap import DisplayTrap
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import InputRejected, UsageError
from IPython.core.events import EventManager, available_events
from IPython.core.extensions import ExtensionManager
from IPython.core.formatters import DisplayFormatter
from IPython.core.history import HistoryManager
from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
from IPython.core.logger import Logger
from IPython.core.macro import Macro
from IPython.core.payload import PayloadManager
from IPython.core.prefilter import PrefilterManager
from IPython.core.profiledir import ProfileDir
from IPython.core.usage import default_banner
from IPython.display import display
from IPython.paths import get_ipython_dir
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize, io, openpy, py3compat
from IPython.utils.decorators import undoc
from IPython.utils.io import ask_yes_no
from IPython.utils.ipstruct import Struct
from IPython.utils.path import ensure_dir_exists, get_home_dir, get_py_filename
from IPython.utils.process import getoutput, system
from IPython.utils.strdispatch import StrDispatch
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.text import DollarFormatter, LSString, SList, format_screen
sphinxify: Optional[Callable]
try:
import docrepr.sphinxify as sphx
def sphinxify(oinfo):
wrapped_docstring = sphx.wrap_main_docstring(oinfo)
def sphinxify_docstring(docstring):
with TemporaryDirectory() as dirname:
return {
"text/html": sphx.sphinxify(wrapped_docstring, dirname),
"text/plain": docstring,
}
return sphinxify_docstring
except ImportError:
sphinxify = None
class ProvisionalWarning(DeprecationWarning):
"""
Warning class for unstable features
"""
pass
from ast import Module
_assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
_single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
#-----------------------------------------------------------------------------
# Await Helpers
#-----------------------------------------------------------------------------
# we still need to run things using the asyncio eventloop, but there is no
# async integration
from .async_helpers import (
_asyncio_runner,
_curio_runner,
_pseudo_sync_runner,
_should_be_async,
_trio_runner,
)
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# compiled regexps for autoindent management
dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
@undoc
def softspace(file, newvalue):
"""Copied from code.py, to remove the dependency"""
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
@undoc
def no_op(*a, **kw):
pass
class SpaceInInput(Exception): pass
class SeparateUnicode(Unicode):
r"""A Unicode subclass to validate separate_in, separate_out, etc.
This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
"""
def validate(self, obj, value):
if value == '0': value = ''
value = value.replace('\\n','\n')
return super(SeparateUnicode, self).validate(obj, value)
@undoc
class DummyMod(object):
"""A dummy module used for IPython's interactive module when
a namespace must be assigned to the module's __dict__."""
__spec__ = None
class ExecutionInfo(object):
"""The arguments used for a call to :meth:`InteractiveShell.run_cell`
Stores information about what is going to happen.
"""
raw_cell = None
store_history = False
silent = False
shell_futures = True
def __init__(self, raw_cell, store_history, silent, shell_futures):
self.raw_cell = raw_cell
self.store_history = store_history
self.silent = silent
self.shell_futures = shell_futures
def __repr__(self):
name = self.__class__.__qualname__
raw_cell = ((self.raw_cell[:50] + '..')
if len(self.raw_cell) > 50 else self.raw_cell)
return '<%s object at %x, raw_cell="%s" store_history=%s silent=%s shell_futures=%s>' %\
(name, id(self), raw_cell, self.store_history, self.silent, self.shell_futures)
class ExecutionResult(object):
"""The result of a call to :meth:`InteractiveShell.run_cell`
Stores information about what took place.
"""
execution_count = None
error_before_exec = None
error_in_exec: Optional[BaseException] = None
info = None
result = None
def __init__(self, info):
self.info = info
@property
def success(self):
return (self.error_before_exec is None) and (self.error_in_exec is None)
def raise_error(self):
"""Reraises error if `success` is `False`, otherwise does nothing"""
if self.error_before_exec is not None:
raise self.error_before_exec
if self.error_in_exec is not None:
raise self.error_in_exec
def __repr__(self):
name = self.__class__.__qualname__
return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\
(name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result))
class InteractiveShell(SingletonConfigurable):
"""An enhanced, interactive shell for Python."""
_instance = None
ast_transformers = List([], help=
"""
A list of ast.NodeTransformer subclass instances, which will be applied
to user input before code is run.
"""
).tag(config=True)
autocall = Enum((0,1,2), default_value=0, help=
"""
Make IPython automatically call any callable object even if you didn't
type explicit parentheses. For example, 'str 43' becomes 'str(43)'
automatically. The value can be '0' to disable the feature, '1' for
'smart' autocall, where it is not applied if there are no more
arguments on the line, and '2' for 'full' autocall, where all callable
objects are automatically called (even if no arguments are present).
"""
).tag(config=True)
autoindent = Bool(True, help=
"""
Autoindent IPython code entered interactively.
"""
).tag(config=True)
autoawait = Bool(True, help=
"""
Automatically run await statement in the top level repl.
"""
).tag(config=True)
loop_runner_map ={
'asyncio':(_asyncio_runner, True),
'curio':(_curio_runner, True),
'trio':(_trio_runner, True),
'sync': (_pseudo_sync_runner, False)
}
loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
allow_none=True,
help="""Select the loop runner that will be used to execute top-level asynchronous code"""
).tag(config=True)
@default('loop_runner')
def _default_loop_runner(self):
return import_item("IPython.core.interactiveshell._asyncio_runner")
@validate('loop_runner')
def _import_runner(self, proposal):
if isinstance(proposal.value, str):
if proposal.value in self.loop_runner_map:
runner, autoawait = self.loop_runner_map[proposal.value]
self.autoawait = autoawait
return runner
runner = import_item(proposal.value)
if not callable(runner):
raise ValueError('loop_runner must be callable')
return runner
if not callable(proposal.value):
raise ValueError('loop_runner must be callable')
return proposal.value
automagic = Bool(True, help=
"""
Enable magic commands to be called without the leading %.
"""
).tag(config=True)
banner1 = Unicode(default_banner,
help="""The part of the banner to be printed before the profile"""
).tag(config=True)
banner2 = Unicode('',
help="""The part of the banner to be printed after the profile"""
).tag(config=True)
cache_size = Integer(1000, help=
"""
Set the size of the output cache. The default is 1000, you can
change it permanently in your config file. Setting it to 0 completely
disables the caching system, and the minimum value accepted is 3 (if
you provide a value less than 3, it is reset to 0 and a warning is
issued). This limit is defined because otherwise you'll spend more
time re-flushing a too small cache than working
"""
).tag(config=True)
color_info = Bool(True, help=
"""
Use colors for displaying information about objects. Because this
information is passed through a pager (like 'less'), and some pagers
get confused with color codes, this capability can be turned off.
"""
).tag(config=True)
colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
default_value='Neutral',
help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
).tag(config=True)
debug = Bool(False).tag(config=True)
disable_failing_post_execute = Bool(False,
help="Don't call post-execute functions that have failed in the past."
).tag(config=True)
display_formatter = Instance(DisplayFormatter, allow_none=True)
displayhook_class = Type(DisplayHook)
display_pub_class = Type(DisplayPublisher)
compiler_class = Type(CachingCompiler)
sphinxify_docstring = Bool(False, help=
"""
Enables rich html representation of docstrings. (This requires the
docrepr module).
""").tag(config=True)
@observe("sphinxify_docstring")
def _sphinxify_docstring_changed(self, change):
if change['new']:
warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
enable_html_pager = Bool(False, help=
"""
(Provisional API) enables html representation in mime bundles sent
to pagers.
""").tag(config=True)
@observe("enable_html_pager")
def _enable_html_pager_changed(self, change):
if change['new']:
warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
data_pub_class = None
exit_now = Bool(False)
exiter = Instance(ExitAutocall)
@default('exiter')
def _exiter_default(self):
return ExitAutocall(self)
# Monotonically increasing execution counter
execution_count = Integer(1)
filename = Unicode("<ipython console>")
ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
# Used to transform cells before running them, and check whether code is complete
input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
())
@property
def input_transformers_cleanup(self):
return self.input_transformer_manager.cleanup_transforms
input_transformers_post = List([],
help="A list of string input transformers, to be applied after IPython's "
"own input transformations."
)
@property
def input_splitter(self):
"""Make this available for backward compatibility (pre-7.0 release) with existing code.
        For example, ipykernel currently uses
`shell.input_splitter.check_complete`
"""
from warnings import warn
warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.",
DeprecationWarning, stacklevel=2
)
return self.input_transformer_manager
logstart = Bool(False, help=
"""
Start logging to the default log file in overwrite mode.
Use `logappend` to specify a log file to **append** logs to.
"""
).tag(config=True)
logfile = Unicode('', help=
"""
The name of the logfile to use.
"""
).tag(config=True)
logappend = Unicode('', help=
"""
Start logging to the given file in append mode.
Use `logfile` to specify a log file to **overwrite** logs to.
"""
).tag(config=True)
object_info_string_level = Enum((0,1,2), default_value=0,
).tag(config=True)
pdb = Bool(False, help=
"""
Automatically call the pdb debugger after every exception.
"""
).tag(config=True)
display_page = Bool(False,
help="""If True, anything that would be passed to the pager
will be displayed as regular output instead."""
).tag(config=True)
show_rewritten_input = Bool(True,
help="Show rewritten input, e.g. for autocall."
).tag(config=True)
quiet = Bool(False).tag(config=True)
history_length = Integer(10000,
help='Total length of command history'
).tag(config=True)
history_load_length = Integer(1000, help=
"""
The number of saved history entries to be loaded
into the history buffer at startup.
"""
).tag(config=True)
ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
default_value='last_expr',
help="""
'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying
which nodes should be run interactively (displaying output from expressions).
"""
).tag(config=True)
# TODO: this part of prompt management should be moved to the frontends.
# Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
separate_in = SeparateUnicode('\n').tag(config=True)
separate_out = SeparateUnicode('').tag(config=True)
separate_out2 = SeparateUnicode('').tag(config=True)
wildcards_case_sensitive = Bool(True).tag(config=True)
xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'),
default_value='Context',
help="Switch modes for the IPython exception handlers."
).tag(config=True)
# Subcomponents of InteractiveShell
alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
@property
def profile(self):
if self.profile_dir is not None:
name = os.path.basename(self.profile_dir.location)
return name.replace('profile_','')
# Private interface
_post_execute = Dict()
# Tracks any GUI loop loaded for pylab
pylab_gui_select = None
    last_execution_succeeded = Bool(True, help='Whether the last executed command succeeded')
last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
def __init__(self, ipython_dir=None, profile_dir=None,
user_module=None, user_ns=None,
custom_exceptions=((), None), **kwargs):
# This is where traits with a config_key argument are updated
# from the values on config.
super(InteractiveShell, self).__init__(**kwargs)
if 'PromptManager' in self.config:
warn('As of IPython 5.0 `PromptManager` config will have no effect'
' and has been replaced by TerminalInteractiveShell.prompts_class')
self.configurables = [self]
# These are relatively independent and stateless
self.init_ipython_dir(ipython_dir)
self.init_profile_dir(profile_dir)
self.init_instance_attrs()
self.init_environment()
# Check if we're in a virtualenv, and set up sys.path.
self.init_virtualenv()
# Create namespaces (user_ns, user_global_ns, etc.)
self.init_create_namespaces(user_module, user_ns)
# This has to be done after init_create_namespaces because it uses
# something in self.user_ns, but before init_sys_modules, which
# is the first thing to modify sys.
# TODO: When we override sys.stdout and sys.stderr before this class
# is created, we are saving the overridden ones here. Not sure if this
# is what we want to do.
self.save_sys_module_state()
self.init_sys_modules()
# While we're trying to have each part of the code directly access what
# it needs without keeping redundant references to objects, we have too
# much legacy code that expects ip.db to exist.
self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
self.init_history()
self.init_encoding()
self.init_prefilter()
self.init_syntax_highlighting()
self.init_hooks()
self.init_events()
self.init_pushd_popd_magic()
self.init_user_ns()
self.init_logger()
self.init_builtins()
# The following was in post_config_initialization
self.init_inspector()
self.raw_input_original = input
self.init_completer()
# TODO: init_io() needs to happen before init_traceback handlers
# because the traceback handlers hardcode the stdout/stderr streams.
# This logic in in debugger.Pdb and should eventually be changed.
self.init_io()
self.init_traceback_handlers(custom_exceptions)
self.init_prompts()
self.init_display_formatter()
self.init_display_pub()
self.init_data_pub()
self.init_displayhook()
self.init_magics()
self.init_alias()
self.init_logstart()
self.init_pdb()
self.init_extension_manager()
self.init_payload()
self.events.trigger('shell_initialized', self)
atexit.register(self.atexit_operations)
# The trio runner is used for running Trio in the foreground thread. It
# is different from `_trio_runner(async_fn)` in `async_helpers.py`
# which calls `trio.run()` for every cell. This runner runs all cells
# inside a single Trio event loop. If used, it is set from
# `ipykernel.kernelapp`.
self.trio_runner = None
def get_ipython(self):
"""Return the currently running IPython instance."""
return self
#-------------------------------------------------------------------------
# Trait changed handlers
#-------------------------------------------------------------------------
@observe('ipython_dir')
def _ipython_dir_changed(self, change):
ensure_dir_exists(change['new'])
def set_autoindent(self,value=None):
"""Set the autoindent flag.
If called with no arguments, it acts as a toggle."""
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
def set_trio_runner(self, tr):
self.trio_runner = tr
#-------------------------------------------------------------------------
# init_* methods called by __init__
#-------------------------------------------------------------------------
def init_ipython_dir(self, ipython_dir):
if ipython_dir is not None:
self.ipython_dir = ipython_dir
return
self.ipython_dir = get_ipython_dir()
def init_profile_dir(self, profile_dir):
if profile_dir is not None:
self.profile_dir = profile_dir
return
self.profile_dir = ProfileDir.create_profile_dir_by_name(
self.ipython_dir, "default"
)
def init_instance_attrs(self):
self.more = False
# command compiler
self.compile = self.compiler_class()
# Make an empty namespace, which extension writers can rely on both
# existing and NEVER being used by ipython itself. This gives them a
# convenient location for storing additional information and state
# their extensions may require, without fear of collisions with other
# ipython names that may develop later.
self.meta = Struct()
# Temporary files used for various purposes. Deleted at exit.
# The files here are stored with Path from Pathlib
self.tempfiles = []
self.tempdirs = []
# keep track of where we started running (mainly for crash post-mortem)
# This is not being used anywhere currently.
self.starting_dir = os.getcwd()
# Indentation management
self.indent_current_nsp = 0
# Dict to track post-execution functions that have been registered
self._post_execute = {}
def init_environment(self):
"""Any changes we need to make to the user's environment."""
pass
def init_encoding(self):
        # Get system encoding at startup time. Certain terminals (like Emacs
        # under Win32) have it set to None, and we need to have a known valid
# encoding to use in the raw_input() method
try:
self.stdin_encoding = sys.stdin.encoding or 'ascii'
except AttributeError:
self.stdin_encoding = 'ascii'
@observe('colors')
def init_syntax_highlighting(self, changes=None):
# Python source parser/formatter for syntax highlighting
pyformat = PyColorize.Parser(style=self.colors, parent=self).format
self.pycolorize = lambda src: pyformat(src,'str')
def refresh_style(self):
# No-op here, used in subclass
pass
def init_pushd_popd_magic(self):
# for pushd/popd management
self.home_dir = get_home_dir()
self.dir_stack = []
def init_logger(self):
self.logger = Logger(self.home_dir, logfname='ipython_log.py',
logmode='rotate')
def init_logstart(self):
"""Initialize logging in case it was requested at the command line.
"""
if self.logappend:
self.magic('logstart %s append' % self.logappend)
elif self.logfile:
self.magic('logstart %s' % self.logfile)
elif self.logstart:
self.magic('logstart')
def init_builtins(self):
# A single, static flag that we set to True. Its presence indicates
# that an IPython shell has been created, and we make no attempts at
# removing on exit or representing the existence of more than one
# IPython at a time.
builtin_mod.__dict__['__IPYTHON__'] = True
builtin_mod.__dict__['display'] = display
self.builtin_trap = BuiltinTrap(shell=self)
@observe('colors')
def init_inspector(self, changes=None):
# Object inspector
self.inspector = oinspect.Inspector(oinspect.InspectColors,
PyColorize.ANSICodeColors,
self.colors,
self.object_info_string_level)
def init_io(self):
# implemented in subclasses, TerminalInteractiveShell does call
# colorama.init().
pass
def init_prompts(self):
# Set system prompts, so that scripts can decide if they are running
# interactively.
sys.ps1 = 'In : '
sys.ps2 = '...: '
sys.ps3 = 'Out: '
def init_display_formatter(self):
self.display_formatter = DisplayFormatter(parent=self)
self.configurables.append(self.display_formatter)
def init_display_pub(self):
self.display_pub = self.display_pub_class(parent=self, shell=self)
self.configurables.append(self.display_pub)
def init_data_pub(self):
if not self.data_pub_class:
self.data_pub = None
return
self.data_pub = self.data_pub_class(parent=self)
self.configurables.append(self.data_pub)
def init_displayhook(self):
# Initialize displayhook, set in/out prompts and printing system
self.displayhook = self.displayhook_class(
parent=self,
shell=self,
cache_size=self.cache_size,
)
self.configurables.append(self.displayhook)
# This is a context manager that installs/revmoes the displayhook at
# the appropriate time.
self.display_trap = DisplayTrap(hook=self.displayhook)
@staticmethod
def get_path_links(p: Path):
"""Gets path links including all symlinks
Examples
--------
In [1]: from IPython.core.interactiveshell import InteractiveShell
In [2]: import sys, pathlib
In [3]: paths = InteractiveShell.get_path_links(pathlib.Path(sys.executable))
In [4]: len(paths) == len(set(paths))
Out[4]: True
In [5]: bool(paths)
Out[5]: True
"""
paths = [p]
while p.is_symlink():
new_path = Path(os.readlink(p))
if not new_path.is_absolute():
new_path = p.parent / new_path
p = new_path
paths.append(p)
return paths
def init_virtualenv(self):
"""Add the current virtualenv to sys.path so the user can import modules from it.
This isn't perfect: it doesn't use the Python interpreter with which the
virtualenv was built, and it ignores the --no-site-packages option. A
warning will appear suggesting the user installs IPython in the
virtualenv, but for many cases, it probably works well enough.
Adapted from code snippets online.
http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
"""
if 'VIRTUAL_ENV' not in os.environ:
# Not in a virtualenv
return
elif os.environ["VIRTUAL_ENV"] == "":
warn("Virtual env path set to '', please check if this is intended.")
return
p = Path(sys.executable)
p_venv = Path(os.environ["VIRTUAL_ENV"])
# fallback venv detection:
# stdlib venv may symlink sys.executable, so we can't use realpath.
# but others can symlink *to* the venv Python, so we can't just use sys.executable.
# So we just check every item in the symlink tree (generally <= 3)
paths = self.get_path_links(p)
# In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible
if p_venv.parts[1] == "cygdrive":
drive_name = p_venv.parts[2]
p_venv = (drive_name + ":/") / Path(*p_venv.parts[3:])
if any(p_venv == p.parents[1] for p in paths):
# Our exe is inside or has access to the virtualenv, don't need to do anything.
return
if sys.platform == "win32":
virtual_env = str(Path(os.environ["VIRTUAL_ENV"], "Lib", "site-packages"))
else:
virtual_env_path = Path(
os.environ["VIRTUAL_ENV"], "lib", "python{}.{}", "site-packages"
)
p_ver = sys.version_info[:2]
# Predict version from py[thon]-x.x in the $VIRTUAL_ENV
re_m = re.search(r"\bpy(?:thon)?([23])\.(\d+)\b", os.environ["VIRTUAL_ENV"])
if re_m:
predicted_path = Path(str(virtual_env_path).format(*re_m.groups()))
if predicted_path.exists():
p_ver = re_m.groups()
virtual_env = str(virtual_env_path).format(*p_ver)
warn(
"Attempting to work in a virtualenv. If you encounter problems, "
"please install IPython inside the virtualenv."
)
import site
sys.path.insert(0, virtual_env)
site.addsitedir(virtual_env)
#-------------------------------------------------------------------------
# Things related to injections into the sys module
#-------------------------------------------------------------------------
def save_sys_module_state(self):
"""Save the state of hooks in the sys module.
This has to be called after self.user_module is created.
"""
self._orig_sys_module_state = {'stdin': sys.stdin,
'stdout': sys.stdout,
'stderr': sys.stderr,
'excepthook': sys.excepthook}
self._orig_sys_modules_main_name = self.user_module.__name__
self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
def restore_sys_module_state(self):
"""Restore the state of the sys module."""
try:
for k, v in self._orig_sys_module_state.items():
setattr(sys, k, v)
except AttributeError:
pass
        # Reset what was done in self.init_sys_modules
if self._orig_sys_modules_main_mod is not None:
sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
#-------------------------------------------------------------------------
# Things related to the banner
#-------------------------------------------------------------------------
@property
def banner(self):
banner = self.banner1
if self.profile and self.profile != 'default':
banner += '\nIPython profile: %s\n' % self.profile
if self.banner2:
banner += '\n' + self.banner2
return banner
def show_banner(self, banner=None):
if banner is None:
banner = self.banner
sys.stdout.write(banner)
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
def init_hooks(self):
# hooks holds pointers used for user-side customizations
self.hooks = Struct()
self.strdispatchers = {}
# Set all default hooks, defined in the IPython.hooks module.
hooks = IPython.core.hooks
for hook_name in hooks.__all__:
# default hooks have priority 100, i.e. low; user hooks should have
# 0-100 priority
self.set_hook(hook_name, getattr(hooks, hook_name), 100)
if self.display_page:
self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
def set_hook(self, name, hook, priority=50, str_key=None, re_key=None):
"""set_hook(name,hook) -> sets an internal IPython hook.
IPython exposes some of its internal API as user-modifiable hooks. By
adding your function to one of these hooks, you can modify IPython's
behavior to call at runtime your own routines."""
# At some point in the future, this should validate the hook before it
# accepts it. Probably at least check that the hook takes the number
# of args it's supposed to.
f = types.MethodType(hook,self)
# check if the hook is for strdispatcher first
if str_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_s(str_key, f, priority )
self.strdispatchers[name] = sdp
return
if re_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_re(re.compile(re_key), f, priority )
self.strdispatchers[name] = sdp
return
dp = getattr(self.hooks, name, None)
if name not in IPython.core.hooks.__all__:
print("Warning! Hook '%s' is not one of %s" % \
(name, IPython.core.hooks.__all__ ))
if name in IPython.core.hooks.deprecated:
alternative = IPython.core.hooks.deprecated[name]
raise ValueError(
"Hook {} has been deprecated since IPython 5.0. Use {} instead.".format(
name, alternative
)
)
if not dp:
dp = IPython.core.hooks.CommandChainDispatcher()
try:
dp.add(f,priority)
except AttributeError:
# it was not commandchain, plain old func - replace
dp = f
setattr(self.hooks,name, dp)
#-------------------------------------------------------------------------
# Things related to events
#-------------------------------------------------------------------------
def init_events(self):
self.events = EventManager(self, available_events)
self.events.register("pre_execute", self._clear_warning_registry)
def register_post_execute(self, func):
"""DEPRECATED: Use ip.events.register('post_run_cell', func)
Register a function for calling after code execution.
"""
raise ValueError(
"ip.register_post_execute is deprecated since IPython 1.0, use "
"ip.events.register('post_run_cell', func) instead."
)
def _clear_warning_registry(self):
# clear the warning registry, so that different code blocks with
# overlapping line number ranges don't cause spurious suppression of
# warnings (see gh-6611 for details)
if "__warningregistry__" in self.user_global_ns:
del self.user_global_ns["__warningregistry__"]
#-------------------------------------------------------------------------
# Things related to the "main" module
#-------------------------------------------------------------------------
def new_main_mod(self, filename, modname):
"""Return a new 'main' module object for user code execution.
``filename`` should be the path of the script which will be run in the
module. Requests with the same filename will get the same module, with
its namespace cleared.
``modname`` should be the module name - normally either '__main__' or
the basename of the file without the extension.
When scripts are executed via %run, we must keep a reference to their
__main__ module around so that Python doesn't
clear it, rendering references to module globals useless.
This method keeps said reference in a private dict, keyed by the
absolute path of the script. This way, for multiple executions of the
same script we only keep one copy of the namespace (the last one),
thus preventing memory leaks from old references while allowing the
objects from the last execution to be accessible.
"""
filename = os.path.abspath(filename)
try:
main_mod = self._main_mod_cache[filename]
except KeyError:
main_mod = self._main_mod_cache[filename] = types.ModuleType(
modname,
doc="Module created for script run in IPython")
else:
main_mod.__dict__.clear()
main_mod.__name__ = modname
main_mod.__file__ = filename
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method
main_mod.__nonzero__ = lambda : True
return main_mod
def clear_main_mod_cache(self):
"""Clear the cache of main modules.
Mainly for use by utilities like %reset.
Examples
--------
In [15]: import IPython
In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
In [17]: len(_ip._main_mod_cache) > 0
Out[17]: True
In [18]: _ip.clear_main_mod_cache()
In [19]: len(_ip._main_mod_cache) == 0
Out[19]: True
"""
self._main_mod_cache.clear()
#-------------------------------------------------------------------------
# Things related to debugging
#-------------------------------------------------------------------------
def init_pdb(self):
# Set calling of pdb on exceptions
# self.call_pdb is a property
self.call_pdb = self.pdb
def _get_call_pdb(self):
return self._call_pdb
def _set_call_pdb(self,val):
if val not in (0,1,False,True):
raise ValueError('new call_pdb value must be boolean')
# store value in instance
self._call_pdb = val
# notify the actual exception handlers
self.InteractiveTB.call_pdb = val
call_pdb = property(_get_call_pdb,_set_call_pdb,None,
'Control auto-activation of pdb at exceptions')
def debugger(self,force=False):
"""Call the pdb debugger.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
"""
if not (force or self.call_pdb):
return
if not hasattr(sys,'last_traceback'):
error('No traceback has been produced, nothing to debug.')
return
self.InteractiveTB.debugger(force=True)
#-------------------------------------------------------------------------
# Things related to IPython's various namespaces
#-------------------------------------------------------------------------
default_user_namespaces = True
def init_create_namespaces(self, user_module=None, user_ns=None):
# Create the namespace where the user will operate. user_ns is
# normally the only one used, and it is passed to the exec calls as
# the locals argument. But we do carry a user_global_ns namespace
# given as the exec 'globals' argument, This is useful in embedding
# situations where the ipython shell opens in a context where the
# distinction between locals and globals is meaningful. For
# non-embedded contexts, it is just the same object as the user_ns dict.
# FIXME. For some strange reason, __builtins__ is showing up at user
# level as a dict instead of a module. This is a manual fix, but I
# should really track down where the problem is coming from. Alex
# Schmolck reported this problem first.
# A useful post by Alex Martelli on this topic:
# Re: inconsistent value from __builtins__
# Von: Alex Martelli <[email protected]>
# Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
# Gruppen: comp.lang.python
# Michael Hohn <[email protected]> wrote:
# > >>> print type(builtin_check.get_global_binding('__builtins__'))
# > <type 'dict'>
# > >>> print type(__builtins__)
# > <type 'module'>
# > Is this difference in return value intentional?
# Well, it's documented that '__builtins__' can be either a dictionary
# or a module, and it's been that way for a long time. Whether it's
# intentional (or sensible), I don't know. In any case, the idea is
# that if you need to access the built-in namespace directly, you
# should start with "import __builtin__" (note, no 's') which will
# definitely give you a module. Yeah, it's somewhat confusing:-(.
# These routines return a properly built module and dict as needed by
# the rest of the code, and can also be used by extension writers to
# generate properly initialized namespaces.
if (user_ns is not None) or (user_module is not None):
self.default_user_namespaces = False
self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
# A record of hidden variables we have added to the user namespace, so
# we can list later only variables defined in actual interactive use.
self.user_ns_hidden = {}
# Now that FakeModule produces a real module, we've run into a nasty
# problem: after script execution (via %run), the module where the user
# code ran is deleted. Now that this object is a true module (needed
# so doctest and other tools work correctly), the Python module
# teardown mechanism runs over it, and sets to None every variable
# present in that module. Top-level references to objects from the
# script survive, because the user_ns is updated with them. However,
# calling functions defined in the script that use other things from
# the script will fail, because the function's closure had references
# to the original objects, which are now all None. So we must protect
# these modules from deletion by keeping a cache.
#
# To avoid keeping stale modules around (we only need the one from the
# last run), we use a dict keyed with the full path to the script, so
# only the last version of the module is held in the cache. Note,
# however, that we must cache the module *namespace contents* (their
# __dict__). Because if we try to cache the actual modules, old ones
# (uncached) could be destroyed while still holding references (such as
        # those held by GUI objects that tend to be long-lived).
#
# The %reset command will flush this cache. See the cache_main_mod()
# and clear_main_mod_cache() methods for details on use.
# This is the cache used for 'main' namespaces
self._main_mod_cache = {}
# A table holding all the namespaces IPython deals with, so that
# introspection facilities can search easily.
self.ns_table = {'user_global':self.user_module.__dict__,
'user_local':self.user_ns,
'builtin':builtin_mod.__dict__
}
@property
def user_global_ns(self):
return self.user_module.__dict__
def prepare_user_module(self, user_module=None, user_ns=None):
"""Prepare the module and namespace in which user code will be run.
When IPython is started normally, both parameters are None: a new module
is created automatically, and its __dict__ used as the namespace.
If only user_module is provided, its __dict__ is used as the namespace.
If only user_ns is provided, a dummy module is created, and user_ns
becomes the global namespace. If both are provided (as they may be
when embedding), user_ns is the local namespace, and user_module
provides the global namespace.
Parameters
----------
user_module : module, optional
The current user module in which IPython is being run. If None,
a clean module will be created.
user_ns : dict, optional
A namespace in which to run interactive commands.
Returns
-------
A tuple of user_module and user_ns, each properly initialised.
"""
if user_module is None and user_ns is not None:
user_ns.setdefault("__name__", "__main__")
user_module = DummyMod()
user_module.__dict__ = user_ns
if user_module is None:
user_module = types.ModuleType("__main__",
doc="Automatically created module for IPython interactive environment")
# We must ensure that __builtin__ (without the final 's') is always
# available and pointing to the __builtin__ *module*. For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
user_module.__dict__.setdefault('__builtin__', builtin_mod)
user_module.__dict__.setdefault('__builtins__', builtin_mod)
if user_ns is None:
user_ns = user_module.__dict__
return user_module, user_ns
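    # Hedged sketch of the three call patterns described in the docstring above;
    # `shell` and `mod` are placeholder names for an InteractiveShell instance and
    # an existing module object:
    #
    #   shell.prepare_user_module()                   # fresh __main__-style module; its __dict__ is the namespace
    #   shell.prepare_user_module(user_module=mod)    # mod.__dict__ becomes the namespace
    #   shell.prepare_user_module(user_ns={"x": 1})   # dict wrapped in a DummyMod providing the global namespace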
def init_sys_modules(self):
# We need to insert into sys.modules something that looks like a
# module but which accesses the IPython namespace, for shelve and
# pickle to work interactively. Normally they rely on getting
# everything out of __main__, but for embedding purposes each IPython
# instance has its own private namespace, so we can't go shoving
# everything into __main__.
# note, however, that we should only do this for non-embedded
# ipythons, which really mimic the __main__.__dict__ with their own
# namespace. Embedded instances, on the other hand, should not do
# this because they need to manage the user local/global namespaces
# only, but they live within a 'normal' __main__ (meaning, they
# shouldn't overtake the execution environment of the script they're
# embedded in).
# This is overridden in the InteractiveShellEmbed subclass to a no-op.
main_name = self.user_module.__name__
sys.modules[main_name] = self.user_module
def init_user_ns(self):
"""Initialize all user-visible namespaces to their minimum defaults.
Certain history lists are also initialized here, as they effectively
act as user namespaces.
Notes
-----
All data structures here are only filled in, they are NOT reset by this
method. If they were not empty before, data will simply be added to
them.
"""
# This function works in two parts: first we put a few things in
# user_ns, and we sync that contents into user_ns_hidden so that these
# initial variables aren't shown by %who. After the sync, we add the
# rest of what we *do* want the user to see with %who even on a new
# session (probably nothing, so they really only see their own stuff)
# The user dict must *always* have a __builtin__ reference to the
# Python standard __builtin__ namespace, which must be imported.
# This is so that certain operations in prompt evaluation can be
# reliably executed with builtins. Note that we can NOT use
# __builtins__ (note the 's'), because that can either be a dict or a
# module, and can even mutate at runtime, depending on the context
# (Python makes no guarantees on it). In contrast, __builtin__ is
# always a module object, though it must be explicitly imported.
# For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
ns = {}
# make global variables for user access to the histories
ns['_ih'] = self.history_manager.input_hist_parsed
ns['_oh'] = self.history_manager.output_hist
ns['_dh'] = self.history_manager.dir_hist
# user aliases to input and output histories. These shouldn't show up
# in %who, as they can have very large reprs.
ns['In'] = self.history_manager.input_hist_parsed
ns['Out'] = self.history_manager.output_hist
# Store myself as the public api!!!
ns['get_ipython'] = self.get_ipython
ns['exit'] = self.exiter
ns['quit'] = self.exiter
# Sync what we've added so far to user_ns_hidden so these aren't seen
# by %who
self.user_ns_hidden.update(ns)
# Anything put into ns now would show up in %who. Think twice before
# putting anything here, as we really want %who to show the user their
# stuff, not our variables.
# Finally, update the real user's namespace
self.user_ns.update(ns)
@property
def all_ns_refs(self):
"""Get a list of references to all the namespace dictionaries in which
IPython might store a user-created object.
Note that this does not include the displayhook, which also caches
objects from the output."""
return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
[m.__dict__ for m in self._main_mod_cache.values()]
def reset(self, new_session=True, aggressive=False):
"""Clear all internal namespaces, and attempt to release references to
user objects.
If new_session is True, a new history session will be opened.
"""
# Clear histories
self.history_manager.reset(new_session)
# Reset counter used to index all histories
if new_session:
self.execution_count = 1
# Reset last execution result
self.last_execution_succeeded = True
self.last_execution_result = None
# Flush cached output items
if self.displayhook.do_full_cache:
self.displayhook.flush()
# The main execution namespaces must be cleared very carefully,
# skipping the deletion of the builtin-related keys, because doing so
# would cause errors in many object's __del__ methods.
if self.user_ns is not self.user_global_ns:
self.user_ns.clear()
ns = self.user_global_ns
drop_keys = set(ns.keys())
drop_keys.discard('__builtin__')
drop_keys.discard('__builtins__')
drop_keys.discard('__name__')
for k in drop_keys:
del ns[k]
self.user_ns_hidden.clear()
# Restore the user namespaces to minimal usability
self.init_user_ns()
if aggressive and not hasattr(self, "_sys_modules_keys"):
print("Cannot restore sys.module, no snapshot")
elif aggressive:
print("culling sys module...")
current_keys = set(sys.modules.keys())
for k in current_keys - self._sys_modules_keys:
if k.startswith("multiprocessing"):
continue
del sys.modules[k]
# Restore the default and user aliases
self.alias_manager.clear_aliases()
self.alias_manager.init_aliases()
# Now define aliases that only make sense on the terminal, because they
# need direct access to the console in a way that we can't emulate in
# GUI or web frontend
if os.name == 'posix':
for cmd in ('clear', 'more', 'less', 'man'):
if cmd not in self.magics_manager.magics['line']:
self.alias_manager.soft_define_alias(cmd, cmd)
# Flush the private list of module references kept for script
# execution protection
self.clear_main_mod_cache()
def del_var(self, varname, by_name=False):
"""Delete a variable from the various namespaces, so that, as
far as possible, we're not keeping any hidden references to it.
Parameters
----------
varname : str
The name of the variable to delete.
by_name : bool
If True, delete variables with the given name in each
namespace. If False (default), find the variable in the user
namespace, and delete references to it.
"""
if varname in ('__builtin__', '__builtins__'):
raise ValueError("Refusing to delete %s" % varname)
ns_refs = self.all_ns_refs
if by_name: # Delete by name
for ns in ns_refs:
try:
del ns[varname]
except KeyError:
pass
else: # Delete by object
try:
obj = self.user_ns[varname]
except KeyError as e:
raise NameError("name '%s' is not defined" % varname) from e
# Also check in output history
ns_refs.append(self.history_manager.output_hist)
for ns in ns_refs:
to_delete = [n for n, o in ns.items() if o is obj]
for name in to_delete:
del ns[name]
# Ensure it is removed from the last execution result
if self.last_execution_result.result is obj:
self.last_execution_result = None
# displayhook keeps extra references, but not in a dictionary
for name in ('_', '__', '___'):
if getattr(self.displayhook, name) is obj:
setattr(self.displayhook, name, None)
def reset_selective(self, regex=None):
"""Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
variable names in the users namespaces.
"""
if regex is not None:
try:
m = re.compile(regex)
except TypeError as e:
raise TypeError('regex must be a string or compiled pattern') from e
# Search for keys in each namespace that match the given regex
# If a match is found, delete the key/value pair.
            for ns in self.all_ns_refs:
                # iterate over a snapshot of the keys, since entries are
                # deleted from the namespace while looping over it
                for var in list(ns):
                    if m.search(var):
                        del ns[var]
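    # A minimal usage sketch for reset_selective() (assumes an active shell
    # reachable via get_ipython(); the names and pattern are illustrative only):
    #
    #   ip = get_ipython()
    #   ip.user_ns.update({'tmp_a': 1, 'tmp_b': 2, 'keep_me': 3})
    #   ip.reset_selective(r'^tmp_')   # removes tmp_a and tmp_b, keeps keep_me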
def push(self, variables, interactive=True):
"""Inject a group of variables into the IPython user namespace.
Parameters
----------
variables : dict, str or list/tuple of str
The variables to inject into the user's namespace. If a dict, a
simple update is done. If a str, the string is assumed to have
variable names separated by spaces. A list/tuple of str can also
be used to give the variable names. If just the variable names are
            given (list/tuple/str) then the variable values are looked up in the
            caller's frame.
interactive : bool
If True (default), the variables will be listed with the ``who``
magic.
"""
vdict = None
# We need a dict of name/value pairs to do namespace updates.
if isinstance(variables, dict):
vdict = variables
elif isinstance(variables, (str, list, tuple)):
if isinstance(variables, str):
vlist = variables.split()
else:
vlist = variables
vdict = {}
cf = sys._getframe(1)
for name in vlist:
try:
vdict[name] = eval(name, cf.f_globals, cf.f_locals)
except:
print('Could not get variable %s from %s' %
(name,cf.f_code.co_name))
else:
raise ValueError('variables must be a dict/str/list/tuple')
# Propagate variables to user namespace
self.user_ns.update(vdict)
# And configure interactive visibility
user_ns_hidden = self.user_ns_hidden
if interactive:
for name in vdict:
user_ns_hidden.pop(name, None)
else:
user_ns_hidden.update(vdict)
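    # A minimal usage sketch for push() (assumes an active shell reachable via
    # get_ipython(); the variable names are illustrative only):
    #
    #   ip = get_ipython()
    #   ip.push({'answer': 42})            # dict form: plain namespace update
    #   x, y = 1, 2
    #   ip.push('x y')                     # str form: values taken from the caller's frame
    #   ip.push(['x'], interactive=False)  # keep the name hidden from %who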
def drop_by_id(self, variables):
"""Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
"""
for name, obj in variables.items():
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.pop(name, None)
#-------------------------------------------------------------------------
# Things related to object introspection
#-------------------------------------------------------------------------
def _ofind(self, oname, namespaces=None):
"""Find an object in the available namespaces.
self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic
Has special code to detect magic functions.
"""
oname = oname.strip()
if not oname.startswith(ESC_MAGIC) and \
not oname.startswith(ESC_MAGIC2) and \
not all(a.isidentifier() for a in oname.split(".")):
return {'found': False}
if namespaces is None:
# Namespaces to search in:
# Put them in a list. The order is important so that we
# find things in the same order that Python finds them.
namespaces = [ ('Interactive', self.user_ns),
('Interactive (global)', self.user_global_ns),
('Python builtin', builtin_mod.__dict__),
]
ismagic = False
isalias = False
found = False
ospace = None
parent = None
obj = None
# Look for the given name by splitting it in parts. If the head is
# found, then we look for all the remaining parts as members, and only
# declare success if we can find them all.
oname_parts = oname.split('.')
oname_head, oname_rest = oname_parts[0],oname_parts[1:]
for nsname,ns in namespaces:
try:
obj = ns[oname_head]
except KeyError:
continue
else:
for idx, part in enumerate(oname_rest):
try:
parent = obj
# The last part is looked up in a special way to avoid
# descriptor invocation as it may raise or have side
# effects.
if idx == len(oname_rest) - 1:
obj = self._getattr_property(obj, part)
else:
obj = getattr(obj, part)
except:
# Blanket except b/c some badly implemented objects
# allow __getattr__ to raise exceptions other than
# AttributeError, which then crashes IPython.
break
else:
# If we finish the for loop (no break), we got all members
found = True
ospace = nsname
break # namespace loop
# Try to see if it's magic
if not found:
obj = None
if oname.startswith(ESC_MAGIC2):
oname = oname.lstrip(ESC_MAGIC2)
obj = self.find_cell_magic(oname)
elif oname.startswith(ESC_MAGIC):
oname = oname.lstrip(ESC_MAGIC)
obj = self.find_line_magic(oname)
else:
# search without prefix, so run? will find %run?
obj = self.find_line_magic(oname)
if obj is None:
obj = self.find_cell_magic(oname)
if obj is not None:
found = True
ospace = 'IPython internal'
ismagic = True
isalias = isinstance(obj, Alias)
# Last try: special-case some literals like '', [], {}, etc:
if not found and oname_head in ["''",'""','[]','{}','()']:
obj = eval(oname_head)
found = True
ospace = 'Interactive'
return {
'obj':obj,
'found':found,
'parent':parent,
'ismagic':ismagic,
'isalias':isalias,
'namespace':ospace
}
@staticmethod
def _getattr_property(obj, attrname):
"""Property-aware getattr to use in object finding.
If attrname represents a property, return it unevaluated (in case it has
        side effects or raises an error).
"""
if not isinstance(obj, type):
try:
# `getattr(type(obj), attrname)` is not guaranteed to return
# `obj`, but does so for property:
#
# property.__get__(self, None, cls) -> self
#
# The universal alternative is to traverse the mro manually
# searching for attrname in class dicts.
attr = getattr(type(obj), attrname)
except AttributeError:
pass
else:
# This relies on the fact that data descriptors (with both
# __get__ & __set__ magic methods) take precedence over
# instance-level attributes:
#
# class A(object):
# @property
# def foobar(self): return 123
# a = A()
# a.__dict__['foobar'] = 345
# a.foobar # == 123
#
# So, a property may be returned right away.
if isinstance(attr, property):
return attr
# Nothing helped, fall back.
return getattr(obj, attrname)
def _object_find(self, oname, namespaces=None):
"""Find an object and return a struct with info about it."""
return Struct(self._ofind(oname, namespaces))
def _inspect(self, meth, oname, namespaces=None, **kw):
"""Generic interface to the inspector system.
This function is meant to be called by pdef, pdoc & friends.
"""
info = self._object_find(oname, namespaces)
docformat = (
sphinxify(self.object_inspect(oname)) if self.sphinxify_docstring else None
)
if info.found:
pmethod = getattr(self.inspector, meth)
# TODO: only apply format_screen to the plain/text repr of the mime
# bundle.
formatter = format_screen if info.ismagic else docformat
if meth == 'pdoc':
pmethod(info.obj, oname, formatter)
elif meth == 'pinfo':
pmethod(
info.obj,
oname,
formatter,
info,
enable_html_pager=self.enable_html_pager,
**kw,
)
else:
pmethod(info.obj, oname)
else:
print('Object `%s` not found.' % oname)
return 'not found' # so callers can take other action
def object_inspect(self, oname, detail_level=0):
"""Get object info about oname"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector.info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
return oinspect.object_info(name=oname, found=False)
def object_inspect_text(self, oname, detail_level=0):
"""Get object info as formatted text"""
return self.object_inspect_mime(oname, detail_level)['text/plain']
def object_inspect_mime(self, oname, detail_level=0, omit_sections=()):
"""Get object info as a mimebundle of formatted representations.
A mimebundle is a dictionary, keyed by mime-type.
It must always have the key `'text/plain'`.
"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
docformat = (
sphinxify(self.object_inspect(oname))
if self.sphinxify_docstring
else None
)
return self.inspector._get_info(
info.obj,
oname,
info=info,
detail_level=detail_level,
formatter=docformat,
omit_sections=omit_sections,
)
else:
raise KeyError(oname)
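    # A minimal usage sketch for the inspection API above (assumes an active
    # shell; 'len' is just an example object name):
    #
    #   ip = get_ipython()
    #   info = ip.object_inspect('len')          # dict of metadata about the object
    #   text = ip.object_inspect_text('len')     # the 'text/plain' rendering only
    #   bundle = ip.object_inspect_mime('len')   # full mimebundle keyed by mime type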
#-------------------------------------------------------------------------
# Things related to history management
#-------------------------------------------------------------------------
def init_history(self):
"""Sets up the command history, and starts regular autosaves."""
self.history_manager = HistoryManager(shell=self, parent=self)
self.configurables.append(self.history_manager)
#-------------------------------------------------------------------------
# Things related to exception handling and tracebacks (not debugging)
#-------------------------------------------------------------------------
debugger_cls = InterruptiblePdb
def init_traceback_handlers(self, custom_exceptions):
# Syntax error handler.
self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self)
# The interactive one is initialized with an offset, meaning we always
# want to remove the topmost item in the traceback, which is our own
# internal code. Valid modes: ['Plain','Context','Verbose','Minimal']
self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
color_scheme='NoColor',
tb_offset = 1,
check_cache=check_linecache_ipython,
debugger_cls=self.debugger_cls, parent=self)
# The instance will store a pointer to the system-wide exception hook,
# so that runtime code (such as magics) can access it. This is because
# during the read-eval loop, it may get temporarily overwritten.
self.sys_excepthook = sys.excepthook
# and add any custom exception handlers the user may have specified
self.set_custom_exc(*custom_exceptions)
# Set the exception mode
self.InteractiveTB.set_mode(mode=self.xmode)
def set_custom_exc(self, exc_tuple, handler):
"""set_custom_exc(exc_tuple, handler)
Set a custom exception handler, which will be called if any of the
exceptions in exc_tuple occur in the mainloop (specifically, in the
run_code() method).
Parameters
----------
exc_tuple : tuple of exception classes
A *tuple* of exception classes, for which to call the defined
handler. It is very important that you use a tuple, and NOT A
LIST here, because of the way Python's except statement works. If
you only want to trap a single exception, use a singleton tuple::
exc_tuple == (MyCustomException,)
handler : callable
handler must have the following signature::
def my_handler(self, etype, value, tb, tb_offset=None):
...
return structured_traceback
Your handler must return a structured traceback (a list of strings),
or None.
This will be made into an instance method (via types.MethodType)
of IPython itself, and it will be called if any of the exceptions
listed in the exc_tuple are caught. If the handler is None, an
internal basic one is used, which just prints basic info.
To protect IPython from crashes, if your handler ever raises an
exception or returns an invalid result, it will be immediately
disabled.
Notes
-----
WARNING: by putting in your own exception handler into IPython's main
execution loop, you run a very good chance of nasty crashes. This
facility should only be used if you really know what you are doing.
"""
if not isinstance(exc_tuple, tuple):
raise TypeError("The custom exceptions must be given as a tuple.")
def dummy_handler(self, etype, value, tb, tb_offset=None):
print('*** Simple custom exception handler ***')
print('Exception type :', etype)
print('Exception value:', value)
print('Traceback :', tb)
def validate_stb(stb):
"""validate structured traceback return type
return type of CustomTB *should* be a list of strings, but allow
single strings or None, which are harmless.
This function will *always* return a list of strings,
and will raise a TypeError if stb is inappropriate.
"""
msg = "CustomTB must return list of strings, not %r" % stb
if stb is None:
return []
elif isinstance(stb, str):
return [stb]
elif not isinstance(stb, list):
raise TypeError(msg)
# it's a list
for line in stb:
# check every element
if not isinstance(line, str):
raise TypeError(msg)
return stb
if handler is None:
wrapped = dummy_handler
else:
def wrapped(self,etype,value,tb,tb_offset=None):
"""wrap CustomTB handler, to protect IPython from user code
This makes it harder (but not impossible) for custom exception
handlers to crash IPython.
"""
try:
stb = handler(self,etype,value,tb,tb_offset=tb_offset)
return validate_stb(stb)
except:
# clear custom handler immediately
self.set_custom_exc((), None)
print("Custom TB Handler failed, unregistering", file=sys.stderr)
# show the exception in handler first
stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
print(self.InteractiveTB.stb2text(stb))
print("The original exception:")
stb = self.InteractiveTB.structured_traceback(
(etype,value,tb), tb_offset=tb_offset
)
return stb
self.CustomTB = types.MethodType(wrapped,self)
self.custom_exceptions = exc_tuple
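    # A minimal usage sketch for set_custom_exc() (assumes an active shell; the
    # handler below is illustrative and simply wraps the standard traceback):
    #
    #   def my_handler(self, etype, value, tb, tb_offset=None):
    #       print("*** custom handler saw", etype.__name__, "***")
    #       return self.InteractiveTB.structured_traceback(
    #           etype, value, tb, tb_offset=tb_offset)
    #
    #   get_ipython().set_custom_exc((ZeroDivisionError,), my_handler)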
def excepthook(self, etype, value, tb):
"""One more defense for GUI apps that call sys.excepthook.
GUI frameworks like wxPython trap exceptions and call
sys.excepthook themselves. I guess this is a feature that
enables them to keep running after exceptions that would
otherwise kill their mainloop. This is a bother for IPython
which expects to catch all of the program exceptions with a try:
except: statement.
Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
any app directly invokes sys.excepthook, it will look to the user like
IPython crashed. In order to work around this, we can disable the
CrashHandler and replace it with this excepthook instead, which prints a
regular traceback using our InteractiveTB. In this fashion, apps which
call sys.excepthook will generate a regular-looking exception from
IPython, and the CrashHandler will only be triggered by real IPython
crashes.
This hook should be used sparingly, only in places which are not likely
to be true IPython errors.
"""
self.showtraceback((etype, value, tb), tb_offset=0)
def _get_exc_info(self, exc_tuple=None):
"""get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
Ensures sys.last_type,value,traceback hold the exc_info we found,
from whichever source.
raises ValueError if none of these contain any information
"""
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
# Now store the exception info in sys.last_type etc.
# WARNING: these variables are somewhat deprecated and not
# necessarily safe to use in a threaded environment, but tools
# like pdb depend on their existence, so let's set them. If we
# find problems in the field, we'll need to revisit their use.
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
return etype, value, tb
def show_usage_error(self, exc):
"""Show a short message for UsageErrors
These are special exceptions that shouldn't show a traceback.
"""
print("UsageError: %s" % exc, file=sys.stderr)
def get_exception_only(self, exc_tuple=None):
"""
Return as a string (ending with a newline) the exception that
just occurred, without any traceback.
"""
etype, value, tb = self._get_exc_info(exc_tuple)
msg = traceback.format_exception_only(etype, value)
return ''.join(msg)
def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
exception_only=False, running_compiled_code=False):
"""Display the exception that just occurred.
If nothing is known about the exception, this is the method which
should be used throughout the code for presenting user tracebacks,
rather than directly invoking the InteractiveTB object.
A specific showsyntaxerror() also exists, but this method can take
care of calling it if needed, so unless you are explicitly catching a
SyntaxError exception, don't try to analyze the stack manually and
simply call this method."""
try:
try:
etype, value, tb = self._get_exc_info(exc_tuple)
except ValueError:
print('No traceback available to show.', file=sys.stderr)
return
if issubclass(etype, SyntaxError):
# Though this won't be called by syntax errors in the input
# line, there may be SyntaxError cases with imported code.
self.showsyntaxerror(filename, running_compiled_code)
elif etype is UsageError:
self.show_usage_error(value)
else:
if exception_only:
stb = ['An exception has occurred, use %tb to see '
'the full traceback.\n']
stb.extend(self.InteractiveTB.get_exception_only(etype,
value))
else:
try:
# Exception classes can customise their traceback - we
# use this in IPython.parallel for exceptions occurring
# in the engines. This should return a list of strings.
if hasattr(value, "_render_traceback_"):
stb = value._render_traceback_()
else:
stb = self.InteractiveTB.structured_traceback(
etype, value, tb, tb_offset=tb_offset
)
except Exception:
print(
"Unexpected exception formatting exception. Falling back to standard exception"
)
traceback.print_exc()
return None
self._showtraceback(etype, value, stb)
if self.call_pdb:
# drop into debugger
self.debugger(force=True)
return
# Actually show the traceback
self._showtraceback(etype, value, stb)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
def _showtraceback(self, etype, evalue, stb: str):
"""Actually show a traceback.
Subclasses may override this method to put the traceback on a different
place, like a side channel.
"""
val = self.InteractiveTB.stb2text(stb)
try:
print(val)
except UnicodeEncodeError:
print(val.encode("utf-8", "backslashreplace").decode())
def showsyntaxerror(self, filename=None, running_compiled_code=False):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
        If the syntax error occurred when running compiled code (i.e.
        running_compiled_code=True), a longer stack trace will be displayed.
"""
etype, value, last_traceback = self._get_exc_info()
if filename and issubclass(etype, SyntaxError):
try:
value.filename = filename
except:
# Not the format we expect; leave it alone
pass
# If the error occurred when executing compiled code, we should provide full stacktrace.
elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
stb = self.SyntaxTB.structured_traceback(etype, value, elist)
self._showtraceback(etype, value, stb)
# This is overridden in TerminalInteractiveShell to show a message about
# the %paste magic.
def showindentationerror(self):
"""Called by _run_cell when there's an IndentationError in code entered
at the prompt.
This is overridden in TerminalInteractiveShell to show a message about
the %paste magic."""
self.showsyntaxerror()
@skip_doctest
def set_next_input(self, s, replace=False):
""" Sets the 'default' input string for the next command line.
Example::
            In [1]: _ip.set_next_input("Hello World")
            In [2]: Hello World_  # cursor is here
"""
self.rl_next_input = s
def _indent_current_str(self):
"""return the current level of indentation as a string"""
return self.input_splitter.get_indent_spaces() * ' '
#-------------------------------------------------------------------------
# Things related to text completion
#-------------------------------------------------------------------------
def init_completer(self):
"""Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programmatically (such as in test suites) or out-of-process
(typically over the network by remote frontends).
"""
from IPython.core.completer import IPCompleter
from IPython.core.completerlib import (
cd_completer,
magic_run_completer,
module_completer,
reset_completer,
)
self.Completer = IPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
parent=self,
)
self.configurables.append(self.Completer)
# Add custom completers to the basic ones built into IPCompleter
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key = 'import')
self.set_hook('complete_command', module_completer, str_key = 'from')
self.set_hook('complete_command', module_completer, str_key = '%aimport')
self.set_hook('complete_command', magic_run_completer, str_key = '%run')
self.set_hook('complete_command', cd_completer, str_key = '%cd')
self.set_hook('complete_command', reset_completer, str_key = '%reset')
@skip_doctest
def complete(self, text, line=None, cursor_pos=None):
"""Return the completed text and a list of completions.
Parameters
----------
text : string
A string of text to be completed on. It can be given as empty and
instead a line/position pair are given. In this case, the
completer itself will split the line like readline does.
line : string, optional
The complete line that text is part of.
cursor_pos : int, optional
The position of the cursor on the input line.
Returns
-------
text : string
The actual text that was completed.
matches : list
A sorted list with all possible completions.
Notes
-----
The optional arguments allow the completion to take more context into
account, and are part of the low-level completion API.
This is a wrapper around the completion mechanism, similar to what
readline does at the command line when the TAB key is hit. By
exposing it as a method, it can be used by other non-readline
environments (such as GUIs) for text completion.
Examples
--------
In [1]: x = 'hello'
In [2]: _ip.complete('x.l')
Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
"""
# Inject names into __builtin__ so we can complete on the added names.
with self.builtin_trap:
return self.Completer.complete(text, line, cursor_pos)
def set_custom_completer(self, completer, pos=0) -> None:
"""Adds a new custom completer function.
The position argument (defaults to 0) is the index in the completers
list where you want the completer to be inserted.
`completer` should have the following signature::
            def completion(self: Completer, text: str) -> List[str]:
raise NotImplementedError
It will be bound to the current Completer instance and pass some text
and return a list with current completions to suggest to the user.
"""
newcomp = types.MethodType(completer, self.Completer)
self.Completer.custom_matchers.insert(pos,newcomp)
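    # A minimal usage sketch for set_custom_completer() (assumes an active
    # shell; the completer and its word list are illustrative only):
    #
    #   def fruit_completer(self, text):
    #       return [w for w in ('apple', 'apricot', 'banana') if w.startswith(text)]
    #
    #   get_ipython().set_custom_completer(fruit_completer)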
def set_completer_frame(self, frame=None):
"""Set the frame of the completer."""
if frame:
self.Completer.namespace = frame.f_locals
self.Completer.global_namespace = frame.f_globals
else:
self.Completer.namespace = self.user_ns
self.Completer.global_namespace = self.user_global_ns
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
def init_magics(self):
from IPython.core import magics as m
self.magics_manager = magic.MagicsManager(shell=self,
parent=self,
user_magics=m.UserMagics(self))
self.configurables.append(self.magics_manager)
# Expose as public API from the magics manager
self.register_magics = self.magics_manager.register
self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
m.NamespaceMagics, m.OSMagics, m.PackagingMagics,
m.PylabMagics, m.ScriptMagics,
)
self.register_magics(m.AsyncMagics)
# Register Magic Aliases
mman = self.magics_manager
# FIXME: magic aliases should be defined by the Magics classes
# or in MagicsManager, not here
mman.register_alias('ed', 'edit')
mman.register_alias('hist', 'history')
mman.register_alias('rep', 'recall')
mman.register_alias('SVG', 'svg', 'cell')
mman.register_alias('HTML', 'html', 'cell')
mman.register_alias('file', 'writefile', 'cell')
# FIXME: Move the color initialization to the DisplayHook, which
# should be split into a prompt manager and displayhook. We probably
# even need a centralize colors management object.
self.run_line_magic('colors', self.colors)
# Defined here so that it's included in the documentation
@functools.wraps(magic.MagicsManager.register_function)
def register_magic_function(self, func, magic_kind='line', magic_name=None):
self.magics_manager.register_function(
func, magic_kind=magic_kind, magic_name=magic_name
)
def _find_with_lazy_load(self, /, type_, magic_name: str):
"""
Try to find a magic potentially lazy-loading it.
Parameters
----------
type_: "line"|"cell"
the type of magics we are trying to find/lazy load.
magic_name: str
            The name of the magic we are trying to find/lazy load.
        Note that lazy-loading a magic may have side effects, as the extension
        providing it gets imported.
"""
finder = {"line": self.find_line_magic, "cell": self.find_cell_magic}[type_]
fn = finder(magic_name)
if fn is not None:
return fn
lazy = self.magics_manager.lazy_magics.get(magic_name)
if lazy is None:
return None
self.run_line_magic("load_ext", lazy)
res = finder(magic_name)
return res
def run_line_magic(self, magic_name: str, line, _stack_depth=1):
"""Execute the given line magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the input line as a single string.
_stack_depth : int
If run_line_magic() is called from magic() then _stack_depth=2.
This is added to ensure backward compatibility for use of 'get_ipython().magic()'
"""
fn = self._find_with_lazy_load("line", magic_name)
if fn is None:
lazy = self.magics_manager.lazy_magics.get(magic_name)
if lazy:
self.run_line_magic("load_ext", lazy)
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
etpl = "Line magic function `%%%s` not found%s."
extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
'did you mean that instead?)' % magic_name )
raise UsageError(etpl % (magic_name, extra))
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
# Determine stack_depth depending on where run_line_magic() has been called
stack_depth = _stack_depth
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
# Put magic args in a list so we can call with f(*a) syntax
args = [magic_arg_s]
kwargs = {}
# Grab local namespace if we need it:
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = self.get_local_scope(stack_depth)
with self.builtin_trap:
result = fn(*args, **kwargs)
return result
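    # A minimal usage sketch for run_line_magic() (assumes an active shell; the
    # magic names and arguments are illustrative):
    #
    #   ip = get_ipython()
    #   ip.run_line_magic('timeit', 'sum(range(10))')
    #   ip.run_line_magic('load_ext', 'autoreload')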
def get_local_scope(self, stack_depth):
"""Get local scope at given stack depth.
Parameters
----------
stack_depth : int
Depth relative to calling frame
"""
return sys._getframe(stack_depth + 1).f_locals
def run_cell_magic(self, magic_name, line, cell):
"""Execute the given cell magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the first input line as a single string.
cell : str
The body of the cell as a (possibly multiline) string.
"""
fn = self._find_with_lazy_load("cell", magic_name)
if fn is None:
lm = self.find_line_magic(magic_name)
etpl = "Cell magic `%%{0}` not found{1}."
extra = '' if lm is None else (' (But line magic `%{0}` exists, '
'did you mean that instead?)'.format(magic_name))
raise UsageError(etpl.format(magic_name, extra))
elif cell == '':
message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
if self.find_line_magic(magic_name) is not None:
message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
raise UsageError(message)
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
stack_depth = 2
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
kwargs = {}
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = self.user_ns
with self.builtin_trap:
args = (magic_arg_s, cell)
result = fn(*args, **kwargs)
return result
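    # A minimal usage sketch for run_cell_magic() (assumes an active shell; the
    # magic, filename and cell body are illustrative):
    #
    #   get_ipython().run_cell_magic('writefile', 'hello.txt',
    #                                'written from a cell magic\n')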
def find_line_magic(self, magic_name):
"""Find and return a line magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['line'].get(magic_name)
def find_cell_magic(self, magic_name):
"""Find and return a cell magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['cell'].get(magic_name)
def find_magic(self, magic_name, magic_kind='line'):
"""Find and return a magic of the given type by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics[magic_kind].get(magic_name)
def magic(self, arg_s):
"""
DEPRECATED
Deprecated since IPython 0.13 (warning added in
8.1), use run_line_magic(magic_name, parameter_s).
Call a magic function by name.
Input: a string containing the name of the magic function to call and
any additional arguments to be passed to the magic.
magic('name -opt foo bar') is equivalent to typing at the ipython
prompt:
In[1]: %name -opt foo bar
To call a magic without arguments, simply use magic('name').
This provides a proper Python function to call IPython's magics in any
valid Python code you can type at the interpreter, including loops and
compound statements.
"""
warnings.warn(
"`magic(...)` is deprecated since IPython 0.13 (warning added in "
"8.1), use run_line_magic(magic_name, parameter_s).",
DeprecationWarning,
stacklevel=2,
)
# TODO: should we issue a loud deprecation warning here?
magic_name, _, magic_arg_s = arg_s.partition(' ')
magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
def define_macro(self, name, themacro):
"""Define a new macro
Parameters
----------
name : str
The name of the macro.
themacro : str or Macro
The action to do upon invoking the macro. If a string, a new
Macro object is created by passing the string to it.
"""
from IPython.core import macro
if isinstance(themacro, str):
themacro = macro.Macro(themacro)
if not isinstance(themacro, macro.Macro):
raise ValueError('A macro must be a string or a Macro instance.')
self.user_ns[name] = themacro
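    # A minimal usage sketch for define_macro() (assumes an active shell; the
    # macro name and body are illustrative):
    #
    #   ip = get_ipython()
    #   ip.define_macro('greet', 'print("hello")\nprint("world")\n')
    #   # typing ``greet`` by itself at the prompt then replays the stored lines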
#-------------------------------------------------------------------------
# Things related to the running of system commands
#-------------------------------------------------------------------------
def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
            Command to execute (cannot end in '&', as background processes are
            not supported). Should not be a command that expects input
            other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
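    # A minimal usage sketch for system_piped() (assumes an active shell on a
    # POSIX system; the command is illustrative):
    #
    #   ip = get_ipython()
    #   ip.system('echo hello')        # 'system' is aliased to system_piped below
    #   ip.user_ns['_exit_code']       # 0 on success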
def system_raw(self, cmd):
"""Call the given cmd in a subprocess using os.system on Windows or
subprocess.call using the system shell on other platforms.
Parameters
----------
cmd : str
Command to execute.
"""
cmd = self.var_expand(cmd, depth=1)
# warn if there is an IPython magic alternative.
main_cmd = cmd.split()[0]
has_magic_alternatives = ("pip", "conda", "cd")
if main_cmd in has_magic_alternatives:
warnings.warn(
(
"You executed the system command !{0} which may not work "
"as expected. Try the IPython magic %{0} instead."
).format(main_cmd)
)
# protect os.system from UNC paths on Windows, which it can't handle:
if sys.platform == 'win32':
from IPython.utils._process_win32 import AvoidUNCPath
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
try:
ec = os.system(cmd)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
ec = -2
else:
# For posix the result of the subprocess.call() below is an exit
# code, which by convention is zero for success, positive for
# program failure. Exit codes above 128 are reserved for signals,
# and the formula for converting a signal to an exit code is usually
# signal_number+128. To more easily differentiate between exit
# codes and signals, ipython uses negative numbers. For instance
# since control-c is signal 2 but exit code 130, ipython's
# _exit_code variable will read -2. Note that some shells like
# csh and fish don't follow sh/bash conventions for exit codes.
executable = os.environ.get('SHELL', None)
try:
# Use env shell instead of default /bin/sh
ec = subprocess.call(cmd, shell=True, executable=executable)
except KeyboardInterrupt:
# intercept control-C; a long traceback is not useful here
print('\n' + self.get_exception_only(), file=sys.stderr)
ec = 130
if ec > 128:
ec = -(ec - 128)
# We explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns. Note the semantics
        # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
# but raising SystemExit(_exit_code) will give status 254!
self.user_ns['_exit_code'] = ec
# use piped system by default, because it is better behaved
system = system_piped
def getoutput(self, cmd, split=True, depth=0):
"""Get output (possibly including stderr) from a subprocess.
Parameters
----------
cmd : str
            Command to execute (cannot end in '&', as background processes are
            not supported).
split : bool, optional
If True, split the output into an IPython SList. Otherwise, an
IPython LSString is returned. These are objects similar to normal
lists and strings, with a few convenience attributes for easier
manipulation of line-based output. You can use '?' on them for
details.
depth : int, optional
How many frames above the caller are the local variables which should
be expanded in the command string? The default (0) assumes that the
expansion variables are in the stack frame calling this function.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
raise OSError("Background processes not supported.")
out = getoutput(self.var_expand(cmd, depth=depth+1))
if split:
out = SList(out.splitlines())
else:
out = LSString(out)
return out
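    # A minimal usage sketch for getoutput() (assumes an active shell on a
    # POSIX system; the command is illustrative):
    #
    #   ip = get_ipython()
    #   files = ip.getoutput('ls -1')              # SList of output lines
    #   raw = ip.getoutput('ls -1', split=False)   # one LSString with the whole output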
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
self.alias_manager = AliasManager(shell=self, parent=self)
self.configurables.append(self.alias_manager)
#-------------------------------------------------------------------------
# Things related to extensions
#-------------------------------------------------------------------------
def init_extension_manager(self):
self.extension_manager = ExtensionManager(shell=self, parent=self)
self.configurables.append(self.extension_manager)
#-------------------------------------------------------------------------
# Things related to payloads
#-------------------------------------------------------------------------
def init_payload(self):
self.payload_manager = PayloadManager(parent=self)
self.configurables.append(self.payload_manager)
#-------------------------------------------------------------------------
# Things related to the prefilter
#-------------------------------------------------------------------------
def init_prefilter(self):
self.prefilter_manager = PrefilterManager(shell=self, parent=self)
self.configurables.append(self.prefilter_manager)
# Ultimately this will be refactored in the new interpreter code, but
# for now, we should expose the main prefilter method (there's legacy
# code out there that may rely on this).
self.prefilter = self.prefilter_manager.prefilter_lines
def auto_rewrite_input(self, cmd):
"""Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
"""
if not self.show_rewritten_input:
return
# This is overridden in TerminalInteractiveShell to use fancy prompts
print("------> " + cmd)
#-------------------------------------------------------------------------
# Things related to extracting values/expressions from kernel and user_ns
#-------------------------------------------------------------------------
def _user_obj_error(self):
"""return simple exception dict
for use in user_expressions
"""
etype, evalue, tb = self._get_exc_info()
stb = self.InteractiveTB.get_exception_only(etype, evalue)
exc_info = {
"status": "error",
"traceback": stb,
"ename": etype.__name__,
"evalue": py3compat.safe_unicode(evalue),
}
return exc_info
def _format_user_obj(self, obj):
"""format a user object to display dict
for use in user_expressions
"""
data, md = self.display_formatter.format(obj)
value = {
'status' : 'ok',
'data' : data,
'metadata' : md,
}
return value
def user_expressions(self, expressions):
"""Evaluate a dict of expressions in the user's namespace.
Parameters
----------
expressions : dict
A dict with string keys and string values. The expression values
should be valid Python expressions, each of which will be evaluated
in the user namespace.
Returns
-------
A dict, keyed like the input expressions dict, with the rich mime-typed
display_data of each value.
"""
out = {}
user_ns = self.user_ns
global_ns = self.user_global_ns
for key, expr in expressions.items():
try:
value = self._format_user_obj(eval(expr, global_ns, user_ns))
except:
value = self._user_obj_error()
out[key] = value
return out
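    # A minimal usage sketch for user_expressions() (assumes an active shell;
    # the keys and expressions are illustrative):
    #
    #   out = get_ipython().user_expressions({'two': '1 + 1', 'boom': '1/0'})
    #   # out['two']  -> {'status': 'ok', 'data': {...}, 'metadata': {...}}
    #   # out['boom'] -> {'status': 'error', 'ename': 'ZeroDivisionError', ...}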
#-------------------------------------------------------------------------
# Things related to the running of code
#-------------------------------------------------------------------------
def ex(self, cmd):
"""Execute a normal python statement in user namespace."""
with self.builtin_trap:
exec(cmd, self.user_global_ns, self.user_ns)
def ev(self, expr):
"""Evaluate python expression expr in user namespace.
Returns the result of evaluation
"""
with self.builtin_trap:
return eval(expr, self.user_global_ns, self.user_ns)
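    # A minimal usage sketch for ex() and ev() (assumes an active shell; the
    # statement and expression are illustrative):
    #
    #   ip = get_ipython()
    #   ip.ex('counter = 0')     # execute a statement in the user namespace
    #   ip.ev('counter + 1')     # evaluate an expression there; returns 1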
def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
"""A safe version of the builtin execfile().
This version will never throw an exception, but instead print
helpful error messages to the screen. This only works on pure
Python files with the .py extension.
Parameters
----------
fname : string
The name of the file to be executed.
*where : tuple
One or two namespaces, passed to execfile() as (globals,locals).
If only one is given, it is passed as both.
exit_ignore : bool (False)
If True, then silence SystemExit for non-zero status (it is always
silenced for zero status, as it is so common).
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
shell_futures : bool (False)
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
"""
fname = Path(fname).expanduser().resolve()
# Make sure we can open the file
try:
with fname.open("rb"):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = str(fname.parent)
with prepended_to_syspath(dname), self.builtin_trap:
try:
glob, loc = (where + (None, ))[:2]
py3compat.execfile(
fname, glob, loc,
self.compile if shell_futures else None)
except SystemExit as status:
# If the call was made with 0 or None exit status (sys.exit(0)
# or sys.exit() ), don't bother showing a traceback, as both of
# these are considered normal by the OS:
# > python -c'import sys;sys.exit(0)'; echo $?
# 0
# > python -c'import sys;sys.exit()'; echo $?
# 0
# For other exit status, we show the exception unless
# explicitly silenced, but only in short form.
if status.code:
if raise_exceptions:
raise
if not exit_ignore:
self.showtraceback(exception_only=True)
except:
if raise_exceptions:
raise
# tb offset is 2 because we wrap execfile
self.showtraceback(tb_offset=2)
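    # A minimal usage sketch for safe_execfile() (assumes an active shell and
    # that 'script.py' exists; the filename is illustrative):
    #
    #   ip = get_ipython()
    #   ip.safe_execfile('script.py', ip.user_ns)   # errors are printed, not raised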
def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
"""Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.
Parameters
----------
fname : str
The name of the file to execute. The filename must have a
.ipy or .ipynb extension.
shell_futures : bool (False)
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
"""
fname = Path(fname).expanduser().resolve()
# Make sure we can open the file
try:
with fname.open("rb"):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = str(fname.parent)
def get_cells():
"""generator for sequence of code blocks to run"""
if fname.suffix == ".ipynb":
from nbformat import read
nb = read(fname, as_version=4)
if not nb.cells:
return
for cell in nb.cells:
if cell.cell_type == 'code':
yield cell.source
else:
yield fname.read_text(encoding="utf-8")
with prepended_to_syspath(dname):
try:
for cell in get_cells():
result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
if raise_exceptions:
result.raise_error()
elif not result.success:
break
except:
if raise_exceptions:
raise
self.showtraceback()
warn('Unknown failure executing file: <%s>' % fname)
def safe_run_module(self, mod_name, where):
"""A safe version of runpy.run_module().
This version will never throw an exception, but instead print
helpful error messages to the screen.
`SystemExit` exceptions with status code 0 or None are ignored.
Parameters
----------
mod_name : string
The name of the module to be executed.
where : dict
The globals namespace.
"""
try:
try:
where.update(
runpy.run_module(str(mod_name), run_name="__main__",
alter_sys=True)
)
except SystemExit as status:
if status.code:
raise
except:
self.showtraceback()
warn('Unknown failure executing module: <%s>' % mod_name)
def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
"""Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
            If True, avoid side-effects, such as implicit displayhooks and
            logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
Returns
-------
result : :class:`ExecutionResult`
"""
result = None
try:
result = self._run_cell(
raw_cell, store_history, silent, shell_futures)
finally:
self.events.trigger('post_execute')
if not silent:
self.events.trigger('post_run_cell', result)
return result
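    # A minimal usage sketch for run_cell() (assumes an active shell; the cell
    # source is illustrative):
    #
    #   result = get_ipython().run_cell('a = 1\na + 1')
    #   result.success           # True if no error occurred
    #   result.result            # value of the last expression (2 here)
    #   result.error_in_exec     # exception raised during execution, if any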
def _run_cell(self, raw_cell:str, store_history:bool, silent:bool, shell_futures:bool) -> ExecutionResult:
"""Internal method to run a complete IPython cell."""
        # We need to avoid calling self.transform_cell multiple times on the same thing,
# so we need to store some results:
preprocessing_exc_tuple = None
try:
transformed_cell = self.transform_cell(raw_cell)
except Exception:
transformed_cell = raw_cell
preprocessing_exc_tuple = sys.exc_info()
assert transformed_cell is not None
coro = self.run_cell_async(
raw_cell,
store_history=store_history,
silent=silent,
shell_futures=shell_futures,
transformed_cell=transformed_cell,
preprocessing_exc_tuple=preprocessing_exc_tuple,
)
# run_cell_async is async, but may not actually need an eventloop.
        # When this is the case, we want to run it using the pseudo_sync_runner
        # so that code can invoke eventloops (for example via the %run and
        # %paste magics).
if self.trio_runner:
runner = self.trio_runner
elif self.should_run_async(
raw_cell,
transformed_cell=transformed_cell,
preprocessing_exc_tuple=preprocessing_exc_tuple,
):
runner = self.loop_runner
else:
runner = _pseudo_sync_runner
try:
return runner(coro)
except BaseException as e:
info = ExecutionInfo(raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
result.error_in_exec = e
self.showtraceback(running_compiled_code=True)
return result
def should_run_async(
self, raw_cell: str, *, transformed_cell=None, preprocessing_exc_tuple=None
) -> bool:
"""Return whether a cell should be run asynchronously via a coroutine runner
Parameters
----------
raw_cell : str
The code to be executed
Returns
-------
result: bool
Whether the code needs to be run with a coroutine runner or not
.. versionadded:: 7.0
"""
if not self.autoawait:
return False
if preprocessing_exc_tuple is not None:
return False
assert preprocessing_exc_tuple is None
if transformed_cell is None:
            warnings.warn(
                "`should_run_async` will not call `transform_cell`"
                " automatically in the future. Please pass the result to"
                " the `transformed_cell` argument and any exception that happens"
                " during the transform in `preprocessing_exc_tuple` in"
                " IPython 7.17 and above.",
                DeprecationWarning,
                stacklevel=2,
            )
try:
cell = self.transform_cell(raw_cell)
except Exception:
# any exception during transform will be raised
# prior to execution
return False
else:
cell = transformed_cell
return _should_be_async(cell)
async def run_cell_async(
self,
raw_cell: str,
store_history=False,
silent=False,
shell_futures=True,
*,
transformed_cell: Optional[str] = None,
preprocessing_exc_tuple: Optional[Any] = None
) -> ExecutionResult:
"""Run a complete IPython cell asynchronously.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
            If True, avoid side-effects, such as implicit displayhooks and
            logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
transformed_cell: str
            cell that was passed through the input transformers
        preprocessing_exc_tuple:
            exception info tuple captured if the transformation failed.
Returns
-------
result : :class:`ExecutionResult`
.. versionadded:: 7.0
"""
info = ExecutionInfo(
raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
if (not raw_cell) or raw_cell.isspace():
self.last_execution_succeeded = True
self.last_execution_result = result
return result
if silent:
store_history = False
if store_history:
result.execution_count = self.execution_count
def error_before_exec(value):
if store_history:
self.execution_count += 1
result.error_before_exec = value
self.last_execution_succeeded = False
self.last_execution_result = result
return result
self.events.trigger('pre_execute')
if not silent:
self.events.trigger('pre_run_cell', info)
if transformed_cell is None:
            warnings.warn(
                "`run_cell_async` will not call `transform_cell`"
                " automatically in the future. Please pass the result to"
                " the `transformed_cell` argument and any exception that happens"
                " during the transform in `preprocessing_exc_tuple` in"
                " IPython 7.17 and above.",
                DeprecationWarning,
                stacklevel=2,
            )
# If any of our input transformation (input_transformer_manager or
# prefilter_manager) raises an exception, we store it in this variable
# so that we can display the error after logging the input and storing
# it in the history.
try:
cell = self.transform_cell(raw_cell)
except Exception:
preprocessing_exc_tuple = sys.exc_info()
cell = raw_cell # cell has to exist so it can be stored/logged
else:
preprocessing_exc_tuple = None
else:
if preprocessing_exc_tuple is None:
cell = transformed_cell
else:
cell = raw_cell
# Store raw and processed history
if store_history and raw_cell.strip(" %") != "paste":
self.history_manager.store_inputs(self.execution_count, cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
# Display the exception if input processing failed.
if preprocessing_exc_tuple is not None:
self.showtraceback(preprocessing_exc_tuple)
if store_history:
self.execution_count += 1
return error_before_exec(preprocessing_exc_tuple[1])
# Our own compiler remembers the __future__ environment. If we want to
# run code with a separate __future__ environment, use the default
# compiler
compiler = self.compile if shell_futures else self.compiler_class()
_run_async = False
with self.builtin_trap:
cell_name = compiler.cache(cell, self.execution_count, raw_code=raw_cell)
with self.display_trap:
# Compile to bytecode
try:
code_ast = compiler.ast_parse(cell, filename=cell_name)
except self.custom_exceptions as e:
etype, value, tb = sys.exc_info()
self.CustomTB(etype, value, tb)
return error_before_exec(e)
except IndentationError as e:
self.showindentationerror()
return error_before_exec(e)
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError) as e:
self.showsyntaxerror()
return error_before_exec(e)
# Apply AST transformations
try:
code_ast = self.transform_ast(code_ast)
except InputRejected as e:
self.showtraceback()
return error_before_exec(e)
# Give the displayhook a reference to our ExecutionResult so it
# can fill in the output value.
self.displayhook.exec_result = result
# Execute the user code
interactivity = "none" if silent else self.ast_node_interactivity
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity, compiler=compiler, result=result)
self.last_execution_succeeded = not has_raised
self.last_execution_result = result
# Reset this so later displayed values do not modify the
# ExecutionResult
self.displayhook.exec_result = None
if store_history:
# Write output to the database. Does nothing unless
# history output logging is enabled.
self.history_manager.store_output(self.execution_count)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1
return result
def transform_cell(self, raw_cell):
"""Transform an input cell before parsing it.
Static transformations, implemented in IPython.core.inputtransformer2,
deal with things like ``%magic`` and ``!system`` commands.
These run on all input.
Dynamic transformations, for things like unescaped magics and the exit
autocall, depend on the state of the interpreter.
These only apply to single line inputs.
These string-based transformations are followed by AST transformations;
see :meth:`transform_ast`.
"""
# Static input transformations
cell = self.input_transformer_manager.transform_cell(raw_cell)
if len(cell.splitlines()) == 1:
# Dynamic transformations - only applied for single line commands
with self.builtin_trap:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
lines = cell.splitlines(keepends=True)
for transform in self.input_transformers_post:
lines = transform(lines)
cell = ''.join(lines)
return cell
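    # A minimal usage sketch for transform_cell() (assumes an active shell; the
    # input line and the exact output string are illustrative):
    #
    #   get_ipython().transform_cell('%time 1 + 1\n')
    #   # -> roughly "get_ipython().run_line_magic('time', '1 + 1')\n"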
def transform_ast(self, node):
"""Apply the AST transformations from self.ast_transformers
Parameters
----------
node : ast.Node
The root node to be transformed. Typically called with the ast.Module
produced by parsing user input.
Returns
-------
An ast.Node corresponding to the node it was called with. Note that it
may also modify the passed object, so don't rely on references to the
original AST.
"""
for transformer in self.ast_transformers:
try:
node = transformer.visit(node)
except InputRejected:
# User-supplied AST transformers can reject an input by raising
# an InputRejected. Short-circuit in this case so that we
# don't unregister the transform.
raise
except Exception:
warn("AST transformer %r threw an error. It will be unregistered." % transformer)
self.ast_transformers.remove(transformer)
if self.ast_transformers:
ast.fix_missing_locations(node)
return node
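    # A minimal usage sketch for registering an AST transformer (assumes an
    # active shell; the transformer below is a do-nothing illustration):
    #
    #   import ast
    #
    #   class NoopTransformer(ast.NodeTransformer):
    #       def visit(self, node):
    #           return super().visit(node)
    #
    #   get_ipython().ast_transformers.append(NoopTransformer())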
def _update_code_co_name(self, code):
"""Python 3.10 changed the behaviour so that whenever a code object
is assembled in the compile(ast) the co_firstlineno would be == 1.
This makes pydevd/debugpy think that all cells invoked are the same
since it caches information based on (co_firstlineno, co_name, co_filename).
Given that, this function changes the code 'co_name' to be unique
based on the first real lineno of the code (which also has a nice
side effect of customizing the name so that it's not always <module>).
See: https://github.com/ipython/ipykernel/issues/841
"""
if not hasattr(code, "replace"):
# It may not be available on older versions of Python (only
# available for 3.8 onwards).
return code
try:
first_real_line = next(dis.findlinestarts(code))[1]
except StopIteration:
return code
return code.replace(co_name="<cell line: %s>" % (first_real_line,))
async def run_ast_nodes(
self,
nodelist: ListType[stmt],
cell_name: str,
interactivity="last_expr",
compiler=compile,
result=None,
):
"""Run a sequence of AST nodes. The execution mode depends on the
interactivity parameter.
Parameters
----------
nodelist : list
A sequence of AST nodes to run.
cell_name : str
Will be passed to the compiler as the filename of the cell. Typically
the value returned by ip.compile.cache(cell).
interactivity : str
'all', 'last', 'last_expr' , 'last_expr_or_assign' or 'none',
specifying which nodes should be run interactively (displaying output
from expressions). 'last_expr' will run the last node interactively
only if it is an expression (i.e. expressions in loops or other blocks
are not displayed) 'last_expr_or_assign' will run the last expression
or the last assignment. Other values for this parameter will raise a
ValueError.
compiler : callable
A function with the same interface as the built-in compile(), to turn
the AST nodes into code objects. Default is the built-in compile().
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
Returns
-------
True if an exception occurred while running code, False if it finished
running.
"""
if not nodelist:
return
if interactivity == 'last_expr_or_assign':
if isinstance(nodelist[-1], _assign_nodes):
asg = nodelist[-1]
if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
target = asg.targets[0]
elif isinstance(asg, _single_targets_nodes):
target = asg.target
else:
target = None
if isinstance(target, ast.Name):
nnode = ast.Expr(ast.Name(target.id, ast.Load()))
ast.fix_missing_locations(nnode)
nodelist.append(nnode)
interactivity = 'last_expr'
_async = False
if interactivity == 'last_expr':
if isinstance(nodelist[-1], ast.Expr):
interactivity = "last"
else:
interactivity = "none"
if interactivity == 'none':
to_run_exec, to_run_interactive = nodelist, []
elif interactivity == 'last':
to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
elif interactivity == 'all':
to_run_exec, to_run_interactive = [], nodelist
else:
raise ValueError("Interactivity was %r" % interactivity)
try:
def compare(code):
is_async = inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
return is_async
# refactor that to just change the mod constructor.
to_run = []
for node in to_run_exec:
to_run.append((node, "exec"))
for node in to_run_interactive:
to_run.append((node, "single"))
for node, mode in to_run:
if mode == "exec":
mod = Module([node], [])
elif mode == "single":
mod = ast.Interactive([node])
with compiler.extra_flags(
getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0)
if self.autoawait
else 0x0
):
code = compiler(mod, cell_name, mode)
code = self._update_code_co_name(code)
asy = compare(code)
if await self.run_code(code, result, async_=asy):
return True
# Flush softspace
if softspace(sys.stdout, 0):
print()
except:
# It's possible to have exceptions raised here, typically by
# compilation of odd code (such as a naked 'return' outside a
# function) that did parse but isn't valid. Typically the exception
# is a SyntaxError, but it's safest just to catch anything and show
# the user a traceback.
# We do only one try/except outside the loop to minimize the impact
# on runtime, and also because if any node in the node list is
# broken, we should stop execution completely.
if result:
result.error_before_exec = sys.exc_info()[1]
self.showtraceback()
return True
return False
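    # Illustrative note (not part of the original source): with
    # interactivity='last_expr', a cell such as
    #     x = 1
    #     x + 1
    # compiles `x = 1` in 'exec' mode and `x + 1` in 'single' mode above, so
    # only the value of the final expression is displayed.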
async def run_code(self, code_obj, result=None, *, async_=False):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to display a
traceback.
Parameters
----------
code_obj : code object
A compiled code object, to be executed
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
async_ : Bool (Experimental)
Attempt to run top-level asynchronous code in a default loop.
Returns
-------
False : successful execution.
True : an error occurred.
"""
# special value to say that anything above is IPython and should be
# hidden.
__tracebackhide__ = "__ipython_bottom__"
# Set our own excepthook in case the user code tries to call it
# directly, so that the IPython crash handler doesn't get triggered
old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
# we save the original sys.excepthook in the instance, in case config
# code (such as magics) needs access to it.
self.sys_excepthook = old_excepthook
outflag = True # happens in more places, so it's easier as default
try:
try:
if async_:
await eval(code_obj, self.user_global_ns, self.user_ns)
else:
exec(code_obj, self.user_global_ns, self.user_ns)
finally:
# Reset our crash handler in place
sys.excepthook = old_excepthook
except SystemExit as e:
if result is not None:
result.error_in_exec = e
self.showtraceback(exception_only=True)
warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
except self.custom_exceptions:
etype, value, tb = sys.exc_info()
if result is not None:
result.error_in_exec = value
self.CustomTB(etype, value, tb)
except:
if result is not None:
result.error_in_exec = sys.exc_info()[1]
self.showtraceback(running_compiled_code=True)
else:
outflag = False
return outflag
# For backwards compatibility
runcode = run_code
def check_complete(self, code: str) -> Tuple[str, str]:
"""Return whether a block of code is ready to execute, or should be continued
Parameters
----------
code : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent : str
When status is 'incomplete', this is some whitespace to insert on
the next line of the prompt.
"""
status, nspaces = self.input_transformer_manager.check_complete(code)
return status, ' ' * (nspaces or 0)
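    # Illustrative usage (sketch, assuming an InteractiveShell instance named
    # `shell`); frontends typically poll this while the user types:
    #     shell.check_complete("1 + 1")               # -> ('complete', '')
    #     shell.check_complete("for i in range(3):")  # -> ('incomplete', '    ')
    # where the returned whitespace is the suggested indent for the next line.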
#-------------------------------------------------------------------------
# Things related to GUI support and pylab
#-------------------------------------------------------------------------
active_eventloop = None
def enable_gui(self, gui=None):
raise NotImplementedError('Implement enable_gui in a subclass')
def enable_matplotlib(self, gui=None):
"""Enable interactive matplotlib and inline figure support.
This takes the following steps:
1. select the appropriate eventloop and matplotlib backend
2. set up matplotlib for interactive use with that backend
3. configure formatters for inline figure display
4. enable the selected gui eventloop
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
"""
from matplotlib_inline.backend_inline import configure_inline_support
from IPython.core import pylabtools as pt
gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
if gui != 'inline':
# If we have our first gui selection, store it
if self.pylab_gui_select is None:
self.pylab_gui_select = gui
# Otherwise if they are different
elif gui != self.pylab_gui_select:
print('Warning: Cannot change to a different GUI toolkit: %s.'
' Using %s instead.' % (gui, self.pylab_gui_select))
gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
pt.activate_matplotlib(backend)
configure_inline_support(self, backend)
# Now we must activate the gui pylab wants to use, and fix %run to take
# plot updates into account
self.enable_gui(gui)
self.magics_manager.registry['ExecutionMagics'].default_runner = \
pt.mpl_runner(self.safe_execfile)
return gui, backend
def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
"""Activate pylab support at runtime.
This turns on support for matplotlib, preloads into the interactive
namespace all of numpy and pylab, and configures IPython to correctly
interact with the GUI event loop. The GUI backend to be used can be
optionally selected with the optional ``gui`` argument.
This method only adds preloading the namespace to InteractiveShell.enable_matplotlib.
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
import_all : optional, bool, default: True
Whether to do `from numpy import *` and `from pylab import *`
in addition to module imports.
welcome_message : deprecated
This argument is ignored, no welcome message will be displayed.
"""
from IPython.core.pylabtools import import_pylab
gui, backend = self.enable_matplotlib(gui)
# We want to prevent the loading of pylab to pollute the user's
# namespace as shown by the %who* magics, so we execute the activation
# code in an empty namespace, and we update *both* user_ns and
# user_ns_hidden with this information.
ns = {}
import_pylab(ns, import_all)
# warn about clobbered names
ignored = {"__builtins__"}
both = set(ns).intersection(self.user_ns).difference(ignored)
clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
self.user_ns.update(ns)
self.user_ns_hidden.update(ns)
return gui, backend, clobbered
#-------------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------------
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
"""Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
"""
ns = self.user_ns.copy()
try:
frame = sys._getframe(depth+1)
except ValueError:
# This is thrown if there aren't that many frames on the stack,
# e.g. if a script called run_line_magic() directly.
pass
else:
ns.update(frame.f_locals)
try:
# We have to use .vformat() here, because 'self' is a valid and common
# name, and expanding **ns for .format() would make it collide with
# the 'self' argument of the method.
cmd = formatter.vformat(cmd, args=[], kwargs=ns)
except Exception:
# if formatter couldn't format, just let it go untransformed
pass
return cmd
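    # Sketch of the expansion behaviour (assumes a name `path = '/tmp'` in the
    # user namespace; DollarFormatter handles the $name and ${name} forms):
    #     shell.var_expand("ls $path")        # -> "ls /tmp"
    #     shell.var_expand("ls ${path}/sub")  # -> "ls /tmp/sub"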
def mktempfile(self, data=None, prefix='ipython_edit_'):
"""Make a new tempfile and return its filename.
This makes a call to tempfile.mkstemp (created in a tempfile.mkdtemp),
but it registers the created filename internally so ipython cleans it up
at exit time.
Optional inputs:
- data(None): if data is given, it gets written out to the temp file
immediately, and the file is closed again."""
dir_path = Path(tempfile.mkdtemp(prefix=prefix))
self.tempdirs.append(dir_path)
handle, filename = tempfile.mkstemp(".py", prefix, dir=str(dir_path))
os.close(handle) # On Windows, there can only be one open handle on a file
file_path = Path(filename)
self.tempfiles.append(file_path)
if data:
file_path.write_text(data, encoding="utf-8")
return filename
def ask_yes_no(self, prompt, default=None, interrupt=None):
if self.quiet:
return True
return ask_yes_no(prompt,default,interrupt)
def show_usage(self):
"""Show a usage message"""
page.page(IPython.core.usage.interactive_usage)
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : str
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
If empty string is given, returns history of current session
without the last input.
raw : bool, optional
By default, the processed input is used. If this is true, the raw
input history is used instead.
Notes
-----
Slices can be described with two notations:
* ``N:M`` -> standard python form, means including items N...(M-1).
* ``N-M`` -> include items N..M (closed endpoint).
"""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
text = "\n".join(x for _, _, x in lines)
# Skip the last line, as it's probably the magic that called this
if not range_str:
if "\n" not in text:
text = ""
else:
text = text[: text.rfind("\n")]
return text
def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
"""Get a code string from history, file, url, or a string or macro.
This is mainly used by magic functions.
Parameters
----------
target : str
A string specifying code to retrieve. This will be tried respectively
as: ranges of input history (see %history for syntax), url,
corresponding .py file, filename, or an expression evaluating to a
string or Macro in the user namespace.
If empty string is given, returns complete history of current
session, without the last line.
raw : bool
If true (default), retrieve raw history. Has no effect on the other
retrieval mechanisms.
py_only : bool (default False)
Only try to fetch python code, do not try alternative methods to decode file
if unicode fails.
Returns
-------
A string of code.
ValueError is raised if nothing is found, and TypeError if it evaluates
to an object of another type. In each case, .args[0] is a printable
message.
"""
code = self.extract_input_lines(target, raw=raw) # Grab history
if code:
return code
try:
if target.startswith(('http://', 'https://')):
return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError as e:
if not py_only :
# Deferred import
from urllib.request import urlopen
response = urlopen(target)
return response.read().decode('latin1')
raise ValueError(("'%s' seem to be unreadable.") % target) from e
potential_target = [target]
try :
potential_target.insert(0,get_py_filename(target))
except IOError:
pass
for tgt in potential_target :
if os.path.isfile(tgt): # Read file
try :
return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError as e:
if not py_only :
with io_open(tgt,'r', encoding='latin1') as f :
return f.read()
raise ValueError(("'%s' seem to be unreadable.") % target) from e
elif os.path.isdir(os.path.expanduser(tgt)):
raise ValueError("'%s' is a directory, not a regular file." % target)
if search_ns:
# Inspect namespace to load object source
object_info = self.object_inspect(target, detail_level=1)
if object_info['found'] and object_info['source']:
return object_info['source']
try: # User namespace
codeobj = eval(target, self.user_ns)
except Exception as e:
raise ValueError(("'%s' was not found in history, as a file, url, "
"nor in the user namespace.") % target) from e
if isinstance(codeobj, str):
return codeobj
elif isinstance(codeobj, Macro):
return codeobj.value
raise TypeError("%s is neither a string nor a macro." % target,
codeobj)
def _atexit_once(self):
"""
        At-exit operations that need to be called at most once.
        A second call to this function per instance will do nothing.
"""
if not getattr(self, "_atexit_once_called", False):
self._atexit_once_called = True
# Clear all user namespaces to release all references cleanly.
self.reset(new_session=False)
# Close the history session (this stores the end time and line count)
# this must be *before* the tempfile cleanup, in case of temporary
# history db
self.history_manager.end_session()
self.history_manager = None
#-------------------------------------------------------------------------
# Things related to IPython exiting
#-------------------------------------------------------------------------
def atexit_operations(self):
"""This will be executed at the time of exit.
Cleanup operations and saving of persistent data that is done
unconditionally by IPython should be performed here.
For things that may depend on startup flags or platform specifics (such
as having readline or not), register a separate atexit function in the
code that has the appropriate information, rather than trying to
        clutter this method.
"""
self._atexit_once()
# Cleanup all tempfiles and folders left around
for tfile in self.tempfiles:
try:
tfile.unlink()
self.tempfiles.remove(tfile)
except FileNotFoundError:
pass
del self.tempfiles
for tdir in self.tempdirs:
try:
tdir.rmdir()
self.tempdirs.remove(tdir)
except FileNotFoundError:
pass
del self.tempdirs
# Restore user's cursor
if hasattr(self, "editing_mode") and self.editing_mode == "vi":
sys.stdout.write("\x1b[0 q")
sys.stdout.flush()
def cleanup(self):
self.restore_sys_module_state()
# Overridden in terminal subclass to change prompts
def switch_doctest_mode(self, mode):
pass
class InteractiveShellABC(metaclass=abc.ABCMeta):
"""An abstract base class for InteractiveShell."""
InteractiveShellABC.register(InteractiveShell)
| 39.53305 | 147 | 0.587781 | [
"BSD-3-Clause"
] | CMU-IDS-2022/final-project-the-evaluators | venv/lib/python3.9/site-packages/IPython/core/interactiveshell.py | 148,921 | Python |
# Copyright 2022 StackHPC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_limit import exception as limit_exceptions
from oslo_limit import fixture as limit_fixture
from oslo_limit import limit
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova.limit import placement as placement_limits
from nova.limit import utils as limit_utils
from nova import objects
from nova import quota
from nova.scheduler.client import report
from nova import test
CONF = cfg.CONF
class TestGetUsage(test.NoDBTestCase):
def setUp(self):
super(TestGetUsage, self).setUp()
self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
self.context = context.RequestContext()
@mock.patch.object(quota, "is_qfd_populated")
@mock.patch.object(objects.InstanceMappingList, "get_counts")
@mock.patch.object(report.SchedulerReportClient,
"get_usages_counts_for_limits")
def test_get_usage(self, mock_placement, mock_inst, mock_qfd):
resources = ["servers", "class:VCPU", "class:MEMORY_MB",
"class:CUSTOM_BAREMETAL"]
mock_qfd.return_value = True
mock_placement.return_value = {"VCPU": 1, "CUSTOM_BAREMETAL": 2}
mock_inst.return_value = {"project": {"instances": 42}}
usage = placement_limits._get_usage(self.context, uuids.project,
resources)
expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42,
'class:CUSTOM_BAREMETAL': 2}
self.assertDictEqual(expected, usage)
def test_get_usage_bad_resources(self):
bad_resource = ["unknown_resource"]
self.assertRaises(ValueError, placement_limits._get_usage,
self.context, uuids.project, bad_resource)
bad_class = ["class:UNKNOWN_CLASS"]
self.assertRaises(ValueError, placement_limits._get_usage,
self.context, uuids.project, bad_class)
no_resources = []
self.assertRaises(ValueError, placement_limits._get_usage,
self.context, uuids.project, no_resources)
@mock.patch.object(quota, "is_qfd_populated")
def test_get_usage_bad_qfd(self, mock_qfd):
mock_qfd.return_value = False
resources = ["servers"]
e = self.assertRaises(ValueError, placement_limits._get_usage,
self.context, uuids.project, resources)
self.assertEqual("must first migrate instance mappings", str(e))
def test_get_usage_unified_limits_disabled(self):
self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
e = self.assertRaises(NotImplementedError, placement_limits._get_usage,
self.context, uuids.project, [])
self.assertEqual("unified limits is disabled", str(e))
@mock.patch.object(quota, "is_qfd_populated")
@mock.patch.object(objects.InstanceMappingList, "get_counts")
@mock.patch.object(report.SchedulerReportClient,
'get_usages_counts_for_limits')
def test_get_usage_placement_fail(self, mock_placement, mock_inst,
mock_qfd):
resources = ["servers", "class:VCPU", "class:MEMORY_MB",
"class:CUSTOM_BAREMETAL"]
mock_qfd.return_value = True
mock_placement.side_effect = exception.UsagesRetrievalFailed(
project_id=uuids.project, user_id=uuids.user)
mock_inst.return_value = {"project": {"instances": 42}}
e = self.assertRaises(
exception.UsagesRetrievalFailed, placement_limits._get_usage,
self.context, uuids.project, resources)
expected = ("Failed to retrieve usages from placement while enforcing "
"%s quota limits." % ", ".join(resources))
self.assertEqual(expected, str(e))
@mock.patch.object(quota, "is_qfd_populated")
@mock.patch.object(objects.InstanceMappingList, "get_counts")
@mock.patch.object(report.SchedulerReportClient,
"get_usages_counts_for_limits")
def test_get_usage_pcpu_as_vcpu(self, mock_placement, mock_inst, mock_qfd):
# Test that when configured, PCPU count is merged into VCPU count when
# appropriate.
self.flags(unified_limits_count_pcpu_as_vcpu=True, group="workarounds")
mock_qfd.return_value = True
mock_inst.return_value = {"project": {"instances": 42}}
# PCPU was not specified in the flavor but usage was found in
# placement. PCPU count should be merged into VCPU count.
resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
mock_placement.return_value = {"VCPU": 1, "PCPU": 2}
usage = placement_limits._get_usage(self.context, uuids.project,
resources)
expected = {'class:MEMORY_MB': 0, 'class:VCPU': 3, 'servers': 42}
self.assertDictEqual(expected, usage)
# PCPU was not specified in the flavor and usage was found in placement
# and there was no VCPU usage in placement. The PCPU count should be
# returned as VCPU count.
resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
mock_placement.return_value = {"PCPU": 1}
usage = placement_limits._get_usage(self.context, uuids.project,
resources)
expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42}
self.assertDictEqual(expected, usage)
# PCPU was not specified in the flavor but only VCPU usage was found in
# placement.
resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
mock_placement.return_value = {"VCPU": 1}
usage = placement_limits._get_usage(self.context, uuids.project,
resources)
expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42}
self.assertDictEqual(expected, usage)
# PCPU was specified in the flavor, so the counts should be separate.
resources = ["servers", "class:VCPU", "class:MEMORY_MB", "class:PCPU"]
mock_placement.return_value = {"VCPU": 1, "PCPU": 2}
usage = placement_limits._get_usage(self.context, uuids.project,
resources)
expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42,
'class:PCPU': 2}
self.assertDictEqual(expected, usage)
class TestGetDeltas(test.NoDBTestCase):
def test_get_deltas(self):
flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
ephemeral_gb=2, root_gb=5)
deltas = placement_limits._get_deltas_by_flavor(flavor, False, 2)
expected = {'servers': 2,
'class:VCPU': 20, 'class:MEMORY_MB': 200,
'class:DISK_GB': 14}
self.assertDictEqual(expected, deltas)
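        # For reference: with 2 instances the DISK_GB delta above is
        # (root_gb 5 + ephemeral_gb 2) * 2 = 14 (swap is 0 here), and the
        # VCPU and MEMORY_MB deltas scale the flavor values the same way.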
def test_get_deltas_recheck(self):
flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
ephemeral_gb=2, root_gb=5)
deltas = placement_limits._get_deltas_by_flavor(flavor, False, 0)
expected = {'servers': 0,
'class:VCPU': 0, 'class:MEMORY_MB': 0,
'class:DISK_GB': 0}
self.assertDictEqual(expected, deltas)
def test_get_deltas_check_baremetal(self):
extra_specs = {"resources:VCPU": 0, "resources:MEMORY_MB": 0,
"resources:DISK_GB": 0, "resources:CUSTOM_BAREMETAL": 1}
flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
ephemeral_gb=2, root_gb=5,
extra_specs=extra_specs)
deltas = placement_limits._get_deltas_by_flavor(flavor, True, 1)
expected = {'servers': 1, 'class:CUSTOM_BAREMETAL': 1}
self.assertDictEqual(expected, deltas)
def test_get_deltas_check_bfv(self):
flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
ephemeral_gb=2, root_gb=5)
deltas = placement_limits._get_deltas_by_flavor(flavor, True, 2)
expected = {'servers': 2,
'class:VCPU': 20, 'class:MEMORY_MB': 200,
'class:DISK_GB': 4}
self.assertDictEqual(expected, deltas)
class TestEnforce(test.NoDBTestCase):
def setUp(self):
super(TestEnforce, self).setUp()
self.context = context.RequestContext()
self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
placement_limits._ENFORCER = mock.Mock(limit.Enforcer)
self.flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
ephemeral_gb=2, root_gb=5)
def test_enforce_num_instances_and_flavor_disabled(self):
self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
count = placement_limits.enforce_num_instances_and_flavor(
self.context, uuids.project_id, "flavor", False, 0, 42)
self.assertEqual(42, count)
@mock.patch('oslo_limit.limit.Enforcer')
def test_enforce_num_instances_and_flavor(self, mock_limit):
mock_enforcer = mock.MagicMock()
mock_limit.return_value = mock_enforcer
count = placement_limits.enforce_num_instances_and_flavor(
self.context, uuids.project_id, self.flavor, False, 0, 2)
self.assertEqual(2, count)
mock_limit.assert_called_once_with(mock.ANY)
mock_enforcer.enforce.assert_called_once_with(
uuids.project_id,
{'servers': 2, 'class:VCPU': 20, 'class:MEMORY_MB': 200,
'class:DISK_GB': 14})
@mock.patch('oslo_limit.limit.Enforcer')
def test_enforce_num_instances_and_flavor_recheck(self, mock_limit):
mock_enforcer = mock.MagicMock()
mock_limit.return_value = mock_enforcer
count = placement_limits.enforce_num_instances_and_flavor(
self.context, uuids.project_id, self.flavor, False, 0, 0)
self.assertEqual(0, count)
mock_limit.assert_called_once_with(mock.ANY)
mock_enforcer.enforce.assert_called_once_with(
uuids.project_id,
{'servers': 0, 'class:VCPU': 0, 'class:MEMORY_MB': 0,
'class:DISK_GB': 0})
@mock.patch('oslo_limit.limit.Enforcer')
def test_enforce_num_instances_and_flavor_retry(self, mock_limit):
mock_enforcer = mock.MagicMock()
mock_limit.return_value = mock_enforcer
over_limit_info_list = [
limit_exceptions.OverLimitInfo("class:VCPU", 12, 0, 30)
]
mock_enforcer.enforce.side_effect = [
limit_exceptions.ProjectOverLimit(
uuids.project_id, over_limit_info_list),
None]
count = placement_limits.enforce_num_instances_and_flavor(
self.context, uuids.project_id, self.flavor, True, 0, 3)
self.assertEqual(2, count)
self.assertEqual(2, mock_enforcer.enforce.call_count)
mock_enforcer.enforce.assert_called_with(
uuids.project_id,
{'servers': 2, 'class:VCPU': 20, 'class:MEMORY_MB': 200,
'class:DISK_GB': 4})
@mock.patch('oslo_limit.limit.Enforcer')
def test_enforce_num_instances_and_flavor_fails(self, mock_limit):
mock_enforcer = mock.MagicMock()
mock_limit.return_value = mock_enforcer
over_limit_info_list = [
limit_exceptions.OverLimitInfo("class:VCPU", 12, 0, 20),
limit_exceptions.OverLimitInfo("servers", 2, 1, 2)
]
expected = limit_exceptions.ProjectOverLimit(uuids.project_id,
over_limit_info_list)
mock_enforcer.enforce.side_effect = expected
# Verify that the oslo.limit ProjectOverLimit gets translated to a
# TooManyInstances that the API knows how to handle
e = self.assertRaises(
exception.TooManyInstances,
placement_limits.enforce_num_instances_and_flavor, self.context,
uuids.project_id, self.flavor, True, 2, 4)
self.assertEqual(str(expected), str(e))
self.assertEqual(3, mock_enforcer.enforce.call_count)
@mock.patch('oslo_limit.limit.Enforcer')
def test_enforce_num_instances_and_flavor_placement_fail(self, mock_limit):
mock_enforcer = mock.MagicMock()
mock_limit.return_value = mock_enforcer
mock_enforcer.enforce.side_effect = exception.UsagesRetrievalFailed(
'Failed to retrieve usages')
e = self.assertRaises(
exception.UsagesRetrievalFailed,
placement_limits.enforce_num_instances_and_flavor, self.context,
uuids.project, self.flavor, True, 0, 5)
expected = str(mock_enforcer.enforce.side_effect)
self.assertEqual(expected, str(e))
class GetLegacyLimitsTest(test.NoDBTestCase):
def setUp(self):
super(GetLegacyLimitsTest, self).setUp()
self.new = {"servers": 1, "class:VCPU": 2, "class:MEMORY_MB": 3}
self.legacy = {"instances": 1, "cores": 2, "ram": 3}
self.resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
self.resources.sort()
self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
def test_convert_keys_to_legacy_name(self):
limits = placement_limits._convert_keys_to_legacy_name(self.new)
self.assertEqual(self.legacy, limits)
def test_get_legacy_default_limits(self):
reglimits = {'servers': 1, 'class:VCPU': 2}
self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
limits = placement_limits.get_legacy_default_limits()
self.assertEqual({'cores': 2, 'instances': 1, 'ram': 0}, limits)
def test_get_legacy_project_limits(self):
reglimits = {'servers': 5, 'class:MEMORY_MB': 7}
projlimits = {uuids.project_id: {'servers': 1}}
self.useFixture(limit_fixture.LimitFixture(reglimits, projlimits))
limits = placement_limits.get_legacy_project_limits(uuids.project_id)
self.assertEqual({'instances': 1, 'cores': 0, 'ram': 7}, limits)
@mock.patch.object(report.SchedulerReportClient,
"get_usages_counts_for_limits")
@mock.patch.object(objects.InstanceMappingList, "get_counts")
@mock.patch.object(quota, "is_qfd_populated")
def test_get_legacy_counts(self, mock_qfd, mock_counts, mock_placement):
mock_qfd.return_value = True
mock_counts.return_value = {"project": {"instances": 1}}
mock_placement.return_value = {
"VCPU": 2, "CUSTOM_BAREMETAL": 2, "MEMORY_MB": 3,
}
counts = placement_limits.get_legacy_counts(
"context", uuids.project_id)
self.assertEqual(self.legacy, counts)
| 43.771186 | 79 | 0.648725 | [
"Apache-2.0"
] | bahnwaerter/nova | nova/tests/unit/limit/test_placement.py | 15,495 | Python |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djorgification.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34.125 | 78 | 0.690476 | [
"MIT"
] | sendsent/djorgification | manage.py | 546 | Python |
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as AddressProvider
class Provider(AddressProvider):
building_number_formats = ('###', '##', '#')
street_name_formats = ('{{street_prefix}}{{street_suffix}}', )
street_address_formats = ('{{street_name}} {{building_number}}',)
street_prefixes = (
'Björk', 'Järnvägs', 'Ring', 'Skol', 'Skogs', 'Ny', 'Gran', 'Idrotts',
'Stor', 'Kyrk', 'Industri', 'Park', 'Strand', 'Skol', 'Trädgårds',
'Industri', 'Ängs', 'Kyrko', 'Park', 'Villa', 'Ek', 'Kvarn', 'Stations',
'Back', 'Furu', 'Gen', 'Fabriks', 'Åker', 'Bäck', 'Asp'
)
street_suffixes = ('gatan', 'gatan', 'vägen', 'vägen',
'stigen', 'gränd', 'torget')
address_formats = ("{{street_address}}\n{{postcode}} {{city}}", )
postcode_formats = ('#####', )
city_formats = ('{{city_name}}', )
cities = (
'Stockholm', 'Göteborg', 'Malmö', 'Uppsala', 'Västerås', 'Örebro',
'Linköping', 'Helsingborg', 'Jönköping', 'Norrköping', 'Lund', 'Umeå',
'Gävle', 'Borås', 'Mölndal', 'Södertälje', 'Eskilstuna', 'Karlstad',
'Halmstad', 'Växjö', 'Sundsvall', 'Luleå', 'Trollhättan', 'Östersund',
'Borlänge', 'Falun', 'Kalmar', 'Skövde', 'Kristianstad', 'Karlskrona',
'Skellefteå', 'Uddevalla', 'Lidingö', 'Motala', 'Landskrona',
'Örnsköldsvik', 'Nyköping', 'Karlskoga', 'Varberg', 'Trelleborg',
'Lidköping', 'Alingsås', 'Piteå', 'Sandviken', 'Ängelholm'
)
countries = (
'Afghanistan', 'Albanien', 'Algeriet', 'Amerikanska Samoa', 'Andorra',
'Angola', 'Anguilla', 'Antarktis', 'Antigua och Barbuda', 'Argentina',
'Armenien', 'Aruba', 'Ascension', 'Australien', 'Azerbajdzjan',
'Bahamas', 'Bahrain', 'Bangladesh', 'Barbados', 'Belgien', 'Belize',
'Benin', 'Bermuda', 'Bhutan', 'Bolivia', 'Bosnien och Hercegovina',
'Botswana', 'Brasilien', 'Brittiska Jungfruöarna', 'Brunei',
'Bulgarien', 'Burkina Faso', 'Burma', 'Burundi', 'Caymanöarna',
'Centralafrikanska republiken', 'Chile', 'Colombia', 'Cooköarna',
'Costa Rica', 'Cypern', 'Danmark', 'Diego Garcia', 'Djibouti',
'Dominica', 'Dominikanska republiken', 'Ecuador', 'Egypten',
'Ekvatorialguinea', 'Elfenbenskusten', 'El Salvador', 'Eritrea',
'Estland', 'Etiopien', 'England', 'Falklandsöarna', 'Fiji',
'Filippinerna', 'Finland', 'Frankrike', 'Franska Guyana',
'Franska Polynesien', 'Färöarna', 'Förenade Arabemiraten', 'Gabon',
'Gambia', 'Georgien', 'Ghana', 'Gibraltar', 'Grekland', 'Grenada',
'Grönland', 'Guadeloupe', 'Guatemala', 'Guinea', 'Guinea-Bissau',
'Guyana', 'Haiti', 'Honduras', 'Hongkong', 'Indien', 'Indonesien',
'Irak', 'Iran', 'Irland', 'Island', 'Israel', 'Italien', 'Jamaica',
'Japan', 'Jemen', 'Jordanien', 'Kambodja', 'Kamerun', 'Kanada',
'Kap Verde', 'Kazakstan', 'Kenya', 'Kina', 'Kirgizistan', 'Kiribati',
'Komorerna', 'Kongo-Brazzaville', 'Kongo-Kinshasa', 'Kosovo',
'Kroatien', 'Kuba', 'Kuwait', 'Laos', 'Lesotho', 'Lettland', 'Libanon',
'Liberia', 'Libyen', 'Liechtenstein', 'Litauen', 'Luxemburg', 'Macao',
'Madagaskar', 'Makedonien', 'Malawi', 'Malaysia', 'Maldiverna', 'Mali',
'Malta', 'Marianerna', 'Marocko', 'Marshallöarna', 'Martinique',
'Mauretanien', 'Mauritius', 'Mayotte', 'Mexiko', 'Midwayöarna',
'Mikronesiens federerade stater', 'Moçambique', 'Moldavien', 'Monaco',
'Mongoliet', 'Montenegro', 'Montserrat', 'Namibia', 'Nauru',
'Nederländerna', 'Nederländska Antillerna', 'Nepal',
'Nicaragua', 'Niger', 'Nigeria', 'Niue', 'Nordkorea', 'Nordmarianerna',
'Norfolkön', 'Norge', 'Nya Kaledonien', 'Nya Zeeland', 'Oman',
'Pakistan', 'Palau', 'Palestina', 'Panama', 'Papua Nya Guinea',
'Paraguay', 'Peru', 'Pitcairnöarna', 'Polen', 'Portugal', 'Qatar',
'Réunion', 'Rumänien', 'Rwanda', 'Ryssland', 'Saint Kitts och Nevis',
'Saint Lucia', 'Saint-Pierre och Miquelon',
'Saint Vincent och Grenadinerna', 'Salomonöarna', 'Samoa',
'Sankta Helena', 'San Marino', 'São Tomé och Príncipe',
'Saudiarabien', 'Schweiz', 'Senegal', 'Serbien', 'Seychellerna',
'SierraLeone', 'Singapore', 'Sint Maarten', 'Slovakien', 'Slovenien',
'Somalia', 'Spanien', 'Sri Lanka', 'Storbritannien', 'Sudan',
'Surinam', 'Sverige', 'Swaziland', 'Sydafrika', 'Sydkorea', 'Sydsudan',
'Syrien', 'Tadzjikistan', 'Taiwan', 'Tanzania', 'Tchad', 'Thailand',
'Tjeckien', 'Togo', 'Tokelauöarna', 'Tonga', 'Trinidad och Tobago',
'Tunisien', 'Turkiet', 'Turkmenistan', 'Turks-och Caicosöarna',
'Tuvalu', 'Tyskland', 'Uganda', 'Ukraina', 'Ungern', 'Uruguay', 'USA',
'Uzbekistan', 'Vanuatu', 'Vatikanstaten', 'Venezuela', 'Vietnam',
'Vitryssland', 'Wake', 'Wallis-och Futunaöarna', 'Zambia', 'Zimbabwe',
'Österrike', 'Östtimor'
)
states = (
        'Stockholms län', 'Uppsala län', 'Södermanlands län',
'Östergötlands län', 'Jönköpings län', 'Kronobergs län', 'Kalmar län',
'Gotlands län', 'Blekinge län', 'Skåne län', 'Hallands län',
'Västra Götalands län', 'Värmlands län', 'Örebro län',
'Västmanlands län', 'Dalarnas län', 'Gävleborgs län',
'Västernorrlands län', 'Jämtlands län', 'Västerbottens län',
'Norrbottens län'
)
def street_prefix(self):
return self.random_element(self.street_prefixes)
def city_name(self):
return self.random_element(self.cities)
def state(self):
return self.random_element(self.states)
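# Example usage (illustrative sketch, assuming the standard Faker entry point):
#     from faker import Faker
#     fake = Faker("sv_SE")
#     fake.street_address()  # e.g. "Björkgatan 12"
#     fake.city_name()       # e.g. "Uppsala"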
| 51.567568 | 80 | 0.599581 | [
"BSD-3-Clause"
] | AMuratTuran/mkn | oscar/lib/python2.7/site-packages/faker/providers/address/sv_SE/__init__.py | 5,828 | Python |
from aioanticaptcha.antinetworking import *
import asyncio
class geetestProxyon(antiNetworking):
js_api_domain = ""
gt = ""
challenge = ""
geetest_lib = ""
async def solve_and_return_solution(self):
if (
await self.create_task(
{
"clientKey": self.client_key,
"task": {
"type": "GeeTestTask",
"websiteURL": self.website_url,
"gt": self.gt,
"challenge": self.challenge,
"geetestApiServerSubdomain": self.js_api_domain,
"geetestGetLib": self.geetest_lib,
"proxyType": self.proxy_type,
"proxyAddress": self.proxy_address,
"proxyPort": self.proxy_port,
"proxyLogin": self.proxy_login,
"proxyPassword": self.proxy_password,
"userAgent": self.user_agent,
},
}
)
== 1
):
self.log("created task with id " + str(self.task_id))
else:
self.log("could not create task")
self.log(self.err_string)
return 0
# checking result
await asyncio.sleep(3)
task_result = self.wait_for_result(600)
if task_result == 0:
return 0
else:
return task_result["solution"]
def set_gt_key(self, value):
self.gt = value
def set_challenge_key(self, value):
self.challenge = value
def set_js_api_domain(self, value):
self.js_api_domain = value
def set_geetest_lib(self, value):
self.geetest_lib = value
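# Illustrative usage (sketch; only attributes and methods referenced in this
# module are used, and every value below is a placeholder):
#     async def _solve():
#         solver = geetestProxyon()
#         solver.client_key = "API_KEY_PLACEHOLDER"
#         solver.website_url = "https://example.com/login"
#         solver.set_gt_key("GT_PLACEHOLDER")
#         solver.set_challenge_key("CHALLENGE_PLACEHOLDER")
#         return await solver.solve_and_return_solution()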
| 30.423729 | 72 | 0.494708 | [
"MIT"
] | andrersp/aioanticaptcha | aioanticaptcha/geetestproxyon.py | 1,795 | Python |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateDeploymentDetails(object):
"""
The information about new deployment.
"""
def __init__(self, **kwargs):
"""
Initializes a new CreateDeploymentDetails object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
        to a service operation then you should favor using a subclass over the base class:
* :class:`~oci.devops.models.CreateDeployPipelineRedeploymentDetails`
* :class:`~oci.devops.models.CreateDeployPipelineDeploymentDetails`
* :class:`~oci.devops.models.CreateSingleDeployStageDeploymentDetails`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param deploy_pipeline_id:
The value to assign to the deploy_pipeline_id property of this CreateDeploymentDetails.
:type deploy_pipeline_id: str
:param deployment_type:
The value to assign to the deployment_type property of this CreateDeploymentDetails.
:type deployment_type: str
:param display_name:
The value to assign to the display_name property of this CreateDeploymentDetails.
:type display_name: str
:param freeform_tags:
The value to assign to the freeform_tags property of this CreateDeploymentDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this CreateDeploymentDetails.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'deploy_pipeline_id': 'str',
'deployment_type': 'str',
'display_name': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'deploy_pipeline_id': 'deployPipelineId',
'deployment_type': 'deploymentType',
'display_name': 'displayName',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._deploy_pipeline_id = None
self._deployment_type = None
self._display_name = None
self._freeform_tags = None
self._defined_tags = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['deploymentType']
if type == 'PIPELINE_REDEPLOYMENT':
return 'CreateDeployPipelineRedeploymentDetails'
if type == 'PIPELINE_DEPLOYMENT':
return 'CreateDeployPipelineDeploymentDetails'
if type == 'SINGLE_STAGE_DEPLOYMENT':
return 'CreateSingleDeployStageDeploymentDetails'
else:
return 'CreateDeploymentDetails'
@property
def deploy_pipeline_id(self):
"""
**[Required]** Gets the deploy_pipeline_id of this CreateDeploymentDetails.
The OCID of a pipeline.
:return: The deploy_pipeline_id of this CreateDeploymentDetails.
:rtype: str
"""
return self._deploy_pipeline_id
@deploy_pipeline_id.setter
def deploy_pipeline_id(self, deploy_pipeline_id):
"""
Sets the deploy_pipeline_id of this CreateDeploymentDetails.
The OCID of a pipeline.
:param deploy_pipeline_id: The deploy_pipeline_id of this CreateDeploymentDetails.
:type: str
"""
self._deploy_pipeline_id = deploy_pipeline_id
@property
def deployment_type(self):
"""
**[Required]** Gets the deployment_type of this CreateDeploymentDetails.
Specifies type for this deployment.
:return: The deployment_type of this CreateDeploymentDetails.
:rtype: str
"""
return self._deployment_type
@deployment_type.setter
def deployment_type(self, deployment_type):
"""
Sets the deployment_type of this CreateDeploymentDetails.
Specifies type for this deployment.
:param deployment_type: The deployment_type of this CreateDeploymentDetails.
:type: str
"""
self._deployment_type = deployment_type
@property
def display_name(self):
"""
Gets the display_name of this CreateDeploymentDetails.
Deployment display name. Avoid entering confidential information.
:return: The display_name of this CreateDeploymentDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this CreateDeploymentDetails.
Deployment display name. Avoid entering confidential information.
:param display_name: The display_name of this CreateDeploymentDetails.
:type: str
"""
self._display_name = display_name
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this CreateDeploymentDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See `Resource Tags`__. Example: `{\"bar-key\": \"value\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this CreateDeploymentDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this CreateDeploymentDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See `Resource Tags`__. Example: `{\"bar-key\": \"value\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this CreateDeploymentDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this CreateDeploymentDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace. See `Resource Tags`__. Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this CreateDeploymentDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this CreateDeploymentDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace. See `Resource Tags`__. Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this CreateDeploymentDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
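# Illustrative construction (sketch; the OCID and names are placeholders):
#     details = CreateDeploymentDetails(
#         deploy_pipeline_id="ocid1.devopsdeploypipeline.oc1..example",
#         deployment_type="PIPELINE_DEPLOYMENT",
#         display_name="example-deployment",
#     )
# The @init_model_state_from_kwargs decorator copies these keyword arguments
# onto the model's attributes via the property setters defined above.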
| 35.495614 | 245 | 0.66922 | [
"Apache-2.0",
"BSD-3-Clause"
] | LaudateCorpus1/oci-python-sdk | src/oci/devops/models/create_deployment_details.py | 8,093 | Python |
import song_generator
from markov_gen import markov_generator
import os, json
nino_dir = '/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])
import generator
generator.set_dir_write_note(nino_dir + '/trainer/generated_notes')
def gen_kwargs():
kwargs = {
        #What the general scale for the song should be - chosen randomly from this list.
'song_scale' : ['major', 'minor'],
'block_same_note_range' : [0],
#How many segments the song has.
'number_of_segments_range' : [1],
'chord_exp_var': 2,
#The range of BPMs for each segment. Chooses randomly for each segment.
'bpm_range': [400],
#A range for beats per bar for each segment. Will choose randomly.
'beats_per_bar_range' : [4],
#A range for how many chords each segment should have. Chooses randomly from this list.
'chords_per_segment_range': [1],
#A list containing the program numbers of instruments that should be used. The program will choose randomly from these.
'instruments_range' : [1, 2],
#The number of instruments that will be active throughout the song.
'number_of_song_instruments_range' : [1],
#The bias_same_note chances for the main instruments. It's a list that is randomly chosen from.
'main_instrument_bias_same_note' : [0],
#The maximum pattern note length for the main instrument
'pattern_note_max_len_range' : 1,
#Each segment will have a different accent from the previous, determined by a random value from this list.
'accent_offset' : range(-5, 5),
#A list from accents from which segments will receive their base accent.
'default_accent_range' : range(70, 85),
#Volume to be added to percussions.
'percussion_accent_offset' : 10,
#Number of extra instruments per segment.
'no_segment_instruments_range' : [0],
#Range for the number of bars per segment. Will choose randomly from this list.
'number_segment_bars_range' : [32],
#Accent range offset for instrument specific blocks.
'block_default_accent_range' : range(-5, 5),
#Chance for each instrument to follow a pattern for the duration of the segment.
'segment_instrument_pattern_chance' : 1.0,
#Upper range for how long a pattern note can last. Should not be longer than the maximum amount of beats per bar for the instrument.
'pattern_note_len_range' : 1,
#And the lower range for how long the note can last. Should not be less than 1.
'pattern_note_min_len_range' : 1,
#The dir where the songs are saved.
'generate_dir' : nino_dir + '/trainer/',
#The directory for the soundfont. This is an example, and should be supplied for specific use cases.
'soundfont' : nino_dir + '/soundfonts/FluidR3_GM.sf2',
#The song generator will randomly repeat segments and then shuffle them. This is a range of the numbers of repeats for each segment.
'segment_shuffle_range' : [1],
        #We may want to have segments with few instruments and no drums. This is the chance that there are drums if the number of instruments is below the defined threshold.
'segment_percussion_chance': 0.0,
#If there's less than this many instruments in a segment, there's a chance (defined above) that there will be no percussion for that segment.
'skip_percussion_treshold' : 3,
'get_mp3' : True,
'dir_write_note' : nino_dir + '/trainer/generated_notes',
'markov_values' : nino_dir + '/trainer/results.json',
}
return kwargs
def main():
kwargs = gen_kwargs()
song_generator.generate_song(**kwargs)
markov_weight = float(raw_input('Enter a weight for the markov values. '))
generated_notes = ''
with open(nino_dir + '/trainer/generated_notes') as f:
generated_notes = '[' + f.read()[2:] + ']'
generated_notes = json.loads(generated_notes)
old_results = ''
with open(nino_dir + '/trainer/results.json') as f:
old_results = json.loads(f.read())
new_results = markov_generator.markov_from_values(old_results, generated_notes, 4, weight = markov_weight)
with open(nino_dir + '/trainer/results.json', 'w') as f:
f.write(json.dumps(new_results))
if __name__ == '__main__' :
main()
| 37.588235 | 177 | 0.667337 | [
"MIT"
] | NinoDoko/nino_pianino | ninopianino/markov_trainer.py | 4,473 | Python |
"""
Main agent for DQN
"""
import math
import random
import shutil
import gym
import torch
from tensorboardX import SummaryWriter
from torch.backends import cudnn
from tqdm import tqdm
from agents.base import BaseAgent
from graphs.losses.huber_loss import HuberLoss
from graphs.models.dqn import DQN
from utils.env_utils import CartPoleEnv
from utils.misc import print_cuda_statistics
from utils.replay_memory import ReplayMemory, Transition
cudnn.benchmark = True
class DQNAgent(BaseAgent):
def __init__(self, config):
super().__init__(config)
# define models (policy and target)
self.policy_model = DQN(self.config)
self.target_model = DQN(self.config)
# define memory
self.memory = ReplayMemory(self.config)
# define loss
self.loss = HuberLoss()
# define optimizer
self.optim = torch.optim.RMSprop(self.policy_model.parameters())
# define environment
self.env = gym.make('CartPole-v0').unwrapped
self.cartpole = CartPoleEnv(self.config.screen_width)
# initialize counter
self.current_episode = 0
self.current_iteration = 0
self.episode_durations = []
# set cuda flag
self.is_cuda = torch.cuda.is_available()
if self.is_cuda and not self.config.cuda:
self.logger.info("WARNING: You have a CUDA device, so you should probably enable CUDA")
self.cuda = self.is_cuda & self.config.cuda
if self.cuda:
self.device = torch.device("cuda")
torch.cuda.set_device(self.config.gpu_device)
self.logger.info("Program will run on *****GPU-CUDA***** ")
print_cuda_statistics()
else:
self.device = torch.device("cpu")
self.logger.info("Program will run on *****CPU***** ")
self.policy_model = self.policy_model.to(self.device)
self.target_model = self.target_model.to(self.device)
self.loss = self.loss.to(self.device)
# Initialize Target model with policy model state dict
self.target_model.load_state_dict(self.policy_model.state_dict())
self.target_model.eval()
# Summary Writer
self.summary_writer = SummaryWriter(log_dir=self.config.summary_dir, comment='DQN')
def load_checkpoint(self, file_name):
filename = self.config.checkpoint_dir + file_name
try:
self.logger.info("Loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename)
self.current_episode = checkpoint['episode']
self.current_iteration = checkpoint['iteration']
self.policy_model.load_state_dict(checkpoint['state_dict'])
self.optim.load_state_dict(checkpoint['optimizer'])
self.logger.info("Checkpoint loaded successfully from '{}' at (epoch {}) at (iteration {})\n"
.format(self.config.checkpoint_dir, checkpoint['episode'], checkpoint['iteration']))
except OSError as e:
self.logger.info("No checkpoint exists from '{}'. Skipping...".format(self.config.checkpoint_dir))
self.logger.info("**First time to train**")
def save_checkpoint(self, file_name="checkpoint.pth.tar", is_best=0):
state = {
'episode': self.current_episode,
'iteration': self.current_iteration,
'state_dict': self.policy_model.state_dict(),
'optimizer': self.optim.state_dict(),
}
# Save the state
torch.save(state, self.config.checkpoint_dir + file_name)
# If it is the best copy it to another file 'model_best.pth.tar'
if is_best:
shutil.copyfile(self.config.checkpoint_dir + file_name,
self.config.checkpoint_dir + 'model_best.pth.tar')
def run(self):
"""
        This function runs the operator.
:return:
"""
try:
self.train()
except KeyboardInterrupt:
self.logger.info("You have entered CTRL+C.. Wait to finalize")
def select_action(self, state):
"""
The action selection function, it either uses the model to choose an action or samples one uniformly.
:param state: current state of the model
:return:
"""
if self.cuda:
state = state.cuda()
sample = random.random()
        eps_threshold = self.config.eps_end + (self.config.eps_start - self.config.eps_end) * math.exp(
            -1. * self.current_iteration / self.config.eps_decay)
self.current_iteration += 1
if sample > eps_threshold:
with torch.no_grad():
return self.policy_model(state).max(1)[1].view(1, 1)
else:
return torch.tensor([[random.randrange(2)]], device=self.device, dtype=torch.long)
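    # Rough shape of the schedule (illustrative; assumes config values along the
    # lines of eps_start=0.9, eps_end=0.05, eps_decay=200): the threshold starts
    # near eps_start and decays exponentially towards eps_end, so early steps
    # favour random exploration and later steps favour the greedy action.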
def optimize_policy_model(self):
"""
performs a single step of optimization for the policy model
:return:
"""
if self.memory.length() < self.config.batch_size:
return
# sample a batch
transitions = self.memory.sample_batch(self.config.batch_size)
one_batch = Transition(*zip(*transitions))
# create a mask of non-final states
non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, one_batch.next_state)), device=self.device,dtype=torch.uint8)
non_final_next_states = torch.cat([s for s in one_batch.next_state if s is not None])
# concatenate all batch elements into one
state_batch = torch.cat(one_batch.state)
action_batch = torch.cat(one_batch.action)
reward_batch = torch.cat(one_batch.reward)
state_batch = state_batch.to(self.device)
non_final_next_states = non_final_next_states.to(self.device)
curr_state_values = self.policy_model(state_batch)
curr_state_action_values = curr_state_values.gather(1, action_batch)
next_state_values = torch.zeros(self.config.batch_size, device=self.device)
next_state_values[non_final_mask] = self.target_model(non_final_next_states).max(1)[0].detach()
# Get the expected Q values
expected_state_action_values = (next_state_values * self.config.gamma) + reward_batch
# compute loss: temporal difference error
loss = self.loss(curr_state_action_values, expected_state_action_values.unsqueeze(1))
# optimizer step
self.optim.zero_grad()
loss.backward()
for param in self.policy_model.parameters():
param.grad.data.clamp_(-1, 1)
self.optim.step()
return loss
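    # The step above implements the standard one-step TD target
    # y = r + gamma * max_a' Q_target(s', a'), with y = r for terminal states,
    # and minimises the Huber loss between y and Q_policy(s, a).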
def train(self):
"""
Training loop based on the number of episodes
:return:
"""
for episode in tqdm(range(self.current_episode, self.config.num_episodes)):
self.current_episode = episode
# reset environment
self.env.reset()
self.train_one_epoch()
# The target network has its weights kept frozen most of the time
if self.current_episode % self.config.target_update == 0:
self.target_model.load_state_dict(self.policy_model.state_dict())
self.env.render()
self.env.close()
def train_one_epoch(self):
"""
        One episode of training; it samples an action, observes the next screen and optimizes the model at each step
:return:
"""
episode_duration = 0
prev_frame = self.cartpole.get_screen(self.env)
curr_frame = self.cartpole.get_screen(self.env)
# get state
curr_state = curr_frame - prev_frame
while(1):
episode_duration += 1
# select action
action = self.select_action(curr_state)
# perform action and get reward
_, reward, done, _ = self.env.step(action.item())
            reward = torch.Tensor([reward]).to(self.device)
prev_frame = curr_frame
curr_frame = self.cartpole.get_screen(self.env)
# assign next state
if done:
next_state = None
else:
next_state = curr_frame - prev_frame
# add this transition into memory
self.memory.push_transition(curr_state, action, next_state, reward)
curr_state = next_state
# Policy model optimization step
curr_loss = self.optimize_policy_model()
if curr_loss is not None:
if self.cuda:
curr_loss = curr_loss.cpu()
self.summary_writer.add_scalar("Temporal_Difference_Loss", curr_loss.detach().numpy(), self.current_iteration)
# check if done
if done:
break
self.summary_writer.add_scalar("Training_Episode_Duration", episode_duration, self.current_episode)
def validate(self):
pass
def finalize(self):
"""
        Finalizes all the operations of the two main components of the process: the operator and the data loader
:return:
"""
self.logger.info("Please wait while finalizing the operation.. Thank you")
self.save_checkpoint()
self.summary_writer.export_scalars_to_json("{}all_scalars.json".format(self.config.summary_dir))
self.summary_writer.close()
| 37.30315 | 134 | 0.629868 | [
"MIT"
] | Cheng-XJTU/Pytorch-Project-Template | agents/dqn.py | 9,475 | Python |
a = [1, 1]
for i in range(10):
a.append(a[-1] + a[-2])
print a
| 10.714286 | 28 | 0.44 | [
"MIT"
] | amiraliakbari/sharif-mabani-python | by-session/ta-921/j5/list7.py | 75 | Python |
# Copyright (c) 2013, RC and contributors
# For license information, please see license.txt
import frappe
from frappe import _
def execute(filters=None):
columns = get_columns()
data = get_data(filters)
return columns, data
def get_columns():
return [
{
"fieldname": "po_number",
"fieldtype": "Data",
"label": "Po Number",
"width": 120
},
{
"fieldname": "ordered_qty",
"fieldtype": "Float",
"label": "Ordered Qty",
"width": 150
},
{
"fieldname": "received_qty",
"fieldtype": "Float",
"label": "Received Qty",
"width": 150
},
{
"fieldname": "pending_qty",
"fieldtype": "Float",
"label": "Pending Qty",
"width": 150
}
]
def get_data(filters):
if not filters.get('company'):
frappe.throw(_("Select Company!"))
if not filters.get('from_date'):
frappe.throw(_("Select From Date!"))
if not filters.get('to_date'):
frappe.throw(_("Select To Date!"))
query = """select po_number, sum(cust_total_box) as order_qty from `tabPurchase Order`
where company = '{0}' and transaction_date between '{1}' and '{2}'
and po_number is not null and po_number != 'PENDING'
and docstatus = 1""".format(filters.get('company'),filters.get('from_date'),filters.get('to_date'))
if filters.get('supplier'):
query += " and supplier = '{0}'".format(filters.get('supplier'))
query += " group by po_number"
po = frappe.db.sql(query, as_dict=True)
data = []
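	# for each PO number, sum the boxes received on submitted Purchase Invoices and report pending = ordered - received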
for res in po:
query1 = """select sum(boxes) from `tabPurchase Invoice` as pi
inner join `tabPurchase Invoice Item` as pii on pii.parent = pi.name
where company = '{0}' and pi.posting_date between '{1}' and '{2}'
and pi.po_number = '{3}'
and pi.docstatus = 1""".format(filters.get('company'), filters.get('from_date'),
filters.get('to_date'), res.po_number)
if filters.get('supplier'):
query1 += " and pi.supplier = '{0}'".format(filters.get('supplier'))
pi = float(frappe.db.sql(query1)[0][0] or 0)
data.append(frappe._dict({
"po_number": res.po_number,
"ordered_qty": res.order_qty,
"received_qty": pi,
"pending_qty": res.order_qty - pi
}))
return data
| 26.580247 | 103 | 0.640502 | [
"MIT"
] | Ehtasham-Muzaffar/turk | turk/turk/report/pending_order_detail/pending_order_detail.py | 2,153 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import string
from telethon import events
from telethon.utils import add_surrogate
from telethon.tl.types import MessageEntityPre
from telethon.tl.tlobject import TLObject
import datetime
PRINTABLE_SET = set(bytes(string.printable, 'ascii'))
STR_LEN_MAX = 256
BYTE_LEN_MAX = 64
def parse_pre(text):
text = text.strip()
return (
text,
[MessageEntityPre(offset=0, length=len(add_surrogate(text)), language='potato')]
)
def yaml_format(obj, indent=0):
"""
Pretty formats the given object as a YAML string which is returned.
(based on TLObject.pretty_format)
"""
result = []
if isinstance(obj, TLObject):
obj = obj.to_dict()
if isinstance(obj, dict):
result.append(obj.get('_', 'dict') + ':')
if obj:
items = obj.items()
has_multiple_items = len(items) > 2
if has_multiple_items:
result.append('\n')
indent += 2
for k, v in items:
if k == '_' or v is None:
continue
formatted = yaml_format(v, indent)
if not formatted.strip():
continue
result.append(' ' * (indent if has_multiple_items else 1))
result.append(f'{k}: {formatted}')
result.append('\n')
result.pop()
indent -= 2
result.append(' ' * indent)
elif isinstance(obj, str):
        # truncate long strings and display an ellipsis
result.append(repr(obj[:STR_LEN_MAX]))
if len(obj) > STR_LEN_MAX:
result.append('…')
elif isinstance(obj, bytes):
# repr() bytes if it's printable, hex like "FF EE BB" otherwise
if all(c in PRINTABLE_SET for c in obj):
result.append(repr(obj))
else:
if len(obj) > BYTE_LEN_MAX:
result.append('<…>')
else:
result.append(' '.join(f'{b:02X}' for b in obj))
elif isinstance(obj, datetime.datetime):
# ISO-8601 without timezone offset (telethon dates are always UTC)
result.append(obj.strftime('%Y-%m-%d %H:%M:%S'))
elif hasattr(obj, '__iter__'):
# display iterables one after another at the base indentation level
result.append('\n')
indent += 2
for x in obj:
result.append(' ' * indent)
result.append(yaml_format(x, indent))
result.append('\n')
result.pop()
indent -= 2
result.append(' ' * indent)
else:
result.append(repr(obj))
return ''.join(result)
@borg.on(events.NewMessage(pattern=r"\.new", outgoing=True))
async def _(event):
if not event.message.is_reply:
return
msg = await event.message.get_reply_message()
yaml_text = yaml_format(msg)
await event.edit(
yaml_text,
parse_mode=parse_pre
)
| 31.464646 | 88 | 0.579775 | [
"MIT"
] | Abhiramabr/weaponx | userbot/plugins/new.py | 3,119 | Python |
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from accounts.models import BookingAgent
class Station(models.Model):
name=models.CharField(max_length=30)
def __str__(self):
return self.name
# Create your models here.
class Train(models.Model):
id = models.AutoField(primary_key=True)
train_name = models.CharField(max_length=30)
source_station = models.ForeignKey(Station,on_delete=models.CASCADE,related_name='source_station')
dest_station = models.ForeignKey(Station,on_delete=models.CASCADE,related_name='dest_station')
def __str__(self):
return (
str(self.id)
+ " "
+ self.train_name
+ " "
+ self.source_station.name
+ " "
+ self.dest_station.name
)
class TrainSchedule(models.Model):
journey_id = models.AutoField(primary_key=True)
train = models.ForeignKey(Train, on_delete=models.CASCADE, blank=False, null=False)
journey_date = models.DateField(blank=False, null=False)
num_ac_coaches = models.IntegerField(
validators=[MaxValueValidator(100), MinValueValidator(1)], default=10
)
num_sleeper_coaches = models.IntegerField(
validators=[MaxValueValidator(100), MinValueValidator(1)], default=10
)
def __str__(self):
return (
self.train.train_name
+ " "
+ str(self.journey_date.strftime("%d/%m/%Y, %H:%M:%S"))
)
class Passenger(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=30)
age = models.IntegerField(validators=[MaxValueValidator(200), MinValueValidator(1)])
gender = models.CharField(
max_length=1, choices=[("M", "Male"), ("F", "Female"), ("O", "Other")]
)
def __str__(self):
return self.name
class Ticket(models.Model):
ticketId = models.AutoField(primary_key=True)
journey=models.ForeignKey(TrainSchedule,on_delete=models.CASCADE,blank=False,null=True,related_name='tickets')
seat_type=models.CharField(max_length=10,default="AC")
pnrNumber = models.CharField(max_length=12)
booking_agent = models.ForeignKey(
BookingAgent,
on_delete=models.CASCADE,
blank=False,
null=False,
related_name="tickets",
)
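    # a ticket can carry up to six passengers: passenger1 is mandatory, passengers 2-6 are optional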
passenger1 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=False,
null=False,
related_name="ticket1",
)
passenger2 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket2",
)
passenger3 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket3",
)
passenger4 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket4",
)
passenger5 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket5",
)
passenger6 = models.OneToOneField(
Passenger,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="ticket6",
)
def __str__(self):
return str(self.ticketId) + " " + self.pnrNumber
class BookingStatus(models.Model):
journey = models.ForeignKey(
TrainSchedule, on_delete=models.CASCADE, blank=False, null=False
)
noOfACSeatsRemaining = models.IntegerField()
noOfSleeperSeatsRemaining = models.IntegerField()
def __str__(self):
return str(self.journey.journey_id)+" "+str(self.noOfACSeatsRemaining)+" "+str(self.noOfSleeperSeatsRemaining)
class ACBookingStatus(models.Model):
journey = models.ForeignKey(
TrainSchedule, on_delete=models.CASCADE, blank=False, null=False
)
coachNumber = models.IntegerField()
seatNumber = models.IntegerField()
ticket = models.ForeignKey(
Ticket, on_delete=models.CASCADE, blank=False, null=False
)
passenger = models.ForeignKey(
Passenger, on_delete=models.CASCADE, blank=False, null=False
)
def __str__(self):
return str(self.journey.journey_id) + " " + str(self.coachNumber)+" "+str(self.seatNumber)
class SleeperBookingStatus(models.Model):
journey = models.ForeignKey(
TrainSchedule, on_delete=models.CASCADE, blank=False, null=False
)
coachNumber = models.IntegerField()
seatNumber = models.IntegerField()
ticket = models.ForeignKey(
Ticket, on_delete=models.CASCADE, blank=False, null=False
)
passenger = models.ForeignKey(
Passenger, on_delete=models.CASCADE, blank=False, null=False
)
def __str__(self):
        return str(self.journey.journey_id) + " " + str(self.coachNumber) + " " + str(self.seatNumber)
class CoachStructureAC(models.Model):
seatNumber = models.IntegerField()
seatType = models.CharField(max_length=2)
def __str__(self):
return str(self.seatNumber) + " " + self.seatType
class CoachStructureSleeper(models.Model):
seatNumber = models.IntegerField()
seatType = models.CharField(max_length=2)
def __str__(self):
        return str(self.seatNumber) + " " + self.seatType
| 30.559322 | 118 | 0.659641 | [
"MIT"
] | shobhit907/reserway | reserway/bookings/models.py | 5,409 | Python |
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.preprocessing.label import LabelBinarizer as LB
from dask.distributed import default_client
from cuml.dask.common.input_utils import _extract_partitions
from cuml.common import rmm_cupy_ary
import dask
import cupy as cp
class LabelBinarizer(object):
"""
A distributed version of LabelBinarizer for one-hot encoding
a collection of labels.
Examples
--------
Create an array with labels and dummy encode them
.. code-block:: python
import cupy as cp
from cuml.dask.preprocessing import LabelBinarizer
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
import dask
cluster = LocalCUDACluster()
client = Client(cluster)
labels = cp.asarray([0, 5, 10, 7, 2, 4, 1, 0, 0, 4, 3, 2, 1],
dtype=cp.int32)
labels = dask.array.from_array(labels)
lb = LabelBinarizer()
encoded = lb.fit_transform(labels)
        print(str(encoded.compute()))
decoded = lb.inverse_transform(encoded)
        print(str(decoded.compute()))
Output:
.. code-block::
[[1 0 0 0 0 0 0 0]
[0 0 0 0 0 1 0 0]
[0 0 0 0 0 0 0 1]
[0 0 0 0 0 0 1 0]
[0 0 1 0 0 0 0 0]
[0 0 0 0 1 0 0 0]
[0 1 0 0 0 0 0 0]
[1 0 0 0 0 0 0 0]
[1 0 0 0 0 0 0 0]
[0 0 0 0 1 0 0 0]
[0 0 0 1 0 0 0 0]
[0 0 1 0 0 0 0 0]
[0 1 0 0 0 0 0 0]]
[ 0 5 10 7 2 4 1 0 0 4 3 2 1]
"""
def __init__(self, client=None, **kwargs):
"""
Initialize new LabelBinarizer instance
Parameters
----------
client : dask.Client optional client to use
kwargs : dict of arguments to proxy to underlying single-process
LabelBinarizer
"""
self.client_ = client if client is not None else default_client()
self.kwargs = kwargs
# Sparse output will be added once sparse CuPy arrays are supported
# by Dask.Array: https://github.com/rapidsai/cuml/issues/1665
if "sparse_output" in self.kwargs and \
self.kwargs["sparse_output"] is True:
raise ValueError("Sparse output not yet "
"supported in distributed mode")
@staticmethod
def _func_create_model(**kwargs):
return LB(**kwargs)
@staticmethod
def _func_unique_classes(y):
return rmm_cupy_ary(cp.unique, y)
@staticmethod
def _func_xform(model, y):
xform_in = rmm_cupy_ary(cp.asarray, y, dtype=y.dtype)
return model.transform(xform_in)
@staticmethod
def _func_inv_xform(model, y, threshold):
y = rmm_cupy_ary(cp.asarray, y, dtype=y.dtype)
return model.inverse_transform(y, threshold)
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : Dask.Array of shape [n_samples,] or [n_samples, n_classes]
chunked by row.
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
# Take the unique classes and broadcast them all around the cluster.
futures = self.client_.sync(_extract_partitions, y)
unique = [self.client_.submit(LabelBinarizer._func_unique_classes, f)
for w, f in futures]
classes = self.client_.compute(unique, True)
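        # stack the gathered per-partition label arrays and keep only the unique values as the global class set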
self.classes_ = rmm_cupy_ary(cp.unique,
rmm_cupy_ary(cp.stack,
classes,
axis=0))
self.model = LB(**self.kwargs).fit(self.classes_)
return self
def fit_transform(self, y):
"""
Fit the label encoder and return transformed labels
Parameters
----------
y : Dask.Array of shape [n_samples,] or [n_samples, n_classes]
target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
arr : Dask.Array backed by CuPy arrays containing encoded labels
"""
return self.fit(y).transform(y)
def transform(self, y):
"""
Transform and return encoded labels
Parameters
----------
y : Dask.Array of shape [n_samples,] or [n_samples, n_classes]
Returns
-------
arr : Dask.Array backed by CuPy arrays containing encoded labels
"""
parts = self.client_.sync(_extract_partitions, y)
xform_func = dask.delayed(LabelBinarizer._func_xform)
meta = rmm_cupy_ary(cp.zeros, 1)
if self.model.sparse_output:
meta = cp.sparse.csr_matrix(meta)
f = [dask.array.from_delayed(xform_func(self.model, part),
meta=meta, dtype=cp.float32,
shape=(len(y), len(self.classes_))) for w, part in parts]
arr = dask.array.asarray(f)
return arr.reshape(arr.shape[1:])
def inverse_transform(self, y, threshold=None):
"""
Invert a set of encoded labels back to original labels
Parameters
----------
y : Dask.Array of shape [n_samples, n_classes] containing encoded
labels
threshold : float This value is currently ignored
Returns
-------
arr : Dask.Array backed by CuPy arrays containing original labels
"""
parts = self.client_.sync(_extract_partitions, y)
inv_func = dask.delayed(LabelBinarizer._func_inv_xform)
dtype = self.classes_.dtype
meta = rmm_cupy_ary(cp.zeros, 1, dtype=dtype)
f = [dask.array.from_delayed(
inv_func(self.model, part, threshold),
dtype=dtype, shape=(y.shape[0],), meta=meta)
for w, part in parts]
ret = dask.array.stack(f, axis=0)
return ret.reshape(ret.shape[1:])
| 28.696581 | 77 | 0.586746 | [
"Apache-2.0"
] | Chetank99/cuml | python/cuml/dask/preprocessing/label.py | 6,715 | Python |
#!/usr/bin/env python
#
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Check VINTF compatibility from a target files package.
Usage: check_target_files_vintf target_files
target_files can be a ZIP file or an extracted target files directory.
"""
import logging
import subprocess
import sys
import os
import zipfile
import common
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
# Keys are paths that VINTF searches. Must keep in sync with libvintf's search
# paths (VintfObject.cpp).
# These paths are stored in different directories in target files package, so
# we have to search for the correct path and tell checkvintf to remap them.
# Look for TARGET_COPY_OUT_* variables in board_config.mk for possible paths for
# each partition.
DIR_SEARCH_PATHS = {
'/system': ('SYSTEM',),
'/vendor': ('VENDOR', 'SYSTEM/vendor'),
'/product': ('PRODUCT', 'SYSTEM/product'),
'/odm': ('ODM', 'VENDOR/odm', 'SYSTEM/vendor/odm'),
'/system_ext': ('SYSTEM_EXT', 'SYSTEM/system_ext'),
# vendor_dlkm does not have VINTF files.
}
UNZIP_PATTERN = ['META/*', '*/build.prop']
def GetDirmap(input_tmp):
dirmap = {}
for device_path, target_files_rel_paths in DIR_SEARCH_PATHS.items():
for target_files_rel_path in target_files_rel_paths:
target_files_path = os.path.join(input_tmp, target_files_rel_path)
if os.path.isdir(target_files_path):
dirmap[device_path] = target_files_path
break
if device_path not in dirmap:
raise ValueError("Can't determine path for device path " + device_path +
". Searched the following:" +
("\n".join(target_files_rel_paths)))
return dirmap
def GetArgsForSkus(info_dict):
odm_skus = info_dict.get('vintf_odm_manifest_skus', '').strip().split()
if info_dict.get('vintf_include_empty_odm_sku', '') == "true" or not odm_skus:
odm_skus += ['']
vendor_skus = info_dict.get('vintf_vendor_manifest_skus', '').strip().split()
if info_dict.get('vintf_include_empty_vendor_sku', '') == "true" or \
not vendor_skus:
vendor_skus += ['']
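  # emit one set of --property arguments per (odm sku, vendor sku) combination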
return [['--property', 'ro.boot.product.hardware.sku=' + odm_sku,
'--property', 'ro.boot.product.vendor.sku=' + vendor_sku]
for odm_sku in odm_skus for vendor_sku in vendor_skus]
def GetArgsForShippingApiLevel(info_dict):
shipping_api_level = info_dict['vendor.build.prop'].GetProp(
'ro.product.first_api_level')
if not shipping_api_level:
logger.warning('Cannot determine ro.product.first_api_level')
return []
return ['--property', 'ro.product.first_api_level=' + shipping_api_level]
def GetArgsForKernel(input_tmp):
version_path = os.path.join(input_tmp, 'META/kernel_version.txt')
config_path = os.path.join(input_tmp, 'META/kernel_configs.txt')
if not os.path.isfile(version_path) or not os.path.isfile(config_path):
logger.info('Skipping kernel config checks because '
'PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS is not set')
return []
with open(version_path) as f:
version = f.read().strip()
return ['--kernel', '{}:{}'.format(version, config_path)]
def CheckVintfFromExtractedTargetFiles(input_tmp, info_dict=None):
"""
Checks VINTF metadata of an extracted target files directory.
Args:
inp: path to the directory that contains the extracted target files archive.
info_dict: The build-time info dict. If None, it will be loaded from inp.
Returns:
True if VINTF check is skipped or compatible, False if incompatible. Raise
a RuntimeError if any error occurs.
"""
if info_dict is None:
info_dict = common.LoadInfoDict(input_tmp)
if info_dict.get('vintf_enforce') != 'true':
logger.warning('PRODUCT_ENFORCE_VINTF_MANIFEST is not set, skipping checks')
return True
dirmap = GetDirmap(input_tmp)
args_for_skus = GetArgsForSkus(info_dict)
shipping_api_level_args = GetArgsForShippingApiLevel(info_dict)
kernel_args = GetArgsForKernel(input_tmp)
common_command = [
'checkvintf',
'--check-compat',
]
for device_path, real_path in dirmap.items():
common_command += ['--dirmap', '{}:{}'.format(device_path, real_path)]
common_command += kernel_args
common_command += shipping_api_level_args
success = True
for sku_args in args_for_skus:
command = common_command + sku_args
proc = common.Run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode == 0:
logger.info("Command `%s` returns 'compatible'", ' '.join(command))
elif out.strip() == "INCOMPATIBLE":
logger.info("Command `%s` returns 'incompatible'", ' '.join(command))
success = False
else:
raise common.ExternalError(
"Failed to run command '{}' (exit code {}):\nstdout:{}\nstderr:{}"
.format(' '.join(command), proc.returncode, out, err))
logger.info("stdout: %s", out)
logger.info("stderr: %s", err)
return success
def GetVintfFileList():
"""
Returns a list of VINTF metadata files that should be read from a target files
package before executing checkvintf.
"""
def PathToPatterns(path):
if path[-1] == '/':
path += '*'
for device_path, target_files_rel_paths in DIR_SEARCH_PATHS.items():
if path.startswith(device_path):
suffix = path[len(device_path):]
return [rel_path + suffix for rel_path in target_files_rel_paths]
raise RuntimeError('Unrecognized path from checkvintf --dump-file-list: ' +
path)
out = common.RunAndCheckOutput(['checkvintf', '--dump-file-list'])
paths = out.strip().split('\n')
paths = sum((PathToPatterns(path) for path in paths if path), [])
return paths
def CheckVintfFromTargetFiles(inp, info_dict=None):
"""
Checks VINTF metadata of a target files zip.
Args:
inp: path to the target files archive.
info_dict: The build-time info dict. If None, it will be loaded from inp.
Returns:
True if VINTF check is skipped or compatible, False if incompatible. Raise
a RuntimeError if any error occurs.
"""
input_tmp = common.UnzipTemp(inp, GetVintfFileList() + UNZIP_PATTERN)
return CheckVintfFromExtractedTargetFiles(input_tmp, info_dict)
def CheckVintf(inp, info_dict=None):
"""
Checks VINTF metadata of a target files zip or extracted target files
directory.
Args:
inp: path to the (possibly extracted) target files archive.
info_dict: The build-time info dict. If None, it will be loaded from inp.
Returns:
True if VINTF check is skipped or compatible, False if incompatible. Raise
a RuntimeError if any error occurs.
"""
if os.path.isdir(inp):
logger.info('Checking VINTF compatibility extracted target files...')
return CheckVintfFromExtractedTargetFiles(inp, info_dict)
if zipfile.is_zipfile(inp):
logger.info('Checking VINTF compatibility target files...')
return CheckVintfFromTargetFiles(inp, info_dict)
raise ValueError('{} is not a valid directory or zip file'.format(inp))
def main(argv):
args = common.ParseOptions(argv, __doc__)
if len(args) != 1:
common.Usage(__doc__)
sys.exit(1)
common.InitLogging()
if not CheckVintf(args[0]):
sys.exit(1)
if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
except common.ExternalError:
logger.exception('\n ERROR:\n')
sys.exit(1)
finally:
common.Cleanup()
| 32.962963 | 80 | 0.705243 | [
"MIT"
] | FabriSC/Alioth-SC | tools/check_target_files_vintf.py | 8,010 | Python |
# coding: utf-8
import re
from sqlalchemy import BigInteger
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import DDL
from sqlalchemy import DefaultClause
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import LargeBinary
from sqlalchemy import MetaData
from sqlalchemy import NCHAR
from sqlalchemy import select
from sqlalchemy import SmallInteger
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import Text
from sqlalchemy import TIMESTAMP
from sqlalchemy import Unicode
from sqlalchemy import UnicodeText
from sqlalchemy import UniqueConstraint
from sqlalchemy import util
from sqlalchemy.dialects.mysql import base as mysql
from sqlalchemy.dialects.mysql import reflection as _reflection
from sqlalchemy.schema import CreateIndex
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
class TypeReflectionTest(fixtures.TestBase):
__only_on__ = "mysql"
__backend__ = True
@testing.provide_metadata
def _run_test(self, specs, attributes):
columns = [Column("c%i" % (i + 1), t[0]) for i, t in enumerate(specs)]
# Early 5.0 releases seem to report more "general" for columns
# in a view, e.g. char -> varchar, tinyblob -> mediumblob
use_views = testing.db.dialect.server_version_info > (5, 0, 10)
m = self.metadata
Table("mysql_types", m, *columns)
if use_views:
event.listen(
m,
"after_create",
DDL(
"CREATE OR REPLACE VIEW mysql_types_v "
"AS SELECT * from mysql_types"
),
)
event.listen(
m, "before_drop", DDL("DROP VIEW IF EXISTS mysql_types_v")
)
m.create_all()
m2 = MetaData(testing.db)
tables = [Table("mysql_types", m2, autoload=True)]
if use_views:
tables.append(Table("mysql_types_v", m2, autoload=True))
for table in tables:
for i, (reflected_col, spec) in enumerate(zip(table.c, specs)):
expected_spec = spec[1]
reflected_type = reflected_col.type
is_(type(reflected_type), type(expected_spec))
for attr in attributes:
eq_(
getattr(reflected_type, attr),
getattr(expected_spec, attr),
"Column %s: Attribute %s value of %s does not "
"match %s for type %s"
% (
"c%i" % (i + 1),
attr,
getattr(reflected_type, attr),
getattr(expected_spec, attr),
spec[0],
),
)
def test_time_types(self):
specs = []
if testing.requires.mysql_fsp.enabled:
fsps = [None, 0, 5]
else:
fsps = [None]
for type_ in (mysql.TIMESTAMP, mysql.DATETIME, mysql.TIME):
# MySQL defaults fsp to 0, and if 0 does not report it.
# we don't actually render 0 right now in DDL but even if we do,
# it comes back blank
for fsp in fsps:
if fsp:
specs.append((type_(fsp=fsp), type_(fsp=fsp)))
else:
specs.append((type_(), type_()))
specs.extend(
[(TIMESTAMP(), mysql.TIMESTAMP()), (DateTime(), mysql.DATETIME())]
)
# note 'timezone' should always be None on both
self._run_test(specs, ["fsp", "timezone"])
def test_year_types(self):
specs = [
(mysql.YEAR(), mysql.YEAR(display_width=4)),
(mysql.YEAR(display_width=4), mysql.YEAR(display_width=4)),
]
self._run_test(specs, ["display_width"])
def test_string_types(self):
specs = [
(String(1), mysql.MSString(1)),
(String(3), mysql.MSString(3)),
(Text(), mysql.MSText()),
(Unicode(1), mysql.MSString(1)),
(Unicode(3), mysql.MSString(3)),
(UnicodeText(), mysql.MSText()),
(mysql.MSChar(1), mysql.MSChar(1)),
(mysql.MSChar(3), mysql.MSChar(3)),
(NCHAR(2), mysql.MSChar(2)),
(mysql.MSNChar(2), mysql.MSChar(2)),
(mysql.MSNVarChar(22), mysql.MSString(22)),
]
self._run_test(specs, ["length"])
def test_integer_types(self):
specs = []
for type_ in [
mysql.TINYINT,
mysql.SMALLINT,
mysql.MEDIUMINT,
mysql.INTEGER,
mysql.BIGINT,
]:
for display_width in [None, 4, 7]:
for unsigned in [False, True]:
for zerofill in [None, True]:
kw = {}
if display_width:
kw["display_width"] = display_width
if unsigned is not None:
kw["unsigned"] = unsigned
if zerofill is not None:
kw["zerofill"] = zerofill
zerofill = bool(zerofill)
source_type = type_(**kw)
if display_width is None:
display_width = {
mysql.MEDIUMINT: 9,
mysql.SMALLINT: 6,
mysql.TINYINT: 4,
mysql.INTEGER: 11,
mysql.BIGINT: 20,
}[type_]
if zerofill:
unsigned = True
expected_type = type_(
display_width=display_width,
unsigned=unsigned,
zerofill=zerofill,
)
specs.append((source_type, expected_type))
specs.extend(
[
(SmallInteger(), mysql.SMALLINT(display_width=6)),
(Integer(), mysql.INTEGER(display_width=11)),
(BigInteger, mysql.BIGINT(display_width=20)),
]
)
self._run_test(specs, ["display_width", "unsigned", "zerofill"])
def test_binary_types(self):
specs = [
(LargeBinary(3), mysql.TINYBLOB()),
(LargeBinary(), mysql.BLOB()),
(mysql.MSBinary(3), mysql.MSBinary(3)),
(mysql.MSVarBinary(3), mysql.MSVarBinary(3)),
(mysql.MSTinyBlob(), mysql.MSTinyBlob()),
(mysql.MSBlob(), mysql.MSBlob()),
(mysql.MSBlob(1234), mysql.MSBlob()),
(mysql.MSMediumBlob(), mysql.MSMediumBlob()),
(mysql.MSLongBlob(), mysql.MSLongBlob()),
]
self._run_test(specs, [])
@testing.uses_deprecated("Manually quoting ENUM value literals")
def test_legacy_enum_types(self):
specs = [(mysql.ENUM("''", "'fleem'"), mysql.ENUM("''", "'fleem'"))]
self._run_test(specs, ["enums"])
class ReflectionTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "mysql"
__backend__ = True
def test_default_reflection(self):
"""Test reflection of column defaults."""
from sqlalchemy.dialects.mysql import VARCHAR
def_table = Table(
"mysql_def",
MetaData(testing.db),
Column(
"c1",
VARCHAR(10, collation="utf8_unicode_ci"),
DefaultClause(""),
nullable=False,
),
Column("c2", String(10), DefaultClause("0")),
Column("c3", String(10), DefaultClause("abc")),
Column("c4", TIMESTAMP, DefaultClause("2009-04-05 12:00:00")),
Column("c5", TIMESTAMP),
Column(
"c6",
TIMESTAMP,
DefaultClause(
sql.text(
"CURRENT_TIMESTAMP " "ON UPDATE CURRENT_TIMESTAMP"
)
),
),
)
def_table.create()
try:
reflected = Table("mysql_def", MetaData(testing.db), autoload=True)
finally:
def_table.drop()
assert def_table.c.c1.server_default.arg == ""
assert def_table.c.c2.server_default.arg == "0"
assert def_table.c.c3.server_default.arg == "abc"
assert def_table.c.c4.server_default.arg == "2009-04-05 12:00:00"
assert str(reflected.c.c1.server_default.arg) == "''"
assert str(reflected.c.c2.server_default.arg) == "'0'"
assert str(reflected.c.c3.server_default.arg) == "'abc'"
assert (
str(reflected.c.c4.server_default.arg) == "'2009-04-05 12:00:00'"
)
assert reflected.c.c5.default is None
assert reflected.c.c5.server_default is None
assert reflected.c.c6.default is None
assert re.match(
r"CURRENT_TIMESTAMP(\(\))? ON UPDATE CURRENT_TIMESTAMP(\(\))?",
str(reflected.c.c6.server_default.arg).upper(),
)
reflected.create()
try:
reflected2 = Table(
"mysql_def", MetaData(testing.db), autoload=True
)
finally:
reflected.drop()
assert str(reflected2.c.c1.server_default.arg) == "''"
assert str(reflected2.c.c2.server_default.arg) == "'0'"
assert str(reflected2.c.c3.server_default.arg) == "'abc'"
assert (
str(reflected2.c.c4.server_default.arg) == "'2009-04-05 12:00:00'"
)
assert reflected.c.c5.default is None
assert reflected.c.c5.server_default is None
assert reflected.c.c6.default is None
assert re.match(
r"CURRENT_TIMESTAMP(\(\))? ON UPDATE CURRENT_TIMESTAMP(\(\))?",
str(reflected.c.c6.server_default.arg).upper(),
)
def test_reflection_with_table_options(self):
comment = r"""Comment types type speedily ' " \ '' Fun!"""
def_table = Table(
"mysql_def",
MetaData(testing.db),
Column("c1", Integer()),
mysql_engine="MEMORY",
comment=comment,
mysql_default_charset="utf8",
mysql_auto_increment="5",
mysql_avg_row_length="3",
mysql_password="secret",
mysql_connection="fish",
)
def_table.create()
try:
reflected = Table("mysql_def", MetaData(testing.db), autoload=True)
finally:
def_table.drop()
assert def_table.kwargs["mysql_engine"] == "MEMORY"
assert def_table.comment == comment
assert def_table.kwargs["mysql_default_charset"] == "utf8"
assert def_table.kwargs["mysql_auto_increment"] == "5"
assert def_table.kwargs["mysql_avg_row_length"] == "3"
assert def_table.kwargs["mysql_password"] == "secret"
assert def_table.kwargs["mysql_connection"] == "fish"
assert reflected.kwargs["mysql_engine"] == "MEMORY"
assert reflected.comment == comment
assert reflected.kwargs["mysql_comment"] == comment
assert reflected.kwargs["mysql_default charset"] == "utf8"
assert reflected.kwargs["mysql_avg_row_length"] == "3"
assert reflected.kwargs["mysql_connection"] == "fish"
# This field doesn't seem to be returned by mysql itself.
# assert reflected.kwargs['mysql_password'] == 'secret'
# This is explicitly ignored when reflecting schema.
# assert reflected.kwargs['mysql_auto_increment'] == '5'
def test_reflection_on_include_columns(self):
"""Test reflection of include_columns to be sure they respect case."""
case_table = Table(
"mysql_case",
MetaData(testing.db),
Column("c1", String(10)),
Column("C2", String(10)),
Column("C3", String(10)),
)
try:
case_table.create()
reflected = Table(
"mysql_case",
MetaData(testing.db),
autoload=True,
include_columns=["c1", "C2"],
)
for t in case_table, reflected:
assert "c1" in t.c.keys()
assert "C2" in t.c.keys()
reflected2 = Table(
"mysql_case",
MetaData(testing.db),
autoload=True,
include_columns=["c1", "c2"],
)
assert "c1" in reflected2.c.keys()
for c in ["c2", "C2", "C3"]:
assert c not in reflected2.c.keys()
finally:
case_table.drop()
def test_autoincrement(self):
meta = MetaData(testing.db)
try:
Table(
"ai_1",
meta,
Column("int_y", Integer, primary_key=True, autoincrement=True),
Column("int_n", Integer, DefaultClause("0"), primary_key=True),
mysql_engine="MyISAM",
)
Table(
"ai_2",
meta,
Column("int_y", Integer, primary_key=True, autoincrement=True),
Column("int_n", Integer, DefaultClause("0"), primary_key=True),
mysql_engine="MyISAM",
)
Table(
"ai_3",
meta,
Column(
"int_n",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
Column("int_y", Integer, primary_key=True, autoincrement=True),
mysql_engine="MyISAM",
)
Table(
"ai_4",
meta,
Column(
"int_n",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
Column(
"int_n2",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
mysql_engine="MyISAM",
)
Table(
"ai_5",
meta,
Column("int_y", Integer, primary_key=True, autoincrement=True),
Column(
"int_n",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
mysql_engine="MyISAM",
)
Table(
"ai_6",
meta,
Column("o1", String(1), DefaultClause("x"), primary_key=True),
Column("int_y", Integer, primary_key=True, autoincrement=True),
mysql_engine="MyISAM",
)
Table(
"ai_7",
meta,
Column("o1", String(1), DefaultClause("x"), primary_key=True),
Column("o2", String(1), DefaultClause("x"), primary_key=True),
Column("int_y", Integer, primary_key=True, autoincrement=True),
mysql_engine="MyISAM",
)
Table(
"ai_8",
meta,
Column("o1", String(1), DefaultClause("x"), primary_key=True),
Column("o2", String(1), DefaultClause("x"), primary_key=True),
mysql_engine="MyISAM",
)
meta.create_all()
table_names = [
"ai_1",
"ai_2",
"ai_3",
"ai_4",
"ai_5",
"ai_6",
"ai_7",
"ai_8",
]
mr = MetaData(testing.db)
mr.reflect(only=table_names)
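            # every int_y* column must reflect as autoincrement, every int_n* column must not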
for tbl in [mr.tables[name] for name in table_names]:
for c in tbl.c:
if c.name.startswith("int_y"):
assert c.autoincrement
elif c.name.startswith("int_n"):
assert not c.autoincrement
tbl.insert().execute()
if "int_y" in tbl.c:
assert select([tbl.c.int_y]).scalar() == 1
assert list(tbl.select().execute().first()).count(1) == 1
else:
assert 1 not in list(tbl.select().execute().first())
finally:
meta.drop_all()
@testing.provide_metadata
def test_view_reflection(self):
Table(
"x", self.metadata, Column("a", Integer), Column("b", String(50))
)
self.metadata.create_all()
with testing.db.connect() as conn:
conn.execute("CREATE VIEW v1 AS SELECT * FROM x")
conn.execute("CREATE ALGORITHM=MERGE VIEW v2 AS SELECT * FROM x")
conn.execute(
"CREATE ALGORITHM=UNDEFINED VIEW v3 AS SELECT * FROM x"
)
conn.execute(
"CREATE DEFINER=CURRENT_USER VIEW v4 AS SELECT * FROM x"
)
@event.listens_for(self.metadata, "before_drop")
def cleanup(*arg, **kw):
with testing.db.connect() as conn:
for v in ["v1", "v2", "v3", "v4"]:
conn.execute("DROP VIEW %s" % v)
insp = inspect(testing.db)
for v in ["v1", "v2", "v3", "v4"]:
eq_(
[
(col["name"], col["type"].__class__)
for col in insp.get_columns(v)
],
[("a", mysql.INTEGER), ("b", mysql.VARCHAR)],
)
@testing.provide_metadata
def test_skip_not_describable(self):
@event.listens_for(self.metadata, "before_drop")
def cleanup(*arg, **kw):
with testing.db.connect() as conn:
conn.execute("DROP TABLE IF EXISTS test_t1")
conn.execute("DROP TABLE IF EXISTS test_t2")
conn.execute("DROP VIEW IF EXISTS test_v")
with testing.db.connect() as conn:
conn.execute("CREATE TABLE test_t1 (id INTEGER)")
conn.execute("CREATE TABLE test_t2 (id INTEGER)")
conn.execute("CREATE VIEW test_v AS SELECT id FROM test_t1")
conn.execute("DROP TABLE test_t1")
m = MetaData()
with expect_warnings(
"Skipping .* Table or view named .?test_v.? could not be "
"reflected: .* references invalid table"
):
m.reflect(views=True, bind=conn)
eq_(m.tables["test_t2"].name, "test_t2")
assert_raises_message(
exc.UnreflectableTableError,
"references invalid table",
Table,
"test_v",
MetaData(),
autoload_with=conn,
)
@testing.exclude("mysql", "<", (5, 0, 0), "no information_schema support")
def test_system_views(self):
dialect = testing.db.dialect
connection = testing.db.connect()
view_names = dialect.get_view_names(connection, "information_schema")
self.assert_("TABLES" in view_names)
@testing.provide_metadata
def test_nullable_reflection(self):
"""test reflection of NULL/NOT NULL, in particular with TIMESTAMP
defaults where MySQL is inconsistent in how it reports CREATE TABLE.
"""
meta = self.metadata
# this is ideally one table, but older MySQL versions choke
# on the multiple TIMESTAMP columns
row = testing.db.execute(
"show variables like '%%explicit_defaults_for_timestamp%%'"
).first()
explicit_defaults_for_timestamp = row[1].lower() in ("on", "1", "true")
reflected = []
for idx, cols in enumerate(
[
[
"x INTEGER NULL",
"y INTEGER NOT NULL",
"z INTEGER",
"q TIMESTAMP NULL",
],
["p TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP"],
["r TIMESTAMP NOT NULL"],
["s TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP"],
["t TIMESTAMP"],
["u TIMESTAMP DEFAULT CURRENT_TIMESTAMP"],
]
):
Table("nn_t%d" % idx, meta) # to allow DROP
testing.db.execute(
"""
CREATE TABLE nn_t%d (
%s
)
"""
% (idx, ", \n".join(cols))
)
reflected.extend(
{
"name": d["name"],
"nullable": d["nullable"],
"default": d["default"],
}
for d in inspect(testing.db).get_columns("nn_t%d" % idx)
)
if testing.db.dialect._is_mariadb_102:
current_timestamp = "current_timestamp()"
else:
current_timestamp = "CURRENT_TIMESTAMP"
eq_(
reflected,
[
{"name": "x", "nullable": True, "default": None},
{"name": "y", "nullable": False, "default": None},
{"name": "z", "nullable": True, "default": None},
{"name": "q", "nullable": True, "default": None},
{"name": "p", "nullable": True, "default": current_timestamp},
{
"name": "r",
"nullable": False,
"default": None
if explicit_defaults_for_timestamp
else (
"%(current_timestamp)s "
"ON UPDATE %(current_timestamp)s"
)
% {"current_timestamp": current_timestamp},
},
{"name": "s", "nullable": False, "default": current_timestamp},
{
"name": "t",
"nullable": True
if explicit_defaults_for_timestamp
else False,
"default": None
if explicit_defaults_for_timestamp
else (
"%(current_timestamp)s "
"ON UPDATE %(current_timestamp)s"
)
% {"current_timestamp": current_timestamp},
},
{
"name": "u",
"nullable": True
if explicit_defaults_for_timestamp
else False,
"default": current_timestamp,
},
],
)
@testing.provide_metadata
def test_reflection_with_unique_constraint(self):
insp = inspect(testing.db)
meta = self.metadata
uc_table = Table(
"mysql_uc",
meta,
Column("a", String(10)),
UniqueConstraint("a", name="uc_a"),
)
uc_table.create()
# MySQL converts unique constraints into unique indexes.
# separately we get both
indexes = dict((i["name"], i) for i in insp.get_indexes("mysql_uc"))
constraints = set(
i["name"] for i in insp.get_unique_constraints("mysql_uc")
)
self.assert_("uc_a" in indexes)
self.assert_(indexes["uc_a"]["unique"])
self.assert_("uc_a" in constraints)
# reflection here favors the unique index, as that's the
# more "official" MySQL construct
reflected = Table("mysql_uc", MetaData(testing.db), autoload=True)
indexes = dict((i.name, i) for i in reflected.indexes)
constraints = set(uc.name for uc in reflected.constraints)
self.assert_("uc_a" in indexes)
self.assert_(indexes["uc_a"].unique)
self.assert_("uc_a" not in constraints)
@testing.provide_metadata
def test_reflect_fulltext(self):
mt = Table(
"mytable",
self.metadata,
Column("id", Integer, primary_key=True),
Column("textdata", String(50)),
mysql_engine="InnoDB",
)
Index("textdata_ix", mt.c.textdata, mysql_prefix="FULLTEXT")
self.metadata.create_all(testing.db)
mt = Table("mytable", MetaData(), autoload_with=testing.db)
idx = list(mt.indexes)[0]
eq_(idx.name, "textdata_ix")
eq_(idx.dialect_options["mysql"]["prefix"], "FULLTEXT")
self.assert_compile(
CreateIndex(idx),
"CREATE FULLTEXT INDEX textdata_ix ON mytable (textdata)",
)
@testing.requires.mysql_ngram_fulltext
@testing.provide_metadata
def test_reflect_fulltext_comment(self):
mt = Table(
"mytable",
self.metadata,
Column("id", Integer, primary_key=True),
Column("textdata", String(50)),
mysql_engine="InnoDB",
)
Index(
"textdata_ix",
mt.c.textdata,
mysql_prefix="FULLTEXT",
mysql_with_parser="ngram",
)
self.metadata.create_all(testing.db)
mt = Table("mytable", MetaData(), autoload_with=testing.db)
idx = list(mt.indexes)[0]
eq_(idx.name, "textdata_ix")
eq_(idx.dialect_options["mysql"]["prefix"], "FULLTEXT")
eq_(idx.dialect_options["mysql"]["with_parser"], "ngram")
self.assert_compile(
CreateIndex(idx),
"CREATE FULLTEXT INDEX textdata_ix ON mytable "
"(textdata) WITH PARSER ngram",
)
@testing.provide_metadata
def test_non_column_index(self):
m1 = self.metadata
t1 = Table(
"add_ix", m1, Column("x", String(50)), mysql_engine="InnoDB"
)
Index("foo_idx", t1.c.x.desc())
m1.create_all()
insp = inspect(testing.db)
eq_(
insp.get_indexes("add_ix"),
[{"name": "foo_idx", "column_names": ["x"], "unique": False}],
)
def _bug_88718_casing_0(self):
fkeys_casing_0 = [
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "test_schema",
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
]
ischema_casing_0 = [
("test", "Track", "TrackID"),
("test_schema", "Track", "TrackID"),
]
return fkeys_casing_0, ischema_casing_0
def _bug_88718_casing_1(self):
fkeys_casing_1 = [
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "test_schema",
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
]
ischema_casing_1 = [
(util.u("test"), util.u("Track"), "TrackID"),
(util.u("test_schema"), util.u("Track"), "TrackID"),
]
return fkeys_casing_1, ischema_casing_1
def _bug_88718_casing_2(self):
fkeys_casing_2 = [
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "test_schema",
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
]
ischema_casing_2 = [
("test", "Track", "TrackID"),
("test_schema", "Track", "TrackID"),
]
return fkeys_casing_2, ischema_casing_2
def test_correct_for_mysql_bug_88718(self):
dialect = mysql.dialect()
for casing, (fkeys, ischema) in [
(0, self._bug_88718_casing_0()),
(1, self._bug_88718_casing_1()),
(2, self._bug_88718_casing_2()),
]:
dialect._casing = casing
dialect.default_schema_name = "test"
connection = mock.Mock(
dialect=dialect, execute=lambda stmt, **params: ischema
)
dialect._correct_for_mysql_bug_88718(fkeys, connection)
eq_(
fkeys,
[
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "test_schema",
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
],
)
@testing.provide_metadata
def test_case_sensitive_column_constraint_reflection(self):
# test for issue #4344 which works around
# MySQL 8.0 bug https://bugs.mysql.com/bug.php?id=88718
m1 = self.metadata
Table(
"Track",
m1,
Column("TrackID", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"Track",
m1,
Column("TrackID", Integer, primary_key=True),
schema=testing.config.test_schema,
mysql_engine="InnoDB",
)
Table(
"PlaylistTrack",
m1,
Column("id", Integer, primary_key=True),
Column(
"TrackID",
ForeignKey("Track.TrackID", name="FK_PlaylistTrackId"),
),
Column(
"TTrackID",
ForeignKey(
"%s.Track.TrackID" % (testing.config.test_schema,),
name="FK_PlaylistTTrackId",
),
),
mysql_engine="InnoDB",
)
m1.create_all()
if testing.db.dialect._casing in (1, 2):
eq_(
inspect(testing.db).get_foreign_keys("PlaylistTrack"),
[
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": testing.config.test_schema,
"referred_table": "track",
"referred_columns": ["TrackID"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "track",
"referred_columns": ["TrackID"],
"options": {},
},
],
)
else:
eq_(
inspect(testing.db).get_foreign_keys("PlaylistTrack"),
[
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": testing.config.test_schema,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
],
)
@testing.requires.mysql_fully_case_sensitive
@testing.provide_metadata
def test_case_sensitive_reflection_dual_case_references(self):
# this tests that within the fix we do for MySQL bug
# 88718, we don't do case-insensitive logic if the backend
# is case sensitive
m = self.metadata
Table(
"t1",
m,
Column("some_id", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"T1",
m,
Column("Some_Id", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"t2",
m,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.some_id", name="t1id_fk")),
Column("cap_t1id", ForeignKey("T1.Some_Id", name="cap_t1id_fk")),
mysql_engine="InnoDB",
)
m.create_all(testing.db)
eq_(
dict(
(rec["name"], rec)
for rec in inspect(testing.db).get_foreign_keys("t2")
),
{
"cap_t1id_fk": {
"name": "cap_t1id_fk",
"constrained_columns": ["cap_t1id"],
"referred_schema": None,
"referred_table": "T1",
"referred_columns": ["Some_Id"],
"options": {},
},
"t1id_fk": {
"name": "t1id_fk",
"constrained_columns": ["t1id"],
"referred_schema": None,
"referred_table": "t1",
"referred_columns": ["some_id"],
"options": {},
},
},
)
class RawReflectionTest(fixtures.TestBase):
__backend__ = True
def setup(self):
dialect = mysql.dialect()
self.parser = _reflection.MySQLTableDefinitionParser(
dialect, dialect.identifier_preparer
)
def test_key_reflection(self):
regex = self.parser._re_key
assert regex.match(" PRIMARY KEY (`id`),")
assert regex.match(" PRIMARY KEY USING BTREE (`id`),")
assert regex.match(" PRIMARY KEY (`id`) USING BTREE,")
assert regex.match(" PRIMARY KEY (`id`)")
assert regex.match(" PRIMARY KEY USING BTREE (`id`)")
assert regex.match(" PRIMARY KEY (`id`) USING BTREE")
assert regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE 16"
)
assert regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE=16"
)
assert regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE = 16"
)
assert not regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE = = 16"
)
assert regex.match(" KEY (`id`) USING BTREE COMMENT 'comment'")
# `SHOW CREATE TABLE` returns COMMENT '''comment'
# after creating table with COMMENT '\'comment'
assert regex.match(" KEY (`id`) USING BTREE COMMENT '''comment'")
assert regex.match(" KEY (`id`) USING BTREE COMMENT 'comment'''")
assert regex.match(" KEY (`id`) USING BTREE COMMENT 'prefix''suffix'")
assert regex.match(
" KEY (`id`) USING BTREE COMMENT 'prefix''text''suffix'"
)
# https://forums.mysql.com/read.php?20,567102,567111#msg-567111
# "It means if the MySQL version >= 501, execute what's in the comment"
assert regex.match(
" FULLTEXT KEY `ix_fulltext_oi_g_name` (`oi_g_name`) "
"/*!50100 WITH PARSER `ngram` */ "
)
def test_key_reflection_columns(self):
regex = self.parser._re_key
exprs = self.parser._re_keyexprs
m = regex.match(" KEY (`id`) USING BTREE COMMENT '''comment'")
eq_(m.group("columns"), "`id`")
m = regex.match(" KEY (`x`, `y`) USING BTREE")
eq_(m.group("columns"), "`x`, `y`")
eq_(exprs.findall(m.group("columns")), [("x", "", ""), ("y", "", "")])
m = regex.match(" KEY (`x`(25), `y`(15)) USING BTREE")
eq_(m.group("columns"), "`x`(25), `y`(15)")
eq_(
exprs.findall(m.group("columns")),
[("x", "25", ""), ("y", "15", "")],
)
m = regex.match(" KEY (`x`(25) DESC, `y`(15) ASC) USING BTREE")
eq_(m.group("columns"), "`x`(25) DESC, `y`(15) ASC")
eq_(
exprs.findall(m.group("columns")),
[("x", "25", "DESC"), ("y", "15", "ASC")],
)
m = regex.match(" KEY `foo_idx` (`x` DESC)")
eq_(m.group("columns"), "`x` DESC")
eq_(exprs.findall(m.group("columns")), [("x", "", "DESC")])
m = regex.match(" KEY `foo_idx` (`x` DESC, `y` ASC)")
eq_(m.group("columns"), "`x` DESC, `y` ASC")
def test_fk_reflection(self):
regex = self.parser._re_fk_constraint
m = regex.match(
" CONSTRAINT `addresses_user_id_fkey` "
"FOREIGN KEY (`user_id`) "
"REFERENCES `users` (`id`) "
"ON DELETE CASCADE ON UPDATE CASCADE"
)
eq_(
m.groups(),
(
"addresses_user_id_fkey",
"`user_id`",
"`users`",
"`id`",
None,
"CASCADE",
"CASCADE",
),
)
m = regex.match(
" CONSTRAINT `addresses_user_id_fkey` "
"FOREIGN KEY (`user_id`) "
"REFERENCES `users` (`id`) "
"ON DELETE CASCADE ON UPDATE SET NULL"
)
eq_(
m.groups(),
(
"addresses_user_id_fkey",
"`user_id`",
"`users`",
"`id`",
None,
"CASCADE",
"SET NULL",
),
)
| 35.085538 | 79 | 0.487647 | [
"MIT"
] | AngelLiang/hacking-sqlalchemy | test/dialect/mysql/test_reflection.py | 39,787 | Python |
"""
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@=ozqw=1&(*&)$sdvl_o1#r=+kf=5s#0g^#mo72^ctn1mmzse$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
'django_extensions',
'import_export',
]
IMPORT_EXPORT_USE_TRANSACTIONS = True
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = './var/www/myProject/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = '/media/'
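# a default local-memory cache plus a dedicated one-hour cache for generated QR codes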
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'qr-code': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'qr-code-cache',
'TIMEOUT': 3600
}
}
QR_CODE_CACHE_ALIAS = 'qr-code'
| 24.993197 | 91 | 0.685084 | [
"BSD-2-Clause"
] | panipp/cs459_2018 | myproject/myproject/settings.py | 3,674 | Python |
from __future__ import annotations
import operator
import re
import sys
import types
from enum import Enum
from typing import TYPE_CHECKING, Optional, List, Union, Iterable, Type, Tuple
from olo.expression import UnaryExpression, BinaryExpression, Expression
from olo.funcs import DISTINCT, Function
if TYPE_CHECKING:
from olo.database import OLOCursor
from olo.model import Model, ModelMeta
from itertools import chain
from decorator import decorator
from olo.compat import izip, imap, str_types, iteritems, reduce
from olo.interfaces import SQLASTInterface
from olo.field import Field
from olo.errors import ExpressionError, OrderByError, SupportError, ORMError
from olo.libs.compiler.translators.func_translator import transform_func
from olo.session import QuerySession
from olo.utils import optimize_sql_ast, friendly_repr
PATTERN_NEG = re.compile(r'^\-')
PATTERN_BACKQUOTE = re.compile('^`(?P<name>.*)`$')
def _strip_backquote(s):
m = PATTERN_BACKQUOTE.search(s)
if not m:
return s
return m.group('name') # pragma: no cover
def _dict_to_expressions(model_class, dct):
return [
getattr(model_class, k) == v
for k, v in iteritems(dct)
]
def _process_order_by(model_class, order_by) -> List[UnaryExpression]:
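    # normalize strings such as '-age' or 'age desc', Field objects, and UnaryExpressions into ordered UnaryExpressions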
new = []
for item in order_by:
if isinstance(item, str_types):
_item = item
_item = _strip_backquote(_item)
is_negative = bool(PATTERN_NEG.search(_item))
if is_negative:
_item = PATTERN_NEG.sub('', _item)
else:
_item, _, sort = _item.partition(' ')
is_negative = sort.lower() == 'desc'
if sort:
_item = _strip_backquote(_item)
f = getattr(model_class, _item, None)
if f is None:
raise OrderByError('`{}` is an invalid order_by'.format( # noqa pragma: no cover pylint: disable=W
item
))
item = f
item = item.desc() if is_negative else item.asc()
elif isinstance(item, Field):
item = item.asc()
elif not isinstance(item, UnaryExpression):
raise OrderByError('`{}` is an invalid order_by'.format( # noqa pragma: no cover pylint: disable=W
item
))
new.append(item)
return new
@decorator
def _lambda_eval(func, self, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], types.FunctionType):
lamb = transform_func(args[0])
return func(self, lamb(self._model_class), **kwargs)
return func(self, *args, **kwargs)
def _check_aggregation(exp: Expression) -> bool:
if isinstance(exp, BinaryExpression):
if isinstance(exp.left, Function):
return True
if isinstance(exp.right, Function):
return True
if isinstance(exp.left, Expression) and _check_aggregation(exp.left):
return True
if isinstance(exp.right, Expression) and _check_aggregation(exp.right):
return True
return False
def _split_where_expression_and_having_expression(expression: BinaryExpression) -> Tuple[Optional[BinaryExpression],
Optional[BinaryExpression]]:
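    # flatten the AND-chain iteratively, then route expressions containing aggregate functions to HAVING and the rest to WHERE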
stack = [expression]
and_expressions = []
while stack:
exp = stack.pop()
if exp is None:
continue
if exp.operator == 'AND':
stack.append(exp.right)
stack.append(exp.left)
continue
and_expressions.append(exp)
where_expressions = []
having_expressions = []
for exp in and_expressions:
if _check_aggregation(exp):
having_expressions.append(exp)
else:
where_expressions.append(exp)
where_expression = None
having_expression = None
if where_expressions:
where_expression = reduce(operator.and_, where_expressions)
if having_expressions:
having_expression = reduce(operator.and_, having_expressions)
return where_expression, having_expression
class JoinType(Enum):
INNER = 0
LEFT = 1
RIGHT = 2
FULL = 3
class JoinChain(SQLASTInterface):
on_: Optional[BinaryExpression]
def __init__(self, type_: JoinType, left: Union[Model, JoinChain], right: Model) -> None:
self.type = type_
self.left = left
self.right = right
self.on_ = None
def on(self, on: BinaryExpression) -> None:
if self.on_ is None:
self.on_ = on
return
self.on_ = self.on_ & on
def get_sql_ast(self) -> List:
from olo.model import Model
if isinstance(self.left, type) and issubclass(self.left, Model):
left_ast = ['TABLE', self.left._get_table_name()]
else:
left_ast = self.left.get_sql_ast()
on_ast = self.on_.get_sql_ast() if self.on_ else []
return ['JOIN', self.type.name, left_ast, ['TABLE', self.right._get_table_name()], on_ast]
def clone(self) -> JoinChain:
cloned = JoinChain(self.type, self.left, self.right)
cloned.on(self.on_)
return cloned
if TYPE_CHECKING:
Entity = Union[Type[Model], Field, Function]
class Query(SQLASTInterface):
def __init__(self, model_class: Type[Model]):
self._model_class = model_class
self._expression: Optional[BinaryExpression] = None
self._having_expression: Optional[BinaryExpression] = None
self._offset = 0
self._limit = None
self._order_by: List[UnaryExpression] = []
self._group_by = []
self._entities: List[Entity] = [model_class]
self._raw = False
self._join_chain: Optional[JoinChain] = None
self._for_update = False
def _update(self, **kwargs):
inst = self.__class__(self._model_class)
inst.__dict__.update(self.__dict__)
inst.__dict__.update(kwargs)
return inst
def _get_entities(self, fields: Iterable[Union[Entity, str]]) -> List[Entity]:
from olo.model import ModelMeta
if not isinstance(fields, (list, tuple, set)):
fields = [fields]
res = []
for field in fields:
if isinstance(field, str_types):
field_ = self._model_class._olo_get_field(field)
if field_ is None:
raise ORMError(f'{friendly_repr(field)} is not a valid field in Model {self._model_class.__name__}')
field = field_
if not isinstance(field, (ModelMeta, Field, Function)):
                raise ORMError(f'{field} is not a valid entity!')
res.append(field)
return res
@_lambda_eval
def map(self, *entities: Union[Entity, str], **kwargs):
from olo.model import ModelMeta
self._raw = kwargs.get('raw', False)
entities = self._get_entities(entities)
q = self._update(_entities=list(
chain.from_iterable(
x if isinstance(x, (list, tuple, set)) else (x,)
for x in entities
)
))
has_aggregation = False
first_field = None
for entity in q._entities:
if isinstance(entity, ModelMeta) and first_field is None:
first_field = self._model_class.get_singleness_pk_field()
if isinstance(entity, Field) and first_field is None:
first_field = entity
if isinstance(entity, Function):
has_aggregation = True
if has_aggregation and first_field is not None:
q = q.group_by(first_field)
return q
def __call__(self, *entities, **kwargs):
return self.map(*entities, **kwargs)
@_lambda_eval
def flat_map(self, query):
return self.join(query._model_class).on(
query._expression
).map(*query._entities)
def __getitem__(self, item):
if isinstance(item, slice):
q = self
start = item.start or 0
stop = item.stop
step = item.step
if step is not None:
raise SupportError(
                    'Slice step is not supported in __getitem__ yet!'
)
if start:
q = q.offset(start)
if stop is not None and (start or stop != sys.maxsize):
q = q.limit(stop - start)
return q.all()
field = self._model_class.get_singleness_pk_field()
return self.filter(field == item).first()
@property
def db(self):
return self._model_class._get_db()
@property
def cq(self):
return self
query = cq
@_lambda_eval
def join(self, model_class):
left = self._join_chain if self._join_chain is not None else self._model_class
return self._update(_join_chain=JoinChain(JoinType.INNER, left, model_class))
@_lambda_eval
def left_join(self, model_class):
left = self._join_chain if self._join_chain is not None else self._model_class
return self._update(_join_chain=JoinChain(JoinType.LEFT, left, model_class))
@_lambda_eval
def right_join(self, model_class):
left = self._join_chain if self._join_chain is not None else self._model_class
return self._update(_join_chain=JoinChain(JoinType.RIGHT, left, model_class))
@_lambda_eval
def full_join(self, model_class):
left = self._join_chain if self._join_chain is not None else self._model_class
return self._update(_join_chain=JoinChain(JoinType.FULL, left, model_class))
@_lambda_eval
def filter(self, *expressions, **expression_dict):
self._model_class._check_attrs(expression_dict)
expression_dict = self._model_class._wash_attrs(
expression_dict
)
expressions = list(expressions) + list(
_dict_to_expressions(
self._model_class, expression_dict
)
)
expression = self._expression
if expressions:
_expression = reduce(
operator.and_,
expressions,
)
if expression is not None:
expression &= _expression
else:
expression = _expression
expression, having_expression = _split_where_expression_and_having_expression(expression)
q = self
if expression is not None:
q = q._update(_expression=expression)
if having_expression is not None:
q = q.having(having_expression)
return q
@_lambda_eval
def on(self, *on_expressions, **on_expression_dict):
if self._join_chain is None:
raise ORMError('this query does not have a join chain!')
self._model_class._check_attrs(on_expression_dict)
on_expression_dict = self._model_class._wash_attrs(
on_expression_dict
)
on_expressions = list(on_expressions) + list(
_dict_to_expressions(
self._model_class, on_expression_dict
)
)
on_expression = reduce(
operator.and_,
on_expressions
)
join_chain = self._join_chain.clone()
join_chain.on(on_expression)
return self._update(_join_chain=join_chain)
@_lambda_eval
def having(self, *having_expressions, **having_expression_dict):
self._model_class._check_attrs(having_expression_dict)
having_expression_dict = self._model_class._wash_attrs(
having_expression_dict
)
having_expressions = (
list(having_expressions) + list(
_dict_to_expressions(
self._model_class, having_expression_dict
)
)
)
q = self
if having_expressions:
having_expression = reduce(operator.and_, having_expressions)
if self._having_expression is not None:
having_expression = self._having_expression & having_expression
q = q._update(_having_expression=having_expression)
return q
def for_update(self):
return self._update(_for_update=True)
def offset(self, offset):
return self._update(_offset=offset)
def limit(self, limit):
return self._update(_limit=limit)
def order_by(self, *order_by):
order_by = _process_order_by(self._model_class, order_by)
_order_by = self._order_by + list(order_by)
return self._update(_order_by=_order_by)
def group_by(self, *group_by):
_group_by = self._group_by + list(group_by)
return self._update(_group_by=_group_by)
def first(self):
res = self.limit(1).all()
return res[0] if res else None
one = first
def __iter__(self):
rv = self._get_rv()
return self._iter_wrap_rv(rv)
def all(self):
return list(self.__iter__())
def count(self):
from olo.funcs import COUNT
return COUNT(self).first() # pylint: disable=E1101
def count_and_all(self):
base_sql_ast = self._get_base_sql_ast(
modifier='SQL_CALC_FOUND_ROWS'
)
with self.db.transaction():
cursor = self.db.get_cursor()
rv = self._get_rv(base_sql_ast=base_sql_ast,
cursor=cursor)
cursor.ast_execute(['SELECT', ['CALL', 'FOUND_ROWS']])
count = cursor.fetchone()[0]
items = list(self._iter_wrap_rv(rv))
return count, items
__len__ = count
def update(self, **values):
from olo import PostgreSQLDataBase
expression = self._get_expression()
if not expression:
            raise ExpressionError('Cannot execute update without an expression')
assignments, _, _ = self._model_class._split_attrs(values)
update_sql_ast = [
'UPDATE',
['TABLE', self.table_name],
['SET',
['SERIES'] + [asg.get_sql_ast() for asg in assignments]],
]
db = self._model_class._get_db()
# FIXME(PG)
if isinstance(db, PostgreSQLDataBase):
pk = self._model_class.get_singleness_pk_field()
if self._order_by:
base_sql_ast = self.map(pk).for_update()._get_base_sql_ast()
sql_ast = self.get_sql_ast(
base_sql_ast=base_sql_ast,
)
update_sql_ast.append(
['WHERE', ['BINARY_OPERATE', 'IN', ['QUOTE', pk.name], ['BRACKET', sql_ast]]]
)
with self.db.transaction():
rows = self._get_rv_by_sql_ast(sql_ast=update_sql_ast)
else:
with self.db.transaction():
rows = self._get_rv(base_sql_ast=update_sql_ast)
else:
with self.db.transaction():
rows = self._get_rv(base_sql_ast=update_sql_ast)
return rows
def delete(self):
expression = self._get_expression()
if not expression:
            raise ExpressionError('Cannot execute delete without an expression')
sql_ast = [
'DELETE',
['TABLE', self.table_name]
]
with self.db.transaction():
rows = self._get_rv(base_sql_ast=sql_ast)
return rows
@property
def table_name(self):
return self._model_class._get_table_name()
def _get_rv(self, base_sql_ast=None,
cursor=None):
return self.__get_rv(
base_sql_ast=base_sql_ast,
cursor=cursor,
)
def __get_rv(self, base_sql_ast=None,
cursor=None):
sql_ast = self.get_sql_ast(
base_sql_ast=base_sql_ast,
)
return self._get_rv_by_sql_ast(sql_ast, cursor=cursor)
def _get_rv_by_sql_ast(self, sql_ast, cursor: Optional[OLOCursor] = None):
if cursor is not None:
cursor.ast_execute(sql_ast)
return cursor.fetchall()
with self.db.transaction():
return self.db.ast_execute(sql_ast)
def get_sql_ast(self, base_sql_ast=None):
sql_ast = self.get_primitive_sql_ast(
base_sql_ast=base_sql_ast)
return optimize_sql_ast(sql_ast)
def get_primitive_sql_ast(self, base_sql_ast=None):
if base_sql_ast is None:
base_sql_ast = self._get_base_sql_ast()
return self._get_primitive_sql_ast(base_sql_ast)
def _entities_contains(self, field):
if len(self._entities) == 1 and self._entities[0] is self._model_class:
return True
for f in self._entities:
            # f == field returns an Expression object, so we must compare with True explicitly
if (f == field) is True:
return True
if getattr(f, 'name', 'f.name') == getattr(field, 'name', 'field.name'):
return True
return False
def _get_primitive_sql_ast(self, base_sql_ast):
sql_ast = list(base_sql_ast) # copy ast
if self._expression is not None:
sql_ast.append([
'WHERE',
self._expression.get_sql_ast()
])
if self._having_expression is not None and not self._group_by:
group_by = []
for entity in self._entities:
if entity is self._model_class:
pk = self._model_class.get_singleness_pk_field()
group_by.append(pk)
break
if isinstance(entity, Field):
group_by.append(entity)
self._group_by = group_by
if self._group_by:
entities = self._get_entities(self._group_by)
field_names = {getattr(f, 'name', '') for f in entities}
pk = self._model_class.get_singleness_pk_field()
            # `pk in self._entities` would always be True because `==` returns an Expression,
            # hence the explicit _entities_contains check below
if self._entities_contains(pk) and pk.name not in field_names:
entities.append(pk)
sql_ast.append([
'GROUP BY',
['SERIES'] + [f.get_sql_ast() for f in entities]
])
if self._having_expression is not None:
sql_ast.append([
'HAVING',
self._having_expression.get_sql_ast()
])
if self._order_by:
sql_ast.append([
'ORDER BY',
['SERIES'] + [f.get_sql_ast() for f in self._order_by]
])
if self._limit is not None:
limit_section = ['LIMIT', None, ['VALUE', self._limit]]
if self._offset is not None and self._offset != 0:
limit_section[1] = ['VALUE', self._offset]
sql_ast.append(limit_section)
if self._for_update:
sql_ast.append(['FOR UPDATE'])
return sql_ast
def _get_expression(self, is_having=False):
return self._having_expression if is_having else self._expression
def _get_base_sql_ast(self, modifier=None, entities=None):
entities = self._entities if entities is None else entities
if self._join_chain:
table_section = self._join_chain.get_sql_ast()
else:
table_section = ['TABLE', self.table_name]
contains_distinct = any(isinstance(entity, DISTINCT) for entity in entities)
# FIXME(PG)
if contains_distinct and self._order_by:
for ob in self._order_by:
if not self._entities_contains(ob.value):
entities = entities + [ob.value]
select_ast = [
'SERIES',
] + [e.get_sql_ast() if hasattr(e, 'get_sql_ast') else e
for e in entities]
if len(select_ast) == 2 and select_ast[1][0] == 'SERIES':
select_ast = select_ast[1]
if modifier is not None:
select_ast = ['MODIFIER', modifier, select_ast]
return ['SELECT', select_ast, ['FROM', table_section]]
# pylint: disable=E0602
def _iter_wrap_rv(self, rv):
from olo.model import ModelMeta
entity_count = len(self._entities)
raw = self._raw
producers = []
idx = -1
def make_field_producer(idx, v):
def producer(item):
if raw:
return item[idx]
model = v.get_model()
attrs = model._parse_attrs({
v.attr_name: item[idx]
})
return attrs[v.attr_name]
return producer
for v in self._entities:
idx += 1
if isinstance(v, ModelMeta):
fields_count = len(v.__fields__)
producers.append((
lambda idx, v:
lambda item: v._olo_instantiate(**dict(
izip(
v.__sorted_fields__,
item[idx: idx + fields_count]
) # pylint: disable=W
))
)(idx, v))
idx += fields_count - 1
continue
if isinstance(v, Field):
producers.append(make_field_producer(idx, v))
continue
producers.append((
lambda idx, v:
lambda item: item[idx]
)(idx, v))
session = QuerySession()
seen = set()
for idx, item in enumerate(rv):
new_item = tuple(imap(lambda f: f(item), producers)) # noqa pylint: disable=W
new_item = new_item[:entity_count]
if entity_count == 1:
new_item = new_item[0]
# TODO
if isinstance(self._entities[0], DISTINCT):
if new_item in seen:
continue
seen.add(new_item)
session.add_entity(new_item)
for entity in session.entities:
yield entity
| 32.464338 | 120 | 0.584092 | [
"Apache-2.0"
] | kadaliao/olo | olo/query.py | 22,303 | Python |
# 6. Larger Than n. Write a program with a function that takes two
# arguments: a list and a number n. Assume that the list contains numbers. The
# function should display all the numbers in the list that are greater than n.
import random
def main():
list_num = [random.randint(0, 100) for i in range(20)]
print(list_num)
    n = int(input('Enter n: '))
    print("Numbers in the list that are greater than ", n, ":\n",
          check_n(list_num, n), sep="")
def check_n(list_num, n):
num_greater_n = []
for i in range(len(list_num)):
if list_num[i] > n:
num_greater_n.append(list_num[i])
return num_greater_n
main()
| 28.75 | 80 | 0.650725 | [
"MIT"
] | SergeHall/Tony-Gaddis-Python-4th | chapter_07/06_larger_than_n.py | 848 | Python |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "snippets_java.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.272727 | 77 | 0.777344 | [
"MIT"
] | edilio/snippets-javaos | snippets_java/manage.py | 256 | Python |
import ast
import math
import copy
with open('input.txt', 'r') as f:
lines = f.readlines()
lines = [line[:-1] for line in lines]
lines = [ast.literal_eval(line) for line in lines]
def reduce(num):
explodeCriteriaMet = True
splitCriteriaMet = True
while explodeCriteriaMet or splitCriteriaMet:
explodeCriteriaMet = False
splitCriteriaMet = False
# check for pair nested inside four pairs
for idx1, val1 in enumerate(num):
if explodeCriteriaMet:
break
if type(val1) == list:
for idx2, val2 in enumerate(val1):
if explodeCriteriaMet:
break
if type(val2) == list:
for idx3, val3 in enumerate(val2):
if explodeCriteriaMet:
break
if type(val3) == list:
for idx4, val4 in enumerate(val3):
if type(val4) == list:
explodeCriteriaMet = True
num = explode(num, val4, [idx1, idx2, idx3, idx4])
break
if explodeCriteriaMet:
continue
# regular number is 10 or greater
for idx1, val1 in enumerate(num):
if splitCriteriaMet:
break
if type(val1) == list:
for idx2, val2 in enumerate(val1):
if splitCriteriaMet:
break
if type(val2) == list:
for idx3, val3 in enumerate(val2):
if splitCriteriaMet:
break
if type(val3) == list:
for idx4, val4 in enumerate(val3):
if val4 >= 10:
splitCriteriaMet = True
num = split(num, val4, [idx1, idx2, idx3, idx4])
break
elif val3 >= 10:
splitCriteriaMet = True
num = split(num, val3, [idx1, idx2, idx3])
break
elif val2 >= 10:
splitCriteriaMet = True
num = split(num, val2, [idx1, idx2])
break
elif val1 >= 10:
splitCriteriaMet = True
num = split(num, val1, [idx1])
break
return num
def split(num, value, idx):
# print("SPLIT")
# print("IDX:", idx)
# print("VAL:", value)
pair = [math.floor(value/2), math.ceil(value/2)]
if len(idx) == 4:
num[idx[0]][idx[1]][idx[2]][idx[3]] = pair
elif len(idx) == 3:
num[idx[0]][idx[1]][idx[2]] = pair
elif len(idx) == 2:
num[idx[0]][idx[1]] = pair
elif len(idx) == 1:
num[idx[0]] = pair
return num
def getValueAtIndex(num, idx):
for i in idx:
num = num[i]
return num
def changeValueAtIndex(num, idx, value):
if len(idx) == 5:
num[idx[0]][idx[1]][idx[2]][idx[3]][idx[4]] += value
elif len(idx) == 4:
num[idx[0]][idx[1]][idx[2]][idx[3]] += value
elif len(idx) == 3:
num[idx[0]][idx[1]][idx[2]] += value
elif len(idx) == 2:
num[idx[0]][idx[1]] += value
elif len(idx) == 1:
num[idx[0]] += value
return num
def explode(num, item, idx):
# store values to add:
left = item[0]
right = item[1]
# the exploding pair is replaced with the regular number 0
num[idx[0]][idx[1]][idx[2]][idx[3]] = 0
# adding the values to their neighbors
# left:
for index, indexValue in enumerate(idx[:: -1]):
if indexValue != 0: # there is a neighbour to the left when the indexValue is 1
idxLeft = idx[: 4-index]
idxLeft[-1] = 0
while type(getValueAtIndex(num, idxLeft)) == list:
idxLeft.append(1)
num = changeValueAtIndex(num, idxLeft, left)
break
# right
for index, indexValue in enumerate(idx[:: -1]):
if indexValue != 1: # there is a neighbour to the right when the indexValue is 0
idxRight = idx[: 4-index]
idxRight[-1] = 1
while type(getValueAtIndex(num, idxRight)) == list:
idxRight.append(0)
num = changeValueAtIndex(num, idxRight, right)
break
return num
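# Worked example from the puzzle description (shown as a comment, not executed):
# exploding the pair [9,8] at index [0,0,0,0] of [[[[[9,8],1],2],3],4] finds no
# regular number to its left, adds 8 to the 1 on its right and replaces the pair
# with 0, yielding [[[[0,9],2],3],4].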
def calculateSum(num1, num2):
sum = [copy.deepcopy(num1), copy.deepcopy(num2)]
return reduce(sum)
def calculateMagnitude(num):
left = num[0]
right = num[1]
if type(left) == list:
left = calculateMagnitude(left)
if type(right) == list:
right = calculateMagnitude(right)
return (3*left + 2*right)
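# Sanity check from the puzzle description: the magnitude of [[1,2],[[3,4],5]] is
# 3*(3*1 + 2*2) + 2*(3*(3*3 + 2*4) + 2*5) = 143.
assert calculateMagnitude([[1, 2], [[3, 4], 5]]) == 143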
# part 1
sum = lines[0]
for line in lines[1:]:
sum = calculateSum(sum, line)
print("What is the magnitude of the final sum?", calculateMagnitude(sum))
# part 2
largest = 0
for x in lines:
for y in lines:
if x != y:
magnitude = calculateMagnitude(calculateSum(x, y))
if magnitude > largest:
largest = magnitude
print("What is the largest magnitude of any sum of two different snailfish numbers from the homework assignment?", largest)
| 29.089474 | 123 | 0.486159 | [
"MIT"
] | StrangeGirlMurph/Coding-Projects | 07-AdventOfCode2021/18/day-18.py | 5,527 | Python |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import unittest
import code
class TestDay01(unittest.TestCase):
# Part 01
def test_example01(self):
expense_report = [1721, 299]
expected = 514579
result = code.part01(expense_report)
self.assertEqual(result, expected)
# Don't count a 2020/2 value twice
def test_duplicate(self):
expense_report = [1010, 1721, 299]
expected = 514579
result = code.part01(expense_report)
self.assertEqual(result, expected)
# Part 02
def test_example02(self):
expense_report = [979, 366, 675]
expected = 241861950
result = code.part02(expense_report)
self.assertEqual(result, expected)
if __name__ == '__main__':
unittest.main()
| 23.264706 | 44 | 0.635904 | [
"MIT"
] | mr-bigbang/advent-of-code | 2020/01/test.py | 791 | Python |
from rcrs_core.commands.Command import Command
from rcrs_core.worldmodel.entityID import EntityID
from rcrs_core.connection import URN
from rcrs_core.connection import RCRSProto_pb2
class AKTell(Command):
def __init__(self, agent_id: EntityID, time: int, message: str) -> None:
super().__init__()
self.urn = URN.Command.AK_TELL
self.agent_id = agent_id
self.message = message.encode('utf-8')
self.time = time
def prepare_cmd(self):
msg = RCRSProto_pb2.MessageProto()
msg.urn = self.urn
msg.components[URN.ComponentControlMSG.AgentID].entityID = self.agent_id.get_value()
msg.components[URN.ComponentControlMSG.Time].intValue = self.time
msg.components[URN.ComponentCommand.Message].rawData = self.message
return msg
| 35.478261 | 92 | 0.708333 | [
"BSD-3-Clause"
] | roborescue/rcrs-core-python | rcrs_core/commands/AKTell.py | 816 | Python |
import sys
import cj_function_lib as cj
import init_file as variables
import mdbtools as mdt
#print variables.ProjMDB
#print variables.QSWAT_MDB
wwqrng = cj.extract_table_from_mdb(variables.QSWAT_MDB, 'wwqrng', variables.path + "\\wwqrng.tmp~")
wwq_defaults={}
for record in wwqrng: # Getting a list of parameter names for wwq and their defaults
if record.split(",")[0].strip(" ") != "":
        wwq_defaults[record.split(",")[0].strip("[").strip("]")] = record.split(",")[3]
"""
# here we commit to table the parameters for the wwq to the row in the table wwq
"""
wwq = mdt.mdb_with_ops(variables.ProjMDB)
wwq.clear_table("wwq")
wwq_defaults["OID"] = 1
wwq_defaults = cj.format_data_type(wwq_defaults, wwqrng)
wwq.insert_row("wwq", wwq_defaults, True)
wwq.disconnect()
| 27.068966 | 99 | 0.719745 | [
"MIT"
] | VUB-HYDR/2018_Chawanda_etal | workflow_lib/wwq_dbase.py | 785 | Python |
from rdkit import Chem
from AnalysisModule.routines.util import load_pkl
# logit_result = yaml_fileread("../logistic.yml")
logit_result = load_pkl("../clf3d/logistic.pkl")
"""
epg-string --> maxscore
--> [(f, s)] --> xx, yy, zz, [(x, y, d)] --> refcode, amine
"""
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
def moltosvg(mol, molSize=(450, 450), kekulize=True):
mc = Chem.Mol(mol.ToBinary())
if kekulize:
try:
Chem.Kekulize(mc)
except:
mc = Chem.Mol(mol.ToBinary())
if not mc.GetNumConformers():
rdDepictor.Compute2DCoords(mc)
drawer = rdMolDraw2D.MolDraw2DSVG(molSize[0], molSize[1])
# drawer.DrawMolecule(mc, legend="lalala") # legend fontsize hardcoded, too small
drawer.DrawMolecule(mc, )
drawer.FinishDrawing()
svg = drawer.GetDrawingText()
# It seems that the svg renderer used doesn't quite hit the spec.
# Here are some fixes to make it work in the notebook, although I think
# the underlying issue needs to be resolved at the generation step
return svg.replace('svg:', '')
def plot_amine(smi):
m = Chem.MolFromSmiles(smi)
return moltosvg(m)
def insert_url(svg, n=12, url="https://www.google.com", urlname="ABCDEF"):
lines = svg.split("\n")
template = '<a xmlns="http://www.w3.org/2000/svg" xlink:href="{}" xmlns:xlink="http://www.w3.org/1999/xlink" target="__blank"><text x="150" y="400" font-size="4em" fill="black">{}</text></a>'.format(
url, urlname)
s = ""
for il, l in enumerate(lines):
if il == n:
s += template + "\n"
s += l + "\n"
return s
for epg, epginfo in logit_result.items():
if epginfo is None:
print(epg, "info is None")
continue
for i, refcode in enumerate(epginfo["refcodes"]):
a = epginfo["amines"][i]
svg = plot_amine(a)
url = "https://www.ccdc.cam.ac.uk/structures/Search?Ccdcid={}".format(refcode)
# svg = insert_url(svg, urlname=refcode, url=url)
with open("amines/{}.svg".format(refcode), "w") as f:
f.write(svg)
| 33.390625 | 203 | 0.621432 | [
"MIT"
] | qai222/ATMOxide | Revision/vis3d/prepare.py | 2,137 | Python |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines tensorflow_graphics version information (https://semver.org/)."""
_MAJOR_VERSION = "1"
_MINOR_VERSION = "0"
_PATCH_VERSION = "0"
_VERSION_SUFFIX = ""
__version__ = ".".join([
_MAJOR_VERSION,
_MINOR_VERSION,
_PATCH_VERSION,
])
if _VERSION_SUFFIX:
__version__ = "{}-{}".format(__version__, _VERSION_SUFFIX)
| 31.241379 | 76 | 0.743929 | [
"Apache-2.0"
] | BachiLi/graphics | tensorflow_graphics/version.py | 906 | Python |
"Plugin registration"
from pylint.lint import PyLinter
from .checkers import register_checkers
from .suppression import suppress_warnings
def register(linter: PyLinter) -> None:
"Register the plugin"
register_checkers(linter)
suppress_warnings(linter)
| 22.25 | 42 | 0.790262 | [
"BSD-3-Clause"
] | troyjfarrell/pylint_django_translations | pylint_django_translations/plugin.py | 267 | Python |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from __future__ import division
import copy
from fnmatch import translate
from math import isinf, isnan
from os.path import isfile
from re import compile
import requests
from prometheus_client.samples import Sample
from six import PY3, iteritems, string_types
from ...config import is_affirmative
from ...errors import CheckException
from ...utils.common import to_native_string
from ...utils.http import RequestsWrapper
from .. import AgentCheck
from ..libs.prometheus import text_fd_to_metric_families
if PY3:
long = int
class OpenMetricsScraperMixin(object):
# pylint: disable=E1101
    # This class is not supposed to be used by itself; it provides scraping behavior but
    # needs to be embedded in a check in the end
REQUESTS_CHUNK_SIZE = 1024 * 10 # use 10kb as chunk size when using the Stream feature in requests.get
# indexes in the sample tuple of core.Metric
SAMPLE_NAME = 0
SAMPLE_LABELS = 1
SAMPLE_VALUE = 2
MICROS_IN_S = 1000000
MINUS_INF = float("-inf")
TELEMETRY_GAUGE_MESSAGE_SIZE = "payload.size"
TELEMETRY_COUNTER_METRICS_BLACKLIST_COUNT = "metrics.blacklist.count"
TELEMETRY_COUNTER_METRICS_INPUT_COUNT = "metrics.input.count"
TELEMETRY_COUNTER_METRICS_IGNORE_COUNT = "metrics.ignored.count"
TELEMETRY_COUNTER_METRICS_PROCESS_COUNT = "metrics.processed.count"
METRIC_TYPES = ['counter', 'gauge', 'summary', 'histogram']
KUBERNETES_TOKEN_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/token'
def __init__(self, *args, **kwargs):
# Initialize AgentCheck's base class
super(OpenMetricsScraperMixin, self).__init__(*args, **kwargs)
def create_scraper_configuration(self, instance=None):
"""
Creates a scraper configuration.
If instance does not specify a value for a configuration option, the value will default to the `init_config`.
Otherwise, the `default_instance` value will be used.
A default mixin configuration will be returned if there is no instance.
"""
if 'openmetrics_endpoint' in instance:
raise CheckException('The setting `openmetrics_endpoint` is only available for Agent version 7 or later')
# We can choose to create a default mixin configuration for an empty instance
if instance is None:
instance = {}
# Supports new configuration options
config = copy.deepcopy(instance)
# Set the endpoint
endpoint = instance.get('prometheus_url')
if instance and endpoint is None:
raise CheckException("You have to define a prometheus_url for each prometheus instance")
config['prometheus_url'] = endpoint
        # `NAMESPACE` is the prefix metrics will have. It needs to be hardcoded in the
        # child check class.
namespace = instance.get('namespace')
# Check if we have a namespace
if instance and namespace is None:
if self.default_namespace is None:
raise CheckException("You have to define a namespace for each prometheus check")
namespace = self.default_namespace
config['namespace'] = namespace
# Retrieve potential default instance settings for the namespace
default_instance = self.default_instances.get(namespace, {})
# `metrics_mapper` is a dictionary where the keys are the metrics to capture
        # and the values are the corresponding metric names to use in Datadog.
# Note: it is empty in the parent class but will need to be
        # overloaded/hardcoded in the final check so they are not counted as custom metrics.
# Metrics are preprocessed if no mapping
metrics_mapper = {}
# We merge list and dictionaries from optional defaults & instance settings
metrics = default_instance.get('metrics', []) + instance.get('metrics', [])
for metric in metrics:
if isinstance(metric, string_types):
metrics_mapper[metric] = metric
else:
metrics_mapper.update(metric)
config['metrics_mapper'] = metrics_mapper
# `_wildcards_re` is a Pattern object used to match metric wildcards
config['_wildcards_re'] = None
wildcards = set()
for metric in config['metrics_mapper']:
if "*" in metric:
wildcards.add(translate(metric))
if wildcards:
config['_wildcards_re'] = compile('|'.join(wildcards))
        # `prometheus_metrics_prefix` allows specifying a prefix that all
        # prometheus metrics should have. This can be used when the prometheus
        # endpoint we are scraping allows adding a custom prefix to its
        # metrics.
config['prometheus_metrics_prefix'] = instance.get(
'prometheus_metrics_prefix', default_instance.get('prometheus_metrics_prefix', '')
)
# `label_joins` holds the configuration for extracting 1:1 labels from
        # a target metric to all metrics matching the label, example:
# self.label_joins = {
# 'kube_pod_info': {
# 'labels_to_match': ['pod'],
# 'labels_to_get': ['node', 'host_ip']
# }
# }
config['label_joins'] = default_instance.get('label_joins', {})
config['label_joins'].update(instance.get('label_joins', {}))
        # `_label_mapping` holds the additional label info to add for a specific
# label value, example:
# self._label_mapping = {
# 'pod': {
# 'dd-agent-9s1l1': {
# "node": "yolo",
# "host_ip": "yey"
# }
# }
# }
config['_label_mapping'] = {}
# `_active_label_mapping` holds a dictionary of label values found during the run
        # to clean unused values out of the label_mapping, example:
# self._active_label_mapping = {
# 'pod': {
# 'dd-agent-9s1l1': True
# }
# }
config['_active_label_mapping'] = {}
# `_watched_labels` holds the sets of labels to watch for enrichment
config['_watched_labels'] = {}
config['_dry_run'] = True
# Some metrics are ignored because they are duplicates or introduce a
# very high cardinality. Metrics included in this list will be silently
        # skipped without an 'Unable to handle metric' debug line in the logs
config['ignore_metrics'] = instance.get('ignore_metrics', default_instance.get('ignore_metrics', []))
config['_ignored_metrics'] = set()
# `_ignored_re` is a Pattern object used to match ignored metric patterns
config['_ignored_re'] = None
ignored_patterns = set()
# Separate ignored metric names and ignored patterns in different sets for faster lookup later
for metric in config['ignore_metrics']:
if '*' in metric:
ignored_patterns.add(translate(metric))
else:
config['_ignored_metrics'].add(metric)
if ignored_patterns:
config['_ignored_re'] = compile('|'.join(ignored_patterns))
# Ignore metrics based on label keys or specific label values
config['ignore_metrics_by_labels'] = instance.get(
'ignore_metrics_by_labels', default_instance.get('ignore_metrics_by_labels', {})
)
# If you want to send the buckets as tagged values when dealing with histograms,
# set send_histograms_buckets to True, set to False otherwise.
config['send_histograms_buckets'] = is_affirmative(
instance.get('send_histograms_buckets', default_instance.get('send_histograms_buckets', True))
)
        # If you want the buckets to be non-cumulative and to come with upper/lower bound tags,
        # set non_cumulative_buckets to True; it is enabled automatically when distribution metrics are enabled.
config['non_cumulative_buckets'] = is_affirmative(
instance.get('non_cumulative_buckets', default_instance.get('non_cumulative_buckets', False))
)
# Send histograms as datadog distribution metrics
config['send_distribution_buckets'] = is_affirmative(
instance.get('send_distribution_buckets', default_instance.get('send_distribution_buckets', False))
)
# Non cumulative buckets are mandatory for distribution metrics
if config['send_distribution_buckets'] is True:
config['non_cumulative_buckets'] = True
# If you want to send `counter` metrics as monotonic counts, set this value to True.
# Set to False if you want to instead send those metrics as `gauge`.
config['send_monotonic_counter'] = is_affirmative(
instance.get('send_monotonic_counter', default_instance.get('send_monotonic_counter', True))
)
        # If you want `counter` metrics to be submitted as both gauges and monotonic counts, set this value to True.
config['send_monotonic_with_gauge'] = is_affirmative(
instance.get('send_monotonic_with_gauge', default_instance.get('send_monotonic_with_gauge', False))
)
config['send_distribution_counts_as_monotonic'] = is_affirmative(
instance.get(
'send_distribution_counts_as_monotonic',
default_instance.get('send_distribution_counts_as_monotonic', False),
)
)
config['send_distribution_sums_as_monotonic'] = is_affirmative(
instance.get(
'send_distribution_sums_as_monotonic',
default_instance.get('send_distribution_sums_as_monotonic', False),
)
)
        # If the `labels_mapper` dictionary is provided, the metric label names
# in the `labels_mapper` will use the corresponding value as tag name
# when sending the gauges.
config['labels_mapper'] = default_instance.get('labels_mapper', {})
config['labels_mapper'].update(instance.get('labels_mapper', {}))
# Rename bucket "le" label to "upper_bound"
config['labels_mapper']['le'] = 'upper_bound'
        # `exclude_labels` is an array of label names to exclude. Those labels
# will just not be added as tags when submitting the metric.
config['exclude_labels'] = default_instance.get('exclude_labels', []) + instance.get('exclude_labels', [])
# `type_overrides` is a dictionary where the keys are prometheus metric names
# and the values are a metric type (name as string) to use instead of the one
# listed in the payload. It can be used to force a type on untyped metrics.
# Note: it is empty in the parent class but will need to be
        # overloaded/hardcoded in the final check so they are not counted as custom metrics.
config['type_overrides'] = default_instance.get('type_overrides', {})
config['type_overrides'].update(instance.get('type_overrides', {}))
# `_type_override_patterns` is a dictionary where we store Pattern objects
# that match metric names as keys, and their corresponding metric type overrrides as values.
config['_type_override_patterns'] = {}
with_wildcards = set()
for metric, type in iteritems(config['type_overrides']):
if '*' in metric:
config['_type_override_patterns'][compile(translate(metric))] = type
with_wildcards.add(metric)
# cleanup metric names with wildcards from the 'type_overrides' dict
for metric in with_wildcards:
del config['type_overrides'][metric]
        # Some metrics are retrieved from different hosts and often
        # a label can hold this information; this transfers it to the hostname
config['label_to_hostname'] = instance.get('label_to_hostname', default_instance.get('label_to_hostname', None))
        # In combination with label_to_hostname, this allows adding a common suffix to the hostnames
        # submitted. This can be used, for instance, to discriminate hosts between clusters.
config['label_to_hostname_suffix'] = instance.get(
'label_to_hostname_suffix', default_instance.get('label_to_hostname_suffix', None)
)
# Add a 'health' service check for the prometheus endpoint
config['health_service_check'] = is_affirmative(
instance.get('health_service_check', default_instance.get('health_service_check', True))
)
        # Can either be the path to the certificate alone, in which case you should also specify the private key,
        # or the path to a file containing both the certificate & the private key
config['ssl_cert'] = instance.get('ssl_cert', default_instance.get('ssl_cert', None))
# Needed if the certificate does not include the private key
#
# /!\ The private key to your local certificate must be unencrypted.
# Currently, Requests does not support using encrypted keys.
config['ssl_private_key'] = instance.get('ssl_private_key', default_instance.get('ssl_private_key', None))
# The path to the trusted CA used for generating custom certificates
config['ssl_ca_cert'] = instance.get('ssl_ca_cert', default_instance.get('ssl_ca_cert', None))
# Whether or not to validate SSL certificates
config['ssl_verify'] = is_affirmative(instance.get('ssl_verify', default_instance.get('ssl_verify', True)))
# Extra http headers to be sent when polling endpoint
config['extra_headers'] = default_instance.get('extra_headers', {})
config['extra_headers'].update(instance.get('extra_headers', {}))
# Timeout used during the network request
config['prometheus_timeout'] = instance.get(
'prometheus_timeout', default_instance.get('prometheus_timeout', 10)
)
# Authentication used when polling endpoint
config['username'] = instance.get('username', default_instance.get('username', None))
config['password'] = instance.get('password', default_instance.get('password', None))
# Custom tags that will be sent with each metric
config['custom_tags'] = instance.get('tags', [])
# Additional tags to be sent with each metric
config['_metric_tags'] = []
# List of strings to filter the input text payload on. If any line contains
# one of these strings, it will be filtered out before being parsed.
# INTERNAL FEATURE, might be removed in future versions
config['_text_filter_blacklist'] = []
# Whether or not to use the service account bearer token for authentication
# if 'bearer_token_path' is not set, we use /var/run/secrets/kubernetes.io/serviceaccount/token
# as a default path to get the token.
config['bearer_token_auth'] = is_affirmative(
instance.get('bearer_token_auth', default_instance.get('bearer_token_auth', False))
)
# Can be used to get a service account bearer token from files
# other than /var/run/secrets/kubernetes.io/serviceaccount/token
# 'bearer_token_auth' should be enabled.
config['bearer_token_path'] = instance.get('bearer_token_path', default_instance.get('bearer_token_path', None))
# The service account bearer token to be used for authentication
config['_bearer_token'] = self._get_bearer_token(config['bearer_token_auth'], config['bearer_token_path'])
config['telemetry'] = is_affirmative(instance.get('telemetry', default_instance.get('telemetry', False)))
# The metric name services use to indicate build information
config['metadata_metric_name'] = instance.get(
'metadata_metric_name', default_instance.get('metadata_metric_name')
)
# Map of metadata key names to label names
config['metadata_label_map'] = instance.get(
'metadata_label_map', default_instance.get('metadata_label_map', {})
)
config['_default_metric_transformers'] = {}
if config['metadata_metric_name'] and config['metadata_label_map']:
config['_default_metric_transformers'][config['metadata_metric_name']] = self.transform_metadata
# Whether or not to enable flushing of the first value of monotonic counts
config['_successfully_executed'] = False
return config
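    # A sketch of the kind of instance dict the configuration above is built from
    # (the option names follow the settings read in create_scraper_configuration;
    # the endpoint, namespace and metric names are made up for illustration):
    #
    #     instance = {
    #         'prometheus_url': 'http://localhost:9090/metrics',
    #         'namespace': 'my_service',
    #         'metrics': ['go_goroutines', {'process_cpu_seconds_total': 'cpu.time'}],
    #         'send_histograms_buckets': True,
    #     }
    #     scraper_config = self.create_scraper_configuration(instance)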
def get_http_handler(self, scraper_config):
"""
Get http handler for a specific scraper config.
The http handler is cached using `prometheus_url` as key.
"""
prometheus_url = scraper_config['prometheus_url']
if prometheus_url in self._http_handlers:
return self._http_handlers[prometheus_url]
# TODO: Deprecate this behavior in Agent 8
if scraper_config['ssl_ca_cert'] is False:
scraper_config['ssl_verify'] = False
# TODO: Deprecate this behavior in Agent 8
if scraper_config['ssl_verify'] is False:
scraper_config.setdefault('tls_ignore_warning', True)
http_handler = self._http_handlers[prometheus_url] = RequestsWrapper(
scraper_config, self.init_config, self.HTTP_CONFIG_REMAPPER, self.log
)
headers = http_handler.options['headers']
bearer_token = scraper_config['_bearer_token']
if bearer_token is not None:
headers['Authorization'] = 'Bearer {}'.format(bearer_token)
# TODO: Determine if we really need this
headers.setdefault('accept-encoding', 'gzip')
# Explicitly set the content type we accept
headers.setdefault('accept', 'text/plain')
return http_handler
def reset_http_config(self):
"""
You may need to use this when configuration is determined dynamically during every
check run, such as when polling an external resource like the Kubelet.
"""
self._http_handlers.clear()
def parse_metric_family(self, response, scraper_config):
"""
Parse the MetricFamily from a valid `requests.Response` object to provide a MetricFamily object.
The text format uses iter_lines() generator.
"""
if response.encoding is None:
response.encoding = 'utf-8'
input_gen = response.iter_lines(chunk_size=self.REQUESTS_CHUNK_SIZE, decode_unicode=True)
if scraper_config['_text_filter_blacklist']:
input_gen = self._text_filter_input(input_gen, scraper_config)
for metric in text_fd_to_metric_families(input_gen):
self._send_telemetry_counter(
self.TELEMETRY_COUNTER_METRICS_INPUT_COUNT, len(metric.samples), scraper_config
)
type_override = scraper_config['type_overrides'].get(metric.name)
if type_override:
metric.type = type_override
elif scraper_config['_type_override_patterns']:
for pattern, new_type in iteritems(scraper_config['_type_override_patterns']):
if pattern.search(metric.name):
metric.type = new_type
break
if metric.type not in self.METRIC_TYPES:
continue
metric.name = self._remove_metric_prefix(metric.name, scraper_config)
yield metric
def _text_filter_input(self, input_gen, scraper_config):
"""
Filters out the text input line by line to avoid parsing and processing
metrics we know we don't want to process. This only works on `text/plain`
payloads, and is an INTERNAL FEATURE implemented for the kubelet check
        :param input_gen: line generator
:output: generator of filtered lines
"""
for line in input_gen:
for item in scraper_config['_text_filter_blacklist']:
if item in line:
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_BLACKLIST_COUNT, 1, scraper_config)
break
else:
# No blacklist matches, passing the line through
yield line
def _remove_metric_prefix(self, metric, scraper_config):
prometheus_metrics_prefix = scraper_config['prometheus_metrics_prefix']
return metric[len(prometheus_metrics_prefix) :] if metric.startswith(prometheus_metrics_prefix) else metric
def scrape_metrics(self, scraper_config):
"""
Poll the data from Prometheus and return the metrics as a generator.
"""
response = self.poll(scraper_config)
if scraper_config['telemetry']:
if 'content-length' in response.headers:
content_len = int(response.headers['content-length'])
else:
content_len = len(response.content)
self._send_telemetry_gauge(self.TELEMETRY_GAUGE_MESSAGE_SIZE, content_len, scraper_config)
try:
# no dry run if no label joins
if not scraper_config['label_joins']:
scraper_config['_dry_run'] = False
elif not scraper_config['_watched_labels']:
watched = scraper_config['_watched_labels']
watched['sets'] = {}
watched['keys'] = {}
watched['singles'] = set()
for key, val in iteritems(scraper_config['label_joins']):
labels = []
if 'labels_to_match' in val:
labels = val['labels_to_match']
elif 'label_to_match' in val:
self.log.warning("`label_to_match` is being deprecated, please use `labels_to_match`")
if isinstance(val['label_to_match'], list):
labels = val['label_to_match']
else:
labels = [val['label_to_match']]
if labels:
s = frozenset(labels)
watched['sets'][key] = s
watched['keys'][key] = ','.join(s)
if len(labels) == 1:
watched['singles'].add(labels[0])
for metric in self.parse_metric_family(response, scraper_config):
yield metric
# Set dry run off
scraper_config['_dry_run'] = False
# Garbage collect unused mapping and reset active labels
for metric, mapping in list(iteritems(scraper_config['_label_mapping'])):
for key in list(mapping):
if (
metric in scraper_config['_active_label_mapping']
and key not in scraper_config['_active_label_mapping'][metric]
):
del scraper_config['_label_mapping'][metric][key]
scraper_config['_active_label_mapping'] = {}
finally:
response.close()
def process(self, scraper_config, metric_transformers=None):
"""
        Polls the data from Prometheus and submits it as Datadog metrics.
        `endpoint` is the metrics endpoint to use to poll metrics from Prometheus.
Note that if the instance has a `tags` attribute, it will be pushed
automatically as additional custom tags and added to the metrics
"""
transformers = scraper_config['_default_metric_transformers'].copy()
if metric_transformers:
transformers.update(metric_transformers)
for metric in self.scrape_metrics(scraper_config):
self.process_metric(metric, scraper_config, metric_transformers=transformers)
scraper_config['_successfully_executed'] = True
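    # A minimal sketch (hypothetical subclass name) of how a check built on this mixin
    # typically drives `process` from its `check` method; the scraper config is the one
    # produced by create_scraper_configuration above:
    #
    #     class MyCheck(OpenMetricsScraperMixin, AgentCheck):
    #         def check(self, instance):
    #             scraper_config = self.create_scraper_configuration(instance)
    #             self.process(scraper_config)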
def transform_metadata(self, metric, scraper_config):
labels = metric.samples[0][self.SAMPLE_LABELS]
for metadata_name, label_name in iteritems(scraper_config['metadata_label_map']):
if label_name in labels:
self.set_metadata(metadata_name, labels[label_name])
def _metric_name_with_namespace(self, metric_name, scraper_config):
namespace = scraper_config['namespace']
if not namespace:
return metric_name
return '{}.{}'.format(namespace, metric_name)
def _telemetry_metric_name_with_namespace(self, metric_name, scraper_config):
namespace = scraper_config['namespace']
if not namespace:
return '{}.{}'.format('telemetry', metric_name)
return '{}.{}.{}'.format(namespace, 'telemetry', metric_name)
def _send_telemetry_gauge(self, metric_name, val, scraper_config):
if scraper_config['telemetry']:
metric_name_with_namespace = self._telemetry_metric_name_with_namespace(metric_name, scraper_config)
# Determine the tags to send
custom_tags = scraper_config['custom_tags']
tags = list(custom_tags)
tags.extend(scraper_config['_metric_tags'])
self.gauge(metric_name_with_namespace, val, tags=tags)
def _send_telemetry_counter(self, metric_name, val, scraper_config, extra_tags=None):
if scraper_config['telemetry']:
metric_name_with_namespace = self._telemetry_metric_name_with_namespace(metric_name, scraper_config)
# Determine the tags to send
custom_tags = scraper_config['custom_tags']
tags = list(custom_tags)
tags.extend(scraper_config['_metric_tags'])
if extra_tags:
tags.extend(extra_tags)
self.count(metric_name_with_namespace, val, tags=tags)
def _store_labels(self, metric, scraper_config):
# If targeted metric, store labels
if metric.name not in scraper_config['label_joins']:
return
watched = scraper_config['_watched_labels']
matching_labels = watched['sets'][metric.name]
mapping_key = watched['keys'][metric.name]
labels_to_get = scraper_config['label_joins'][metric.name]['labels_to_get']
get_all = '*' in labels_to_get
match_all = mapping_key == '*'
for sample in metric.samples:
# metadata-only metrics that are used for label joins are always equal to 1
# this is required for metrics where all combinations of a state are sent
# but only the active one is set to 1 (others are set to 0)
# example: kube_pod_status_phase in kube-state-metrics
if sample[self.SAMPLE_VALUE] != 1:
continue
sample_labels = sample[self.SAMPLE_LABELS]
sample_labels_keys = sample_labels.keys()
if match_all or matching_labels.issubset(sample_labels_keys):
label_dict = dict()
if get_all:
for label_name, label_value in iteritems(sample_labels):
if label_name in matching_labels:
continue
label_dict[label_name] = label_value
else:
for label_name in labels_to_get:
if label_name in sample_labels:
label_dict[label_name] = sample_labels[label_name]
if match_all:
mapping_value = '*'
else:
mapping_value = ','.join([sample_labels[l] for l in matching_labels])
scraper_config['_label_mapping'].setdefault(mapping_key, {}).setdefault(mapping_value, {}).update(
label_dict
)
def _join_labels(self, metric, scraper_config):
# Filter metric to see if we can enrich with joined labels
if not scraper_config['label_joins']:
return
label_mapping = scraper_config['_label_mapping']
active_label_mapping = scraper_config['_active_label_mapping']
watched = scraper_config['_watched_labels']
sets = watched['sets']
keys = watched['keys']
singles = watched['singles']
for sample in metric.samples:
sample_labels = sample[self.SAMPLE_LABELS]
sample_labels_keys = sample_labels.keys()
# Match with wildcard label
# Label names are [a-zA-Z0-9_]*, so no risk of collision
if '*' in singles:
active_label_mapping.setdefault('*', {})['*'] = True
if '*' in label_mapping and '*' in label_mapping['*']:
sample_labels.update(label_mapping['*']['*'])
# Match with single labels
matching_single_labels = singles.intersection(sample_labels_keys)
for label in matching_single_labels:
mapping_key = label
mapping_value = sample_labels[label]
active_label_mapping.setdefault(mapping_key, {})[mapping_value] = True
if mapping_key in label_mapping and mapping_value in label_mapping[mapping_key]:
sample_labels.update(label_mapping[mapping_key][mapping_value])
# Match with tuples of labels
for key, mapping_key in iteritems(keys):
if mapping_key in matching_single_labels:
continue
matching_labels = sets[key]
if matching_labels.issubset(sample_labels_keys):
matching_values = [sample_labels[l] for l in matching_labels]
mapping_value = ','.join(matching_values)
active_label_mapping.setdefault(mapping_key, {})[mapping_value] = True
if mapping_key in label_mapping and mapping_value in label_mapping[mapping_key]:
sample_labels.update(label_mapping[mapping_key][mapping_value])
def _ignore_metrics_by_label(self, scraper_config, metric_name, sample):
ignore_metrics_by_label = scraper_config['ignore_metrics_by_labels']
sample_labels = sample[self.SAMPLE_LABELS]
for label_key, label_values in ignore_metrics_by_label.items():
if not label_values:
self.log.debug(
"Skipping filter label `%s` with an empty values list, did you mean to use '*' wildcard?", label_key
)
elif '*' in label_values:
# Wildcard '*' means all metrics with label_key will be ignored
self.log.debug("Detected wildcard for label `%s`", label_key)
if label_key in sample_labels.keys():
self.log.debug("Skipping metric `%s` due to label key matching: %s", metric_name, label_key)
return True
else:
for val in label_values:
if label_key in sample_labels and sample_labels[label_key] == val:
self.log.debug(
"Skipping metric `%s` due to label `%s` value matching: %s", metric_name, label_key, val
)
return True
return False
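    # Illustration of the filtering above (label names and values are made up): with
    #
    #     'ignore_metrics_by_labels': {
    #         'worker': ['*'],          # drop any sample carrying a `worker` label
    #         'phase': ['Succeeded'],   # drop samples whose `phase` equals 'Succeeded'
    #     }
    #
    # matching samples are skipped before they are submitted.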
def process_metric(self, metric, scraper_config, metric_transformers=None):
"""
Handle a Prometheus metric according to the following flow:
- search `scraper_config['metrics_mapper']` for a prometheus.metric to datadog.metric mapping
- call check method with the same name as the metric
- log info if none of the above worked
`metric_transformers` is a dict of `<metric name>:<function to run when the metric name is encountered>`
"""
# If targeted metric, store labels
self._store_labels(metric, scraper_config)
if scraper_config['ignore_metrics']:
if metric.name in scraper_config['_ignored_metrics']:
self._send_telemetry_counter(
self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config
)
return # Ignore the metric
if scraper_config['_ignored_re'] and scraper_config['_ignored_re'].search(metric.name):
# Metric must be ignored
scraper_config['_ignored_metrics'].add(metric.name)
self._send_telemetry_counter(
self.TELEMETRY_COUNTER_METRICS_IGNORE_COUNT, len(metric.samples), scraper_config
)
return # Ignore the metric
self._send_telemetry_counter(self.TELEMETRY_COUNTER_METRICS_PROCESS_COUNT, len(metric.samples), scraper_config)
if self._filter_metric(metric, scraper_config):
return # Ignore the metric
# Filter metric to see if we can enrich with joined labels
self._join_labels(metric, scraper_config)
if scraper_config['_dry_run']:
return
try:
self.submit_openmetric(scraper_config['metrics_mapper'][metric.name], metric, scraper_config)
except KeyError:
if metric_transformers is not None and metric.name in metric_transformers:
try:
# Get the transformer function for this specific metric
transformer = metric_transformers[metric.name]
transformer(metric, scraper_config)
except Exception as err:
self.log.warning('Error handling metric: %s - error: %s', metric.name, err)
return
            # check for wildcards in transformers
for transformer_name, transformer in iteritems(metric_transformers):
if transformer_name.endswith('*') and metric.name.startswith(transformer_name[:-1]):
transformer(metric, scraper_config, transformer_name)
# try matching wildcards
if scraper_config['_wildcards_re'] and scraper_config['_wildcards_re'].search(metric.name):
self.submit_openmetric(metric.name, metric, scraper_config)
return
self.log.debug(
'Skipping metric `%s` as it is not defined in the metrics mapper, '
'has no transformer function, nor does it match any wildcards.',
metric.name,
)
def poll(self, scraper_config, headers=None):
"""
Returns a valid `requests.Response`, otherwise raise requests.HTTPError if the status code of the
response isn't valid - see `response.raise_for_status()`
The caller needs to close the requests.Response.
Custom headers can be added to the default headers.
"""
endpoint = scraper_config.get('prometheus_url')
        # Whether we should send a service check when we make a request
health_service_check = scraper_config['health_service_check']
service_check_name = self._metric_name_with_namespace('prometheus.health', scraper_config)
service_check_tags = ['endpoint:{}'.format(endpoint)]
service_check_tags.extend(scraper_config['custom_tags'])
try:
response = self.send_request(endpoint, scraper_config, headers)
except requests.exceptions.SSLError:
self.log.error("Invalid SSL settings for requesting %s endpoint", endpoint)
raise
except IOError:
if health_service_check:
self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
raise
try:
response.raise_for_status()
if health_service_check:
self.service_check(service_check_name, AgentCheck.OK, tags=service_check_tags)
return response
except requests.HTTPError:
response.close()
if health_service_check:
self.service_check(service_check_name, AgentCheck.CRITICAL, tags=service_check_tags)
raise
def send_request(self, endpoint, scraper_config, headers=None):
kwargs = {}
if headers:
kwargs['headers'] = headers
http_handler = self.get_http_handler(scraper_config)
return http_handler.get(endpoint, stream=True, **kwargs)
def get_hostname_for_sample(self, sample, scraper_config):
"""
Expose the label_to_hostname mapping logic to custom handler methods
"""
return self._get_hostname(None, sample, scraper_config)
def submit_openmetric(self, metric_name, metric, scraper_config, hostname=None):
"""
For each sample in the metric, report it as a gauge with all labels as tags
except if a labels `dict` is passed, in which case keys are label names we'll extract
and corresponding values are tag names we'll use (eg: {'node': 'node'}).
Histograms generate a set of values instead of a unique metric.
`send_histograms_buckets` is used to specify if you want to
send the buckets as tagged values when dealing with histograms.
`custom_tags` is an array of `tag:value` that will be added to the
metric when sending the gauge to Datadog.
"""
if metric.type in ["gauge", "counter", "rate"]:
metric_name_with_namespace = self._metric_name_with_namespace(metric_name, scraper_config)
for sample in metric.samples:
if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
continue
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
# Determine the tags to send
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
if metric.type == "counter" and scraper_config['send_monotonic_counter']:
self.monotonic_count(
metric_name_with_namespace,
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_successfully_executed'],
)
elif metric.type == "rate":
self.rate(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
else:
self.gauge(metric_name_with_namespace, val, tags=tags, hostname=custom_hostname)
# Metric is a "counter" but legacy behavior has "send_as_monotonic" defaulted to False
# Submit metric as monotonic_count with appended name
if metric.type == "counter" and scraper_config['send_monotonic_with_gauge']:
self.monotonic_count(
metric_name_with_namespace + '.total',
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_successfully_executed'],
)
elif metric.type == "histogram":
self._submit_gauges_from_histogram(metric_name, metric, scraper_config)
elif metric.type == "summary":
self._submit_gauges_from_summary(metric_name, metric, scraper_config)
else:
self.log.error("Metric type %s unsupported for metric %s.", metric.type, metric_name)
def _get_hostname(self, hostname, sample, scraper_config):
"""
If hostname is None, look at label_to_hostname setting
"""
if (
hostname is None
and scraper_config['label_to_hostname'] is not None
and sample[self.SAMPLE_LABELS].get(scraper_config['label_to_hostname'])
):
hostname = sample[self.SAMPLE_LABELS][scraper_config['label_to_hostname']]
suffix = scraper_config['label_to_hostname_suffix']
if suffix is not None:
hostname += suffix
return hostname
def _submit_gauges_from_summary(self, metric_name, metric, scraper_config, hostname=None):
"""
Extracts metrics from a prometheus summary metric and sends them as gauges
"""
for sample in metric.samples:
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
continue
if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
if sample[self.SAMPLE_NAME].endswith("_sum"):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self._submit_distribution_count(
scraper_config['send_distribution_sums_as_monotonic'],
scraper_config['send_monotonic_with_gauge'],
"{}.sum".format(self._metric_name_with_namespace(metric_name, scraper_config)),
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_successfully_executed'],
)
elif sample[self.SAMPLE_NAME].endswith("_count"):
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self._submit_distribution_count(
scraper_config['send_distribution_counts_as_monotonic'],
scraper_config['send_monotonic_with_gauge'],
"{}.count".format(self._metric_name_with_namespace(metric_name, scraper_config)),
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_successfully_executed'],
)
else:
try:
quantile = sample[self.SAMPLE_LABELS]["quantile"]
except KeyError:
# TODO: In the Prometheus spec the 'quantile' label is optional, but it's not clear yet
# what we should do in this case. Let's skip for now and submit the rest of metrics.
message = (
'"quantile" label not present in metric %r. '
'Quantile-less summary metrics are not currently supported. Skipping...'
)
self.log.debug(message, metric_name)
continue
sample[self.SAMPLE_LABELS]["quantile"] = str(float(quantile))
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname=custom_hostname)
self.gauge(
"{}.quantile".format(self._metric_name_with_namespace(metric_name, scraper_config)),
val,
tags=tags,
hostname=custom_hostname,
)
def _submit_gauges_from_histogram(self, metric_name, metric, scraper_config, hostname=None):
"""
Extracts metrics from a prometheus histogram and sends them as gauges
"""
if scraper_config['non_cumulative_buckets']:
self._decumulate_histogram_buckets(metric)
for sample in metric.samples:
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
continue
if self._ignore_metrics_by_label(scraper_config, metric_name, sample):
continue
custom_hostname = self._get_hostname(hostname, sample, scraper_config)
if sample[self.SAMPLE_NAME].endswith("_sum") and not scraper_config['send_distribution_buckets']:
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
self._submit_distribution_count(
scraper_config['send_distribution_sums_as_monotonic'],
scraper_config['send_monotonic_with_gauge'],
"{}.sum".format(self._metric_name_with_namespace(metric_name, scraper_config)),
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_successfully_executed'],
)
elif sample[self.SAMPLE_NAME].endswith("_count") and not scraper_config['send_distribution_buckets']:
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
if scraper_config['send_histograms_buckets']:
tags.append("upper_bound:none")
self._submit_distribution_count(
scraper_config['send_distribution_counts_as_monotonic'],
scraper_config['send_monotonic_with_gauge'],
"{}.count".format(self._metric_name_with_namespace(metric_name, scraper_config)),
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_successfully_executed'],
)
elif scraper_config['send_histograms_buckets'] and sample[self.SAMPLE_NAME].endswith("_bucket"):
if scraper_config['send_distribution_buckets']:
self._submit_sample_histogram_buckets(metric_name, sample, scraper_config, hostname)
elif "Inf" not in sample[self.SAMPLE_LABELS]["le"] or scraper_config['non_cumulative_buckets']:
sample[self.SAMPLE_LABELS]["le"] = str(float(sample[self.SAMPLE_LABELS]["le"]))
tags = self._metric_tags(metric_name, val, sample, scraper_config, hostname)
self._submit_distribution_count(
scraper_config['send_distribution_counts_as_monotonic'],
scraper_config['send_monotonic_with_gauge'],
"{}.count".format(self._metric_name_with_namespace(metric_name, scraper_config)),
val,
tags=tags,
hostname=custom_hostname,
flush_first_value=scraper_config['_successfully_executed'],
)
def _compute_bucket_hash(self, tags):
# we need the unique context for all the buckets
# hence we remove the "le" tag
return hash(frozenset(sorted((k, v) for k, v in iteritems(tags) if k != 'le')))
def _decumulate_histogram_buckets(self, metric):
"""
        Decumulates buckets in a given histogram metric and adds the lower_bound label ("le" being the upper_bound)
"""
bucket_values_by_context_upper_bound = {}
for sample in metric.samples:
if sample[self.SAMPLE_NAME].endswith("_bucket"):
context_key = self._compute_bucket_hash(sample[self.SAMPLE_LABELS])
if context_key not in bucket_values_by_context_upper_bound:
bucket_values_by_context_upper_bound[context_key] = {}
bucket_values_by_context_upper_bound[context_key][float(sample[self.SAMPLE_LABELS]["le"])] = sample[
self.SAMPLE_VALUE
]
sorted_buckets_by_context = {}
for context in bucket_values_by_context_upper_bound:
sorted_buckets_by_context[context] = sorted(bucket_values_by_context_upper_bound[context])
# Tuples (lower_bound, upper_bound, value)
bucket_tuples_by_context_upper_bound = {}
for context in sorted_buckets_by_context:
for i, upper_b in enumerate(sorted_buckets_by_context[context]):
if i == 0:
if context not in bucket_tuples_by_context_upper_bound:
bucket_tuples_by_context_upper_bound[context] = {}
if upper_b > 0:
# positive buckets start at zero
bucket_tuples_by_context_upper_bound[context][upper_b] = (
0,
upper_b,
bucket_values_by_context_upper_bound[context][upper_b],
)
else:
# negative buckets start at -inf
bucket_tuples_by_context_upper_bound[context][upper_b] = (
self.MINUS_INF,
upper_b,
bucket_values_by_context_upper_bound[context][upper_b],
)
continue
tmp = (
bucket_values_by_context_upper_bound[context][upper_b]
- bucket_values_by_context_upper_bound[context][sorted_buckets_by_context[context][i - 1]]
)
bucket_tuples_by_context_upper_bound[context][upper_b] = (
sorted_buckets_by_context[context][i - 1],
upper_b,
tmp,
)
# modify original metric to inject lower_bound & modified value
for i, sample in enumerate(metric.samples):
if not sample[self.SAMPLE_NAME].endswith("_bucket"):
continue
context_key = self._compute_bucket_hash(sample[self.SAMPLE_LABELS])
matching_bucket_tuple = bucket_tuples_by_context_upper_bound[context_key][
float(sample[self.SAMPLE_LABELS]["le"])
]
# Replacing the sample tuple
sample[self.SAMPLE_LABELS]["lower_bound"] = str(matching_bucket_tuple[0])
metric.samples[i] = Sample(sample[self.SAMPLE_NAME], sample[self.SAMPLE_LABELS], matching_bucket_tuple[2])
def _submit_sample_histogram_buckets(self, metric_name, sample, scraper_config, hostname=None):
if "lower_bound" not in sample[self.SAMPLE_LABELS] or "le" not in sample[self.SAMPLE_LABELS]:
self.log.warning(
"Metric: %s was not containing required bucket boundaries labels: %s",
metric_name,
sample[self.SAMPLE_LABELS],
)
return
sample[self.SAMPLE_LABELS]["le"] = str(float(sample[self.SAMPLE_LABELS]["le"]))
sample[self.SAMPLE_LABELS]["lower_bound"] = str(float(sample[self.SAMPLE_LABELS]["lower_bound"]))
if sample[self.SAMPLE_LABELS]["le"] == sample[self.SAMPLE_LABELS]["lower_bound"]:
# this can happen for -inf/-inf bucket that we don't want to send (always 0)
self.log.warning(
"Metric: %s has bucket boundaries equal, skipping: %s", metric_name, sample[self.SAMPLE_LABELS]
)
return
tags = self._metric_tags(metric_name, sample[self.SAMPLE_VALUE], sample, scraper_config, hostname)
self.submit_histogram_bucket(
self._metric_name_with_namespace(metric_name, scraper_config),
sample[self.SAMPLE_VALUE],
float(sample[self.SAMPLE_LABELS]["lower_bound"]),
float(sample[self.SAMPLE_LABELS]["le"]),
True,
hostname,
tags,
)
def _submit_distribution_count(
self,
monotonic,
send_monotonic_with_gauge,
metric_name,
value,
tags=None,
hostname=None,
flush_first_value=False,
):
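        """Submits the value either as a monotonic count or as a gauge.
        If `monotonic` is true the value is sent as a monotonic count; otherwise it
        is sent as a gauge and, when `send_monotonic_with_gauge` is set, an extra
        monotonic count named `<metric_name>.total` is submitted alongside it.
        """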
if monotonic:
self.monotonic_count(metric_name, value, tags=tags, hostname=hostname, flush_first_value=flush_first_value)
else:
self.gauge(metric_name, value, tags=tags, hostname=hostname)
if send_monotonic_with_gauge:
self.monotonic_count(
metric_name + ".total", value, tags=tags, hostname=hostname, flush_first_value=flush_first_value
)
def _metric_tags(self, metric_name, val, sample, scraper_config, hostname=None):
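        """Builds the tag list for a sample: configured custom tags, scraper-level
        metric tags, and the sample labels (minus `exclude_labels`, renamed through
        `labels_mapper`), finalized by `_finalize_tags_to_submit`.
        """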
custom_tags = scraper_config['custom_tags']
_tags = list(custom_tags)
_tags.extend(scraper_config['_metric_tags'])
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name not in scraper_config['exclude_labels']:
tag_name = scraper_config['labels_mapper'].get(label_name, label_name)
_tags.append('{}:{}'.format(to_native_string(tag_name), to_native_string(label_value)))
return self._finalize_tags_to_submit(
_tags, metric_name, val, sample, custom_tags=custom_tags, hostname=hostname
)
def _is_value_valid(self, val):
return not (isnan(val) or isinf(val))
def _get_bearer_token(self, bearer_token_auth, bearer_token_path):
if bearer_token_auth is False:
return None
path = None
if bearer_token_path is not None:
if isfile(bearer_token_path):
path = bearer_token_path
else:
self.log.error("File not found: %s", bearer_token_path)
elif isfile(self.KUBERNETES_TOKEN_PATH):
path = self.KUBERNETES_TOKEN_PATH
if path is None:
self.log.error("Cannot get bearer token from bearer_token_path or auto discovery")
raise IOError("Cannot get bearer token from bearer_token_path or auto discovery")
try:
with open(path, 'r') as f:
return f.read().rstrip()
except Exception as err:
self.log.error("Cannot get bearer token from path: %s - error: %s", path, err)
raise
def _histogram_convert_values(self, metric_name, converter):
def _convert(metric, scraper_config=None):
for index, sample in enumerate(metric.samples):
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
continue
if sample[self.SAMPLE_NAME].endswith("_sum"):
lst = list(sample)
lst[self.SAMPLE_VALUE] = converter(val)
metric.samples[index] = tuple(lst)
elif sample[self.SAMPLE_NAME].endswith("_bucket") and "Inf" not in sample[self.SAMPLE_LABELS]["le"]:
sample[self.SAMPLE_LABELS]["le"] = str(converter(float(sample[self.SAMPLE_LABELS]["le"])))
self.submit_openmetric(metric_name, metric, scraper_config)
return _convert
def _histogram_from_microseconds_to_seconds(self, metric_name):
return self._histogram_convert_values(metric_name, lambda v: v / self.MICROS_IN_S)
def _histogram_from_seconds_to_microseconds(self, metric_name):
return self._histogram_convert_values(metric_name, lambda v: v * self.MICROS_IN_S)
def _summary_convert_values(self, metric_name, converter):
def _convert(metric, scraper_config=None):
for index, sample in enumerate(metric.samples):
val = sample[self.SAMPLE_VALUE]
if not self._is_value_valid(val):
self.log.debug("Metric value is not supported for metric %s", sample[self.SAMPLE_NAME])
continue
if sample[self.SAMPLE_NAME].endswith("_count"):
continue
else:
lst = list(sample)
lst[self.SAMPLE_VALUE] = converter(val)
metric.samples[index] = tuple(lst)
self.submit_openmetric(metric_name, metric, scraper_config)
return _convert
def _summary_from_microseconds_to_seconds(self, metric_name):
return self._summary_convert_values(metric_name, lambda v: v / self.MICROS_IN_S)
def _summary_from_seconds_to_microseconds(self, metric_name):
return self._summary_convert_values(metric_name, lambda v: v * self.MICROS_IN_S)
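# --- Illustrative sketch (added for this excerpt; not part of the shipped check) ---
# `_decumulate_histogram_buckets` above turns cumulative Prometheus bucket counts
# into per-bucket counts keyed by (lower_bound, upper_bound). A minimal standalone
# version of that idea, assuming positive buckets that start at zero:
def _decumulate_example(cumulative_buckets):
    """cumulative_buckets: sorted list of (upper_bound, cumulative_count) tuples."""
    previous_bound, previous_count = 0, 0
    per_bucket = []
    for upper_bound, cumulative_count in cumulative_buckets:
        per_bucket.append((previous_bound, upper_bound, cumulative_count - previous_count))
        previous_bound, previous_count = upper_bound, cumulative_count
    return per_bucket
# Cumulative counts 1, 3, 6 become per-bucket counts 1, 2 and 3:
assert _decumulate_example([(0.1, 1), (0.5, 3), (float('inf'), 6)]) == [
    (0, 0.1, 1), (0.1, 0.5, 2), (0.5, float('inf'), 3)]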
| 47.554054 | 120 | 0.627327 | [
"BSD-3-Clause"
] | DingGGu/integrations-core | datadog_checks_base/datadog_checks/base/checks/openmetrics/mixins.py | 56,304 | Python |
import seq_db
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('mysql+pymysql://ppliu:[email protected]/test')
seq_db.Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
"""
try:
s = seq_db.CLIPSeq()
s.sample_name = 'test_sample'
s.expr_name = 'test_experiment'
s.file_location = 'some/where/on/the/server'
s.species = 'hg19'
s.collab = 'dr. sequencing'
s.collab_institute = 'ucsd'
session.add(s)
session.commit()
except Exception as e:
print e
session.rollback()
session.commit()
"""
try:
for expr in session.query(seq_db.SeqExpr).all():
print expr.sample_name,
print expr.project_name,
print expr.check_file()
except Exception as e:
print e
| 20.825 | 78 | 0.684274 | [
"MIT"
] | YeoLab/gscripts | gscripts/expr_db/test_seqdb.py | 833 | Python |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The utility class for simulator."""
import json
import logging
import os
import pwd
import re
import shutil
import subprocess
import time
from shared import ios_constants
from shared import ios_errors
from shared import plist_util
from shared import xcode_info_util
from simulator_control import simtype_profile
_SIMULATOR_STATES_MAPPING = {
0: ios_constants.SimState.CREATING,
1: ios_constants.SimState.SHUTDOWN,
3: ios_constants.SimState.BOOTED
}
_PREFIX_RUNTIME_ID = 'com.apple.CoreSimulator.SimRuntime.'
_SIM_OPERATION_MAX_ATTEMPTS = 3
_SIMCTL_MAX_ATTEMPTS = 2
_SIMULATOR_CREATING_TO_SHUTDOWN_TIMEOUT_SEC = 10
_SIMULATOR_SHUTDOWN_TIMEOUT_SEC = 30
_SIM_ERROR_RETRY_INTERVAL_SEC = 2
_SIM_CHECK_STATE_INTERVAL_SEC = 0.5
_PATTERN_APP_CRASH_ON_SIM = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\(UIKitApplication:%s(.+)\): Service exited '
'(due to (signal|Terminated|Killed|Abort trap)|with abnormal code)')
_PATTERN_XCTEST_PROCESS_CRASH_ON_SIM = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\((.+)xctest\[[0-9]+\]\): Service exited '
'(due to (signal|Terminated|Killed|Abort trap)|with abnormal code)')
_PATTERN_CORESIMULATOR_CRASH = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\(com\.apple\.CoreSimulator(.+)\): Service exited due to ')
class Simulator(object):
"""The object for simulator in MacOS."""
def __init__(self, simulator_id):
"""Constructor of Simulator object.
Args:
simulator_id: string, the identity of the simulator.
"""
self._simulator_id = simulator_id
self._simulator_root_dir = None
self._simulator_log_root_dir = None
self._device_plist_object = None
@property
def simulator_id(self):
if not self._simulator_id:
raise ios_errors.SimError(
'The simulator has not been created or has been deleted.')
return self._simulator_id
@property
def simulator_system_log_path(self):
return os.path.join(self.simulator_log_root_dir, 'system.log')
@property
def simulator_root_dir(self):
"""Gets the simulator's root directory."""
if not self._simulator_root_dir:
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
self._simulator_root_dir = os.path.join(
'%s/Library/Developer/CoreSimulator/Devices/%s' %
(home_dir, self.simulator_id))
return self._simulator_root_dir
@property
def simulator_log_root_dir(self):
"""Gets the root directory of the simulator's logs."""
if not self._simulator_log_root_dir:
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
self._simulator_log_root_dir = os.path.join(
'%s/Library/Logs/CoreSimulator/%s' % (home_dir, self.simulator_id))
return self._simulator_log_root_dir
@property
def device_plist_object(self):
"""Gets the plist_util.Plist object of device.plist of the simulator.
Returns:
a plist_util.Plist object of device.plist of the simulator or None when
the simulator does not exist or is being created.
"""
if not self._device_plist_object:
device_plist_path = os.path.join(self.simulator_root_dir, 'device.plist')
if not os.path.exists(device_plist_path):
return None
self._device_plist_object = plist_util.Plist(device_plist_path)
return self._device_plist_object
def Shutdown(self):
"""Shuts down the simulator."""
sim_state = self.GetSimulatorState()
if sim_state == ios_constants.SimState.SHUTDOWN:
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
if sim_state == ios_constants.SimState.CREATING:
raise ios_errors.SimError(
'Can not shut down the simulator in state CREATING.')
logging.info('Shutting down simulator %s.', self.simulator_id)
try:
RunSimctlCommand(['xcrun', 'simctl', 'shutdown', self.simulator_id])
except ios_errors.SimError as e:
if 'Unable to shutdown device in current state: Shutdown' in str(e):
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
raise ios_errors.SimError('Failed to shutdown simulator %s: %s' %
(self.simulator_id, str(e)))
self.WaitUntilStateShutdown()
logging.info('Shut down simulator %s.', self.simulator_id)
def Delete(self):
"""Deletes the simulator asynchronously.
The simulator state should be SHUTDOWN when deleting it. Otherwise, it will
    raise an exception.
Raises:
ios_errors.SimError: The simulator's state is not SHUTDOWN.
"""
# In Xcode 9+, simctl can delete Booted simulator. In prior of Xcode 9,
# we have to shutdown the simulator first before deleting it.
if xcode_info_util.GetXcodeVersionNumber() < 900:
sim_state = self.GetSimulatorState()
if sim_state != ios_constants.SimState.SHUTDOWN:
raise ios_errors.SimError(
'Can only delete the simulator with state SHUTDOWN. The current '
'state of simulator %s is %s.' % (self._simulator_id, sim_state))
logging.info('Deleting simulator %s asynchronously.', self.simulator_id)
subprocess.Popen(['xcrun', 'simctl', 'delete', self.simulator_id],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setpgrp)
# The delete command won't delete the simulator log directory.
if os.path.exists(self.simulator_log_root_dir):
shutil.rmtree(self.simulator_log_root_dir, ignore_errors=True)
self._simulator_id = None
def FetchLogToFile(self, output_file_path, start_time=None, end_time=None):
"""Gets simulator log via running `log` tool on simulator.
Args:
output_file_path: string, the path of the stdout file.
      start_time: datetime, the start time of the simulator log.
      end_time: datetime, the end time of the simulator log.
"""
command = [
'xcrun', 'simctl', 'spawn', self._simulator_id, 'log', 'show',
'--style', 'syslog'
]
if start_time:
command.extend(('--start', start_time.strftime('%Y-%m-%d %H:%M:%S')))
if end_time:
command.extend(('--end', end_time.strftime('%Y-%m-%d %H:%M:%S')))
with open(output_file_path, 'w') as stdout_file:
try:
subprocess.Popen(command, stdout=stdout_file, stderr=subprocess.STDOUT)
except ios_errors.SimError as e:
raise ios_errors.SimError('Failed to get log on simulator %s: %s' %
(self.simulator_id, str(e)))
def GetAppDocumentsPath(self, app_bundle_id):
"""Gets the path of the app's Documents directory."""
if xcode_info_util.GetXcodeVersionNumber() >= 830:
try:
app_data_container = RunSimctlCommand([
'xcrun', 'simctl', 'get_app_container', self._simulator_id,
app_bundle_id, 'data'
])
return os.path.join(app_data_container, 'Documents')
except ios_errors.SimError as e:
raise ios_errors.SimError(
'Failed to get data container of the app %s in simulator %s: %s' %
(app_bundle_id, self._simulator_id, str(e)))
apps_dir = os.path.join(self.simulator_root_dir,
'data/Containers/Data/Application')
for sub_dir_name in os.listdir(apps_dir):
container_manager_plist = plist_util.Plist(
os.path.join(apps_dir, sub_dir_name,
'.com.apple.mobile_container_manager.metadata.plist'))
current_app_bundle_id = container_manager_plist.GetPlistField(
'MCMMetadataIdentifier')
if current_app_bundle_id == app_bundle_id:
return os.path.join(apps_dir, sub_dir_name, 'Documents')
raise ios_errors.SimError(
'Failed to get Documents directory of the app %s in simulator %s' %
(app_bundle_id, self._simulator_id))
def IsAppInstalled(self, app_bundle_id):
"""Checks if the simulator has installed the app with given bundle id."""
try:
RunSimctlCommand([
'xcrun', 'simctl', 'get_app_container', self._simulator_id,
app_bundle_id
])
return True
except ios_errors.SimError:
return False
def WaitUntilStateShutdown(self, timeout_sec=_SIMULATOR_SHUTDOWN_TIMEOUT_SEC):
"""Waits until the simulator state becomes SHUTDOWN.
Args:
      timeout_sec: int, timeout in seconds to wait for the simulator state to
        become SHUTDOWN.
Raises:
ios_errors.SimError: when it is timeout to wait the simulator state
becomes SHUTDOWN.
"""
start_time = time.time()
while start_time + timeout_sec >= time.time():
if self.GetSimulatorState() == ios_constants.SimState.SHUTDOWN:
return
time.sleep(_SIM_CHECK_STATE_INTERVAL_SEC)
raise ios_errors.SimError('Timeout to wait for simulator shutdown in %ss.' %
timeout_sec)
def GetSimulatorState(self):
"""Gets the state of the simulator in real time.
Returns:
shared.ios_constants.SimState, the state of the simulator.
Raises:
ios_errors.SimError: The state can not be recognized.
"""
if self.device_plist_object is None:
return ios_constants.SimState.CREATING
state_num = self.device_plist_object.GetPlistField('state')
if state_num not in _SIMULATOR_STATES_MAPPING.keys():
logging.warning('The state %s of simulator %s can not be recognized.',
state_num, self.simulator_id)
return ios_constants.SimState.UNKNOWN
return _SIMULATOR_STATES_MAPPING[state_num]
def CreateNewSimulator(device_type=None, os_version=None, name_prefix=None):
"""Creates a new simulator according to arguments.
If neither device_type nor os_version is given, will use the latest iOS
version and latest iPhone type.
If os_version is given but device_type is not, will use latest iPhone type
according to the OS version limitation. E.g., if the given os_version is 9.3,
the latest simulator type is iPhone 6s Plus. Because the min OS version of
iPhone 7 is 10.0.
If device_type is given but os_version is not, will use the min value
between max OS version of the simulator type and current latest OS version.
E.g., if the given device_type is iPhone 5 and latest OS version is 10.3,
will use 10.2. Because the max OS version of iPhone 5 is 10.2.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
name_prefix: string, name prefix of the new simulator. By default, it is
"New".
Returns:
a tuple with four items:
string, id of the new simulator.
string, simulator device type of the new simulator.
string, OS version of the new simulator.
string, name of the new simulator.
Raises:
ios_errors.SimError: when failed to create new simulator.
ios_errors.IllegalArgumentError: when the given argument is invalid.
"""
if not device_type:
os_type = ios_constants.OS.IOS
else:
_ValidateSimulatorType(device_type)
os_type = GetOsType(device_type)
if not os_version:
os_version = GetLastSupportedSimOsVersion(os_type, device_type=device_type)
else:
supported_sim_os_versions = GetSupportedSimOsVersions(os_type)
if os_version not in supported_sim_os_versions:
raise ios_errors.IllegalArgumentError(
'The simulator os version %s is not supported. Supported simulator '
'os versions are %s.' % (os_version, supported_sim_os_versions))
if not device_type:
device_type = GetLastSupportedIphoneSimType(os_version)
else:
_ValidateSimulatorTypeWithOsVersion(device_type, os_version)
if not name_prefix:
name_prefix = 'New'
name = '%s-%s-%s' % (name_prefix, device_type, os_version)
# Example
# Runtime ID of iOS 10.2: com.apple.CoreSimulator.SimRuntime.iOS-10-2
runtime_id = _PREFIX_RUNTIME_ID + os_type + '-' + os_version.replace('.', '-')
logging.info('Creating a new simulator:\nName: %s\nOS: %s %s\nType: %s', name,
os_type, os_version, device_type)
for i in range(0, _SIM_OPERATION_MAX_ATTEMPTS):
try:
new_simulator_id = RunSimctlCommand(
['xcrun', 'simctl', 'create', name, device_type, runtime_id])
except ios_errors.SimError as e:
raise ios_errors.SimError('Failed to create simulator: %s' % str(e))
new_simulator_obj = Simulator(new_simulator_id)
# After creating a new simulator, its state is CREATING. When the
# simulator's state becomes SHUTDOWN, the simulator is created.
try:
new_simulator_obj.WaitUntilStateShutdown(
_SIMULATOR_CREATING_TO_SHUTDOWN_TIMEOUT_SEC)
logging.info('Created new simulator %s.', new_simulator_id)
return new_simulator_id, device_type, os_version, name
except ios_errors.SimError as error:
logging.debug('Failed to create simulator %s: %s.', new_simulator_id,
error)
      logging.debug('Deleting half-created simulator %s.', new_simulator_id)
new_simulator_obj.Delete()
if i != _SIM_OPERATION_MAX_ATTEMPTS - 1:
logging.debug('Will sleep %ss and retry again.',
_SIM_ERROR_RETRY_INTERVAL_SEC)
        # If the simulator's state does not become SHUTDOWN, there may be something
        # wrong in CoreSimulatorService. Sleeping a short interval (2s) can help
# reduce flakiness.
time.sleep(_SIM_ERROR_RETRY_INTERVAL_SEC)
raise ios_errors.SimError('Failed to create simulator in %d attempts.' %
_SIM_OPERATION_MAX_ATTEMPTS)
def GetSupportedSimDeviceTypes(os_type=None):
"""Gets the name list of supported simulator device types of given OS type.
If os_type is not provided, it will return all supported simulator device
types. The names are got from command result of `xcrun simctl list devices`.
So some simulator device types' names may be different in different Xcode.
E.g., the name of iPad Pro (12.9-inch) in Xcode 7.2.1 is "iPad Pro", but it is
"iPad Pro (12.9-inch)" in Xcode 8+.
Args:
os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,
watchOS, tvOS.
Returns:
a list of string, each item is a simulator device type.
E.g., ["iPhone 5", "iPhone 6 Plus"]
"""
# Example output:
# {
# "devicetypes" : [
# {
# "name" : "iPhone 5",
# "identifier" : "com.apple.CoreSimulator.SimDeviceType.iPhone-5"
# }
# ]
# }
#
# See more examples in testdata/simctl_list_devicetypes.json
sim_types_infos_json = json.loads(
RunSimctlCommand(('xcrun', 'simctl', 'list', 'devicetypes', '-j')))
sim_types = []
for sim_types_info in sim_types_infos_json['devicetypes']:
sim_type = sim_types_info['name']
if (os_type is None or
(os_type == ios_constants.OS.IOS and sim_type.startswith('i')) or
(os_type == ios_constants.OS.TVOS and 'TV' in sim_type) or
(os_type == ios_constants.OS.WATCHOS and 'Watch' in sim_type)):
sim_types.append(sim_type)
return sim_types
def GetLastSupportedIphoneSimType(os_version):
""""Gets the last supported iPhone simulator type of the given OS version.
Currently, the last supported iPhone simulator type is the last iPhone from
the output of `xcrun simctl list devicetypes`.
Args:
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
Returns:
a string, the last supported iPhone simulator type.
Raises:
ios_errors.SimError: when there is no supported iPhone simulator type.
"""
supported_sim_types = GetSupportedSimDeviceTypes(ios_constants.OS.IOS)
supported_sim_types.reverse()
os_version_float = float(os_version)
for sim_type in supported_sim_types:
if sim_type.startswith('iPhone'):
min_os_version_float = float(
simtype_profile.SimTypeProfile(sim_type).min_os_version)
if os_version_float >= min_os_version_float:
return sim_type
raise ios_errors.SimError('Can not find supported iPhone simulator type.')
def GetSupportedSimOsVersions(os_type=ios_constants.OS.IOS):
"""Gets the supported version of given simulator OS type.
Args:
os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,
watchOS, tvOS.
Returns:
a list of string, each item is an OS version number. E.g., ["10.1", "11.0"]
"""
if os_type is None:
os_type = ios_constants.OS.IOS
# Example output:
# {
# "runtimes" : [
# {
# "bundlePath" : "\/Applications\/Xcode10.app\/Contents\/Developer\
# /Platforms\/iPhoneOS.platform\/Developer\/Library\
# /CoreSimulator\/Profiles\/Runtimes\/iOS.simruntime",
# "availabilityError" : "",
# "buildversion" : "16A366",
# "availability" : "(available)",
# "isAvailable" : true,
# "identifier" : "com.apple.CoreSimulator.SimRuntime.iOS-12-0",
# "version" : "12.0",
# "name" : "iOS 12.0"
# }
# }
# See more examples in testdata/simctl_list_runtimes.json
xcode_version_num = xcode_info_util.GetXcodeVersionNumber()
sim_runtime_infos_json = json.loads(
RunSimctlCommand(('xcrun', 'simctl', 'list', 'runtimes', '-j')))
sim_versions = []
for sim_runtime_info in sim_runtime_infos_json['runtimes']:
# Normally, the json does not contain unavailable runtimes. To be safe,
# also checks the 'availability' field.
if 'availability' in sim_runtime_info and sim_runtime_info[
'availability'].find('unavailable') >= 0:
continue
elif 'isAvailable' in sim_runtime_info and not sim_runtime_info[
'isAvailable']:
continue
listed_os_type, listed_os_version = sim_runtime_info['name'].split(' ', 1)
if listed_os_type == os_type:
# `bundlePath` key may not exist in the old Xcode/macOS version.
if 'bundlePath' in sim_runtime_info:
runtime_path = sim_runtime_info['bundlePath']
info_plist_object = plist_util.Plist(
os.path.join(runtime_path, 'Contents/Info.plist'))
min_xcode_version_num = int(info_plist_object.GetPlistField('DTXcode'))
if xcode_version_num >= min_xcode_version_num:
sim_versions.append(listed_os_version)
else:
if os_type == ios_constants.OS.IOS:
ios_major_version, ios_minor_version = listed_os_version.split('.', 1)
          # Ignores the potential build version
ios_minor_version = ios_minor_version[0]
ios_version_num = int(ios_major_version) * 100 + int(
ios_minor_version) * 10
# One Xcode version always maps to one max simulator's iOS version.
          # The rule is roughly max_sim_ios_version <= xcode_version + 200.
# E.g., Xcode 8.3.1/8.3.3 maps to iOS 10.3, Xcode 7.3.1 maps to iOS
# 9.3.
if ios_version_num > xcode_version_num + 200:
continue
sim_versions.append(listed_os_version)
return sim_versions
def GetLastSupportedSimOsVersion(os_type=ios_constants.OS.IOS,
device_type=None):
"""Gets the last supported version of given arguments.
If device_type is given, will return the last supported OS version of the
device type. Otherwise, will return the last supported OS version of the
OS type.
Args:
os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,
watchOS, tvOS.
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
Returns:
a string, the last supported version.
Raises:
ios_errors.SimError: when there is no supported OS version of the given OS.
ios_errors.IllegalArgumentError: when the supported OS version can not match
the given simulator type.
"""
supported_os_versions = GetSupportedSimOsVersions(os_type)
if not supported_os_versions:
raise ios_errors.SimError('Can not find supported OS version of %s.' %
os_type)
if not device_type:
return supported_os_versions[-1]
simtype_max_os_version_float = float(
simtype_profile.SimTypeProfile(device_type).max_os_version)
supported_os_versions.reverse()
for os_version in supported_os_versions:
if float(os_version) <= simtype_max_os_version_float:
return os_version
  raise ios_errors.IllegalArgumentError(
      'The supported OS versions %s can not match simulator type %s. Because '
      'its max OS version is %s' %
      (supported_os_versions, device_type, simtype_max_os_version_float))
def GetOsType(device_type):
"""Gets the OS type of the given simulator.
  This method may not work correctly if the device_type is invalid. Please call
simulator_util.ValidateSimulatorType(device_type, os_version) to validate
it first.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
Returns:
shared.ios_constants.OS.
Raises:
ios_errors.IllegalArgumentError: when the OS type of the given simulator
device type can not be recognized.
"""
if device_type.startswith('i'):
return ios_constants.OS.IOS
if 'TV' in device_type:
return ios_constants.OS.TVOS
if 'Watch' in device_type:
return ios_constants.OS.WATCHOS
raise ios_errors.IllegalArgumentError(
'Failed to recognize the os type for simulator device type %s.' %
device_type)
def _ValidateSimulatorType(device_type):
"""Checks if the simulator type is valid.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
Raises:
ios_errors.IllegalArgumentError: when the given simulator device type is
invalid.
"""
supported_sim_device_types = GetSupportedSimDeviceTypes()
if device_type not in supported_sim_device_types:
raise ios_errors.IllegalArgumentError(
'The simulator device type %s is not supported. Supported simulator '
'device types are %s.' % (device_type, supported_sim_device_types))
def _ValidateSimulatorTypeWithOsVersion(device_type, os_version):
"""Checks if the simulator type with the given os version is valid.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
Raises:
ios_errors.IllegalArgumentError: when the given simulator device type can
not match the given OS version.
"""
os_version_float = float(os_version)
sim_profile = simtype_profile.SimTypeProfile(device_type)
min_os_version_float = float(sim_profile.min_os_version)
if min_os_version_float > os_version_float:
raise ios_errors.IllegalArgumentError(
'The min OS version of %s is %s. But current OS version is %s' %
(device_type, min_os_version_float, os_version))
max_os_version_float = float(sim_profile.max_os_version)
if max_os_version_float < os_version_float:
raise ios_errors.IllegalArgumentError(
'The max OS version of %s is %s. But current OS version is %s' %
(device_type, max_os_version_float, os_version))
def QuitSimulatorApp():
"""Quits the Simulator.app."""
if xcode_info_util.GetXcodeVersionNumber() >= 700:
simulator_name = 'Simulator'
else:
simulator_name = 'iOS Simulator'
subprocess.Popen(['killall', simulator_name],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def IsAppFailedToLaunchOnSim(sim_sys_log, app_bundle_id=''):
"""Checks if the app failed to launch on simulator.
If app_bundle_id is not provided, will check if any UIKitApplication failed
to launch on simulator.
Args:
sim_sys_log: string, the content of the simulator's system.log.
app_bundle_id: string, the bundle id of the app.
Returns:
True if the app failed to launch on simulator.
"""
pattern = re.compile(_PATTERN_APP_CRASH_ON_SIM % app_bundle_id)
return pattern.search(sim_sys_log) is not None
def IsXctestFailedToLaunchOnSim(sim_sys_log):
"""Checks if the xctest process failed to launch on simulator.
Args:
sim_sys_log: string, the content of the simulator's system.log.
Returns:
True if the xctest process failed to launch on simulator.
"""
pattern = re.compile(_PATTERN_XCTEST_PROCESS_CRASH_ON_SIM)
return pattern.search(sim_sys_log) is not None
def IsCoreSimulatorCrash(sim_sys_log):
"""Checks if CoreSimulator crashes.
Args:
sim_sys_log: string, the content of the simulator's system.log.
Returns:
True if the CoreSimulator crashes.
"""
pattern = re.compile(_PATTERN_CORESIMULATOR_CRASH)
return pattern.search(sim_sys_log) is not None
def RunSimctlCommand(command):
"""Runs simctl command."""
for i in range(_SIMCTL_MAX_ATTEMPTS):
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if ios_constants.CORESIMULATOR_CHANGE_ERROR in stderr:
output = stdout
else:
output = '\n'.join([stdout, stderr])
output = output.strip()
if process.poll() != 0:
if (i < (_SIMCTL_MAX_ATTEMPTS - 1) and
ios_constants.CORESIMULATOR_INTERRUPTED_ERROR in output):
continue
raise ios_errors.SimError(output)
return output
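# --- Illustrative usage sketch (added; not part of the original module) ---
# Rough lifecycle of the helpers above on a macOS host with Xcode installed:
# create a simulator, check its state, then shut it down and delete it. Guarded
# by __main__ so importing this module is unaffected.
if __name__ == '__main__':
  demo_id, demo_type, demo_os, demo_name = CreateNewSimulator(name_prefix='Demo')
  logging.info('Created %s: %s %s (%s)', demo_name, demo_type, demo_os, demo_id)
  demo_simulator = Simulator(demo_id)
  try:
    logging.info('Current state: %s', demo_simulator.GetSimulatorState())
    demo_simulator.Shutdown()
  finally:
    demo_simulator.Delete()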
| 38.611111 | 80 | 0.699356 | [
"Apache-2.0"
] | ios-bazel-users/xctestrunner | simulator_control/simulator_util.py | 26,410 | Python |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter # useful for `logit` scale
# Fixing random state for reproducibility
np.random.seed(19680801)
# make up some data in the interval ]0, 1[
y = np.random.normal(loc=0.5, scale=0.4, size=1000)
y = y[(y > 0) & (y < 1)]
y.sort()
x = np.arange(len(y))
# plot with various axes scales
plt.figure(1)
# linear
plt.subplot(221)
plt.plot(x, y)
plt.yscale('linear')
plt.title('linear')
plt.grid(True)
# log
plt.subplot(222)
plt.plot(x, y)
plt.yscale('log')
plt.title('log')
plt.grid(True)
# symmetric log
plt.subplot(223)
plt.plot(x, y - y.mean())
plt.yscale('symlog', linthreshy=0.01)
plt.title('symlog')
plt.grid(True)
# logit
plt.subplot(224)
plt.plot(x, y)
plt.yscale('logit')
plt.title('logit')
plt.grid(True)
# Format the minor tick labels of the y-axis into empty strings with
# `NullFormatter`, to avoid cumbering the axis with too many labels.
plt.gca().yaxis.set_minor_formatter(NullFormatter())
# Adjust the subplot layout, because the logit one may take more space
# than usual, due to y-tick labels like "1 - 10^{-3}"
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25,
wspace=0.35)
plt.show() | 22.672727 | 78 | 0.700882 | [
"MIT"
] | Henrynaut/Py622 | logAxes.py | 1,247 | Python |
# The MIT License (MIT)
#
# Copyright (c) 2016 Scott Shawcroft for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_bus_device.i2c_device` - I2C Bus Device
====================================================
"""
__version__ = "3.0.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BusDevice.git"
class I2CDevice:
"""
Represents a single I2C device and manages locking the bus and the device
address.
:param ~busio.I2C i2c: The I2C bus the device is on
:param int device_address: The 7 bit device address
.. note:: This class is **NOT** built into CircuitPython. See
:ref:`here for install instructions <bus_device_installation>`.
Example:
.. code-block:: python
import busio
from board import *
from adafruit_bus_device.i2c_device import I2CDevice
with busio.I2C(SCL, SDA) as i2c:
device = I2CDevice(i2c, 0x70)
bytes_read = bytearray(4)
with device:
device.readinto(bytes_read)
# A second transaction
with device:
device.write(bytes_read)
"""
def __init__(self, i2c, device_address):
"""
Try to read a byte from an address,
if you get an OSError it means the device is not there
"""
while not i2c.try_lock():
pass
try:
i2c.writeto(device_address, b'')
except OSError:
            # some OSes don't like writing an empty bytestring...
# Retry by reading a byte
try:
result = bytearray(1)
i2c.readfrom_into(device_address, result)
except OSError:
raise ValueError("No I2C device at address: %x" % device_address)
finally:
i2c.unlock()
self.i2c = i2c
self.device_address = device_address
def readinto(self, buf, **kwargs):
"""
Read into ``buf`` from the device. The number of bytes read will be the
length of ``buf``.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buf[start:end]``. This will not cause an allocation like
``buf[start:end]`` will so it saves memory.
        :param bytearray buf: buffer to read the bytes into
:param int start: Index to start writing at
:param int end: Index to write up to but not include
"""
self.i2c.readfrom_into(self.device_address, buf, **kwargs)
def write(self, buf, **kwargs):
"""
Write the bytes from ``buffer`` to the device. Transmits a stop bit if
``stop`` is set.
If ``start`` or ``end`` is provided, then the buffer will be sliced
as if ``buffer[start:end]``. This will not cause an allocation like
``buffer[start:end]`` will so it saves memory.
        :param bytearray buf: buffer containing the bytes to write
        :param int start: Index to start writing from
        :param int end: Index to write up to but not include
:param bool stop: If true, output an I2C stop condition after the buffer is written
"""
self.i2c.writeto(self.device_address, buf, **kwargs)
#pylint: disable-msg=too-many-arguments
def write_then_readinto(self, out_buffer, in_buffer, *,
out_start=0, out_end=None, in_start=0, in_end=None, stop=True):
"""
Write the bytes from ``out_buffer`` to the device, then immediately
reads into ``in_buffer`` from the device. The number of bytes read
will be the length of ``in_buffer``.
Transmits a stop bit after the write, if ``stop`` is set.
If ``out_start`` or ``out_end`` is provided, then the output buffer
will be sliced as if ``out_buffer[out_start:out_end]``. This will
not cause an allocation like ``buffer[out_start:out_end]`` will so
it saves memory.
If ``in_start`` or ``in_end`` is provided, then the input buffer
will be sliced as if ``in_buffer[in_start:in_end]``. This will not
cause an allocation like ``in_buffer[in_start:in_end]`` will so
it saves memory.
:param bytearray out_buffer: buffer containing the bytes to write
:param bytearray in_buffer: buffer containing the bytes to read into
:param int out_start: Index to start writing from
:param int out_end: Index to read up to but not include
:param int in_start: Index to start writing at
:param int in_end: Index to write up to but not include
:param bool stop: If true, output an I2C stop condition after the buffer is written
"""
if out_end is None:
out_end = len(out_buffer)
if in_end is None:
in_end = len(in_buffer)
if hasattr(self.i2c, 'writeto_then_readfrom'):
# In linux, at least, this is a special kernel function call
self.i2c.writeto_then_readfrom(self.device_address, out_buffer, in_buffer,
out_start=out_start, out_end=out_end,
in_start=in_start, in_end=in_end, stop=stop)
else:
# If we don't have a special implementation, we can fake it with two calls
self.write(out_buffer, start=out_start, end=out_end, stop=stop)
self.readinto(in_buffer, start=in_start, end=in_end)
#pylint: enable-msg=too-many-arguments
def __enter__(self):
while not self.i2c.try_lock():
pass
return self
def __exit__(self, *exc):
self.i2c.unlock()
return False
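# --- Illustrative helper (added sketch; not part of the original driver) ---
# A common register-read pattern with I2CDevice: write the register address and
# read the reply inside one locked transaction via write_then_readinto. The
# register layout is hypothetical and depends on the target chip.
def read_register(i2c_device, register, length=1):
    """Reads ``length`` bytes from ``register`` of the given I2CDevice (sketch)."""
    result = bytearray(length)
    with i2c_device:
        i2c_device.write_then_readinto(bytes([register]), result)
    return result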
| 40.23494 | 91 | 0.636473 | [
"MIT"
] | rhthomas/Adafruit_CircuitPython_NRF24L01 | adafruit_bus_device/i2c_device.py | 6,679 | Python |
# Solution of;
# Project Euler Problem 564: Maximal polygons
# https://projecteuler.net/problem=564
#
# A line segment of length $2n-3$ is randomly split into $n$ segments of
# integer length ($n \ge 3$). In the sequence given by this split, the
# segments are then used as consecutive sides of a convex $n$-polygon, formed
# in such a way that its area is maximal. All of the $\binom{2n-4}{n-1}$
# possibilities for splitting up the initial line segment occur with the same
# probability. Let $E(n)$ be the expected value of the area that is obtained
# by this procedure. For example, for $n=3$ the only possible split of the
# line segment of length $3$ results in three line segments with length $1$,
# that form an equilateral triangle with an area of $\frac 1 4 \sqrt{3}$.
# Therefore $E(3)=0.433013$, rounded to $6$ decimal places. For $n=4$ you can
# find $4$ different possible splits, each of which is composed of three line
# segments with length $1$ and one line segment with length $2$. All of these
# splits lead to the same maximal quadrilateral with an area of $\frac 3 4
# \sqrt{3}$, thus $E(4)=1.299038$, rounded to $6$ decimal places. Let
# $S(k)=\displaystyle \sum_{n=3}^k E(n)$. For example, $S(3)=0.433013$,
# $S(4)=1.732051$, $S(5)=4.604767$ and $S(10)=66.955511$, rounded to $6$
# decimal places each. Find $S(50)$, rounded to $6$ decimal places.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 564
timed.caller(dummy, n, i, prob_id)
| 44.416667 | 79 | 0.68793 | [
"MIT"
] | lcsm29/project-euler | py/py_0564_maximal_polygons.py | 1,599 | Python |
# Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import cachetclient.cachet as cachet
import json
ENDPOINT = 'http://status.domain.tld/api/v1'
API_TOKEN = 'token'
# /ping
ping = cachet.Ping(endpoint=ENDPOINT)
print(ping.get())
# /version
version = cachet.Version(endpoint=ENDPOINT)
print(version.get())
# /components
components = cachet.Components(endpoint=ENDPOINT, api_token=API_TOKEN)
new_component = json.loads(components.post(name='Test component',
status=1,
description='Test component'))
print(components.get())
components.put(id=new_component['data']['id'], description='Updated component')
print(components.get(id=new_component['data']['id']))
components.delete(id=new_component['data']['id'])
# /components/groups
groups = cachet.Groups(endpoint=ENDPOINT, api_token=API_TOKEN)
new_group = json.loads(groups.post(name='Test group'))
print(groups.get())
groups.put(id=new_group['data']['id'], name='Updated group')
print(groups.get(id=new_group['data']['id']))
groups.delete(new_group['data']['id'])
# /incidents
incidents = cachet.Incidents(endpoint=ENDPOINT, api_token=API_TOKEN)
new_incident = json.loads(incidents.post(name='Test incident',
message='Houston, we have a problem.',
status=1))
print(incidents.get())
incidents.put(id=new_incident['data']['id'],
message="There's another problem, Houston.")
print(incidents.get(id=new_incident['data']['id']))
incidents.delete(id=new_incident['data']['id'])
# /metrics
# /metrics/points
metrics = cachet.Metrics(endpoint=ENDPOINT, api_token=API_TOKEN)
new_metric = json.loads(metrics.post(name='Test metric',
suffix='Numbers per hour',
description='How many numbers per hour',
default_value=0))
print(metrics.get())
print(metrics.get(id=new_metric['data']['id']))
points = cachet.Points(endpoint=ENDPOINT, api_token=API_TOKEN)
new_point = json.loads(points.post(id=new_metric['data']['id'], value=5))
print(points.get(metric_id=new_metric['data']['id']))
points.delete(metric_id=new_metric['data']['id'],
point_id=new_point['data']['id'])
metrics.delete(id=new_metric['data']['id'])
# /subscribers
subscribers = cachet.Subscribers(endpoint=ENDPOINT, api_token=API_TOKEN)
new_subscriber = json.loads(subscribers.post(email='[email protected]'))
subscribers.delete(id=new_subscriber['data']['id'])
| 38.691358 | 79 | 0.674537 | [
"Apache-2.0"
] | MScienceLLC/python-mscachetclient | contrib/example.py | 3,134 | Python |
import itertools
import json
import logging
import time
from typing import Optional, Union, Set, List
import click
from lib.amazon import get_current_key, release_for, get_releases, get_events_file, save_event_file
from lib.env import Config
from lib.instance import Instance
from lib.releases import Hash, Release
logger = logging.getLogger(__name__)
def sizeof_fmt(num: Union[int, float], suffix='B') -> str:
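    """Formats a byte count as a human-readable string, e.g. sizeof_fmt(1536) -> '1.5KiB'."""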
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def describe_current_release(cfg: Config) -> str:
current = get_current_key(cfg)
if not current:
return "none"
r = release_for(get_releases(), current)
if r:
return str(r)
else:
return "non-standard release with s3 key '{}'".format(current)
def wait_for_autoscale_state(instance: Instance, state: str) -> None:
logger.info("Waiting for %s to reach autoscale lifecycle '%s'...", instance, state)
while True:
autoscale = instance.describe_autoscale()
if not autoscale:
logger.error("Instance is not longer in an ASG: stopping")
return
cur_state = autoscale['LifecycleState']
logger.debug("State is %s", cur_state)
if cur_state == state:
logger.info("...done")
return
time.sleep(5)
def get_events(cfg: Config) -> dict:
events = json.loads(get_events_file(cfg))
if 'ads' not in events:
events['ads'] = []
if 'decorations' not in events:
events['decorations'] = []
if 'motd' not in events:
events['motd'] = ''
if 'update' not in events:
events['update'] = ''
return events
def save_events(cfg: Config, events) -> None:
save_event_file(cfg, json.dumps(events))
def update_motd(cfg: Config, motd: str) -> str:
events = get_events(cfg)
old_motd = events['motd']
events['motd'] = motd
save_events(cfg, events)
return old_motd
def set_update_message(cfg: Config, message: str):
events = get_events(cfg)
events['update'] = message
save_events(cfg, events)
def are_you_sure(name: str, cfg: Optional[Config] = None) -> bool:
env_name = cfg.env.value if cfg else 'global'
while True:
typed = input(
f'Confirm operation: "{name}" in env {env_name}\nType the name of the environment to proceed: ')
if typed == env_name:
return True
def display_releases(current: Union[str, Hash], filter_branches: Set[str], releases: List[Release]) -> None:
max_branch_len = max(10, max((len(release.branch) for release in releases), default=10))
release_format = '{: <5} {: <' + str(max_branch_len) + '} {: <10} {: <10} {: <14}'
click.echo(release_format.format('Live', 'Branch', 'Version', 'Size', 'Hash'))
for _, grouped_releases in itertools.groupby(releases, lambda r: r.branch):
for release in grouped_releases:
if not filter_branches or release.branch in filter_branches:
click.echo(
release_format.format(
' -->' if (release.key == current or release.hash == current) else '',
release.branch, str(release.version), sizeof_fmt(release.size), str(release.hash))
)
def confirm_branch(branch: str) -> bool:
while True:
typed = input('Confirm build branch "{}"\nType the name of the branch: '.format(branch))
if typed == branch:
return True
def confirm_action(description: str) -> bool:
typed = input('{}: [Y/N]\n'.format(description))
return typed.upper() == 'Y'
| 32.582609 | 108 | 0.622098 | [
"BSD-2-Clause"
] | compiler-explorer/compiler-explorer-image | bin/lib/ce_utils.py | 3,747 | Python |
a = int(input('Digite um número para saber sua tabuada :'))
n1 = a*1
n2 = a*2
n3 = a*3
n4 = a*4
n5 = a*5
n6 = a*6
n7 = a*7
n8 = a*8
n9 = a*9
n10 = a*10
print('A sua tabuada é')
print('{} x 1 = {}'.format(a, n1))
print('{} x 2 = {}'.format(a, n2))
print('{} x 3 = {}'.format(a, n3))
print('{} x 4 = {}'.format(a, n4))
print('{} x 5 = {}'.format(a, n5))
print('{} x 6 = {}'.format(a, n6))
print('{} x 7 = {}'.format(a, n7))
print('{} x 8 = {}'.format(a, n8))
print('{} x 9 = {}'.format(a, n9))
print('{} x 10 = {}'.format(a, n10)) | 24 | 59 | 0.498106 | [
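# Added illustration (not part of the original exercise): the same table can be
# produced with a loop. Kept as a comment so the original output stays unchanged.
# for i in range(1, 11):
#     print('{} x {} = {}'.format(a, i, a * i))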
"MIT"
] | emerfelippini/Curso_em_video-Aulas_Python | ex009a.py | 530 | Python |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from profiles.models import Developer, Provider
class DeveloperInline(admin.StackedInline):
model = Developer
can_delete = False
verbose_name_plural = 'Developer'
fk_name = 'user'
class ProviderInline(admin.StackedInline):
model = Provider
can_delete = False
verbose_name_plural = 'Provider'
fk_name = 'user'
class CustomUserAdmin(UserAdmin):
inlines = (DeveloperInline, ProviderInline, )
def get_inline_instances(self, request, obj=None):
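        # Hide the profile inlines on the "add user" form (obj is None there); they
        # are only rendered when editing an existing user.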
if not obj:
return list()
return super(CustomUserAdmin, self).get_inline_instances(request, obj)
admin.site.unregister(User)
admin.site.register(User, CustomUserAdmin) | 26.096774 | 78 | 0.74042 | [
"Apache-2.0"
] | pacslab/ChainFaaS | ServerlessController/profiles/admin.py | 809 | Python |
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver import Firefox, Chrome, PhantomJS
from selenium import webdriver
from argparse import ArgumentParser
from urllib.parse import quote
import time
import copy
import sys
import os
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
TIMEOUT = 20
TIMESLP = 3
my_sender = '[email protected]'  # sender email account
my_pass = 'fujkixpkjiyhcaji'  # sender email password
my_user = '[email protected]'  # recipient email account
def mail():
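    """Emails the saved screenshot (result.png) to report a successful submission; returns True on success."""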
ret = True
try:
cur_time = time.strftime("%d/%m/%Y")
msgRoot = MIMEMultipart('related')
msgRoot['From'] = Header('PKU-AutoSubmit', 'utf-8')
msgRoot['To'] = Header('student', 'utf-8')
subject = cur_time + ' 报备成功!'
msgRoot['Subject'] = Header(subject, 'utf-8')
msgAlternative = MIMEMultipart('alternative')
msgRoot.attach(msgAlternative)
mail_msg = """
<p>自动报备成功!</p>
<p>截图:</p>
<p><img src="cid:image1"></p>
"""
msgAlternative.attach(MIMEText(mail_msg, 'html', 'utf-8'))
        # the screenshot lives in the current directory
fp = open('result.png', 'rb')
msgImage = MIMEImage(fp.read())
fp.close()
        # define the image ID that is referenced in the HTML body
msgImage.add_header('Content-ID', '<image1>')
msgRoot.attach(msgImage)
        server = smtplib.SMTP_SSL("smtp.qq.com", 465)  # SMTP server of the sender's mailbox, SSL on port 465
        server.login(my_sender, my_pass)  # sender email account and password
        server.sendmail(my_sender, [my_user, ], msgRoot.as_string())  # sender, recipient list, message
        server.quit()  # close the connection
    except Exception:  # if anything in the try block raised, fall through to ret = False below
ret = False
return ret
def login(driver, username, password, failed=0):
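    """Logs into the PKU portal via IAAA, retrying up to 3 times before raising."""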
if failed == 3:
raise Exception('门户登录失败')
iaaaUrl = 'https://iaaa.pku.edu.cn/iaaa/oauth.jsp'
appName = quote('北京大学校内信息门户新版')
redirectUrl = 'https://portal.pku.edu.cn/portal2017/ssoLogin.do'
driver.get('https://portal.pku.edu.cn/portal2017/')
driver.get(
f'{iaaaUrl}?appID=portal2017&appName={appName}&redirectUrl={redirectUrl}'
)
print('门户登陆中...')
driver.find_element_by_id('user_name').send_keys(username)
time.sleep(TIMESLP)
driver.find_element_by_id('password').send_keys(password)
time.sleep(TIMESLP)
driver.find_element_by_id('logon_button').click()
try:
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.LINK_TEXT, '我知道了')))
except:
pass
else:
driver.find_element_by_link_text('我知道了').click()
try:
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'all')))
except:
login(driver, username, password, failed + 1)
else:
print('门户登录成功!')
def go_to_application_out(driver):
driver.find_element_by_id('all').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'tag_s_stuCampusExEnReq')))
driver.find_element_by_id('tag_s_stuCampusExEnReq').click()
time.sleep(TIMESLP)
driver.switch_to.window(driver.window_handles[-1])
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
time.sleep(TIMESLP)
driver.find_element_by_class_name('el-card__body').click()
time.sleep(TIMESLP)
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-input__inner')))
def go_to_application_in(driver):
driver.get('https://portal.pku.edu.cn/portal2017/#/bizCenter')
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'all')))
driver.find_element_by_id('all').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'tag_s_stuCampusExEnReq')))
driver.find_element_by_id('tag_s_stuCampusExEnReq').click()
time.sleep(TIMESLP)
driver.switch_to.window(driver.window_handles[-1])
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
time.sleep(TIMESLP)
driver.find_element_by_class_name('el-card__body').click()
time.sleep(TIMESLP)
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-select')))
def select_in_out(driver, way):
driver.find_element_by_class_name('el-select').click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{way}"]').click()
def select_campus(driver, campus):
driver.find_elements_by_class_name('el-select')[1].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{campus}"]').click()
def select_destination(driver, destination):
driver.find_elements_by_class_name('el-select')[2].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{destination}"]').click()
def select_district(driver, district):
driver.find_elements_by_class_name('el-select')[3].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{district}"]').click()
def write_reason(driver, reason):
driver.find_element_by_class_name('el-textarea__inner').send_keys(
f'{reason}')
time.sleep(TIMESLP)
def write_track(driver, track):
driver.find_elements_by_class_name('el-textarea__inner')[1].send_keys(
f'{track}')
time.sleep(TIMESLP)
def write_street(driver, street):
driver.find_elements_by_class_name('el-textarea__inner')[1].send_keys(
f'{street}')
time.sleep(TIMESLP)
def click_check(driver):
driver.find_element_by_class_name('el-checkbox__label').click()
time.sleep(TIMESLP)
def click_inPeking(driver):
driver.find_element_by_class_name('el-radio__inner').click()
time.sleep(TIMESLP)
def submit(driver):
driver.find_element_by_xpath(
'//button/span[contains(text(),"保存")]').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located(
(By.XPATH, '(//button/span[contains(text(),"提交")])[3]')))
driver.find_element_by_xpath(
'(//button/span[contains(text(),"提交")])[3]').click()
time.sleep(TIMESLP)
def screen_capture(driver):
driver.back()
driver.back()
WebDriverWait(driver, 5).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
driver.find_elements_by_class_name('el-card__body')[1].click()
WebDriverWait(driver, 5).until(
EC.visibility_of_element_located(
(By.XPATH, '//button/span[contains(text(),"加载更多")]')))
driver.maximize_window()
time.sleep(0.1)
driver.save_screenshot('result.png')
print('备案历史截图已保存')
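# Fill in and submit the campus exit record: direction, campus, reason,
# destination and planned route.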
def fill_out(driver, campus, reason, destination, track):
print('开始填报出校备案')
print('选择出校/入校 ', end='')
select_in_out(driver, '出校')
print('Done')
print('选择校区 ', end='')
select_campus(driver, campus)
print('Done')
print('填写出入校事由 ', end='')
write_reason(driver, reason)
print('Done')
print('选择出校目的地 ', end='')
select_destination(driver, destination)
print('Done')
print('填写出校行动轨迹 ', end='')
write_track(driver, track)
print('Done')
click_check(driver)
submit(driver)
print('出校备案填报完毕!')
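# Fill in and submit the campus entry record; only residence in Beijing is
# handled automatically (otherwise an exception asks for manual filing).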
def fill_in(driver, campus, reason, habitation, district, street):
print('开始填报入校备案')
print('选择出校/入校 ', end='')
select_in_out(driver, '入校')
print('Done')
print('填写出入校事由 ', end='')
write_reason(driver, reason)
print('Done')
if habitation != '北京':
raise Exception('暂不支持京外入校备案,请手动填写')
print('选择居住地所在区 ', end='')
select_district(driver, district)
print('Done')
print('填写居住地所在街道 ', end='')
write_street(driver, street)
print('Done')
click_inPeking(driver)
click_check(driver)
submit(driver)
print('入校备案填报完毕!')
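# End-to-end flow: log in, file the exit record, then the entry record,
# capture a screenshot of the filing history, and e-mail the result.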
def run(driver, username, password, campus, reason, destination, track,
habitation, district, street):
login(driver, username, password)
print('=================================')
go_to_application_out(driver)
fill_out(driver, campus, reason, destination, track)
print('=================================')
go_to_application_in(driver)
fill_in(driver, campus, reason, habitation, district, street)
print('=================================')
screen_capture(driver)
print('=================================')
ret = mail()
if ret:
print("邮件发送成功")
else:
print("邮件发送失败")
print('可以愉快的玩耍啦!')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--username', '-u', type=str, help='用户名')
parser.add_argument('--password', '-p', type=str, help='密码')
parser.add_argument('--campus', type=str, help='所在校区, 燕园、万柳、畅春园、圆明园、中关新园', default='燕园')
parser.add_argument('--reason', type=str, help='出校原因, eg. 吃饭', default='上课')
parser.add_argument('--destination', type=str, help='出校目的地, eg. 北京', default='北京')
parser.add_argument('--track', type=str, help='出校轨迹, eg. 畅春园食堂', default='东南门-理教-勺园')
parser.add_argument('--habitation', type=str, help='入校前居住地, eg. 北京', default='北京')
parser.add_argument('--district', type=str, help='入校前居住所在区, eg. 海淀区', default='海淀区')
parser.add_argument('--street', type=str, help='入校前居住所在街道, eg. 燕园街道', default='燕园街道')
args = parser.parse_args()
args_public = copy.deepcopy(args)
args_public.password = 'xxxxxxxx'
print('Arguments: {}'.format(args_public))
print('Driver Launching...')
# driver = Firefox()
# driver = Chrome()
if sys.platform == 'darwin': # macOS
phantomjs_path = os.path.join('phantomjs', 'phantomjs-darwin')
elif sys.platform == 'linux': # linux
phantomjs_path = os.path.join('phantomjs', 'phantomjs-linux-x86_64')
else: # windows
phantomjs_path = os.path.join('phantomjs', 'phantomjs-windows.exe')
driver = PhantomJS(executable_path=phantomjs_path)
run(driver, args.username, args.password, args.campus, args.reason,
args.destination, args.track, args.habitation, args.district,
args.street)
driver.close()
| 31.58982 | 99 | 0.660696 | [
"Apache-2.0"
] | yoonseo0917/PKUAutoSubmit | main.py | 11,423 | Python |
#!/usr/bin/python
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_identity_mfa_totp_device_facts
short_description: Fetches details about one or multiple MfaTotpDevice resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple MfaTotpDevice resources in Oracle Cloud Infrastructure
- Lists the MFA TOTP devices for the specified user. The returned object contains the device's OCID, but not
the seed. The seed is returned only upon creation or when the IAM service regenerates the MFA seed for the device.
- If I(mfa_totp_device_id) is specified, the details of a single MfaTotpDevice will be returned.
version_added: "2.9"
author: Oracle (@oracle)
options:
user_id:
description:
- The OCID of the user.
type: str
required: true
mfa_totp_device_id:
description:
- The OCID of the MFA TOTP device.
- Required to get a specific mfa_totp_device.
type: str
aliases: ["id"]
sort_by:
description:
- The field to sort by. You can provide one sort order (`sortOrder`). Default order for
TIMECREATED is descending. Default order for NAME is ascending. The NAME
sort order is case sensitive.
- "**Note:** In general, some \\"List\\" operations (for example, `ListInstances`) let you
optionally filter by Availability Domain if the scope of the resource type is within a
single Availability Domain. If you call one of these \\"List\\" operations without specifying
an Availability Domain, the resources are grouped by Availability Domain, then sorted."
type: str
choices:
- "TIMECREATED"
- "NAME"
sort_order:
description:
- The sort order to use, either ascending (`ASC`) or descending (`DESC`). The NAME sort order
is case sensitive.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List mfa_totp_devices
oci_identity_mfa_totp_device_facts:
user_id: ocid1.user.oc1..xxxxxxEXAMPLExxxxxx
- name: Get a specific mfa_totp_device
oci_identity_mfa_totp_device_facts:
user_id: ocid1.user.oc1..xxxxxxEXAMPLExxxxxx
mfa_totp_device_id: ocid1.mfatotpdevice.oc1..xxxxxxEXAMPLExxxxxx
"""
RETURN = """
mfa_totp_devices:
description:
- List of MfaTotpDevice resources
returned: on success
type: complex
contains:
id:
description:
- The OCID of the MFA TOTP Device.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
user_id:
description:
- The OCID of the user the MFA TOTP device belongs to.
returned: on success
type: string
sample: ocid1.user.oc1..xxxxxxEXAMPLExxxxxx
time_created:
description:
- Date and time the `MfaTotpDevice` object was created, in the format defined by RFC3339.
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
time_expires:
description:
- Date and time when this MFA TOTP device will expire, in the format defined by RFC3339.
Null if it never expires.
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
lifecycle_state:
description:
- The MFA TOTP device's current state.
returned: on success
type: string
sample: CREATING
inactive_status:
description:
- "The detailed status of INACTIVE lifecycleState.
Allowed values are:
- 1 - SUSPENDED
- 2 - DISABLED
- 4 - BLOCKED
- 8 - LOCKED"
returned: on success
type: int
sample: 56
is_activated:
description:
- Flag to indicate if the MFA TOTP device has been activated
returned: on success
type: bool
sample: true
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"user_id": "ocid1.user.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2016-08-25T21:10:29.600Z",
"time_expires": "2016-08-25T21:10:29.600Z",
"lifecycle_state": "CREATING",
"inactive_status": 56,
"is_activated": true
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.identity import IdentityClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class MfaTotpDeviceFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"user_id",
"mfa_totp_device_id",
]
def get_required_params_for_list(self):
return [
"user_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_mfa_totp_device,
user_id=self.module.params.get("user_id"),
mfa_totp_device_id=self.module.params.get("mfa_totp_device_id"),
)
def list_resources(self):
optional_list_method_params = [
"sort_by",
"sort_order",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_mfa_totp_devices,
user_id=self.module.params.get("user_id"),
**optional_kwargs
)
MfaTotpDeviceFactsHelperCustom = get_custom_class("MfaTotpDeviceFactsHelperCustom")
class ResourceFactsHelper(MfaTotpDeviceFactsHelperCustom, MfaTotpDeviceFactsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
user_id=dict(type="str", required=True),
mfa_totp_device_id=dict(aliases=["id"], type="str"),
sort_by=dict(type="str", choices=["TIMECREATED", "NAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="mfa_totp_device",
service_client_class=IdentityClient,
namespace="identity",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(mfa_totp_devices=result)
if __name__ == "__main__":
main()
| 33.241803 | 120 | 0.631365 | [
"Apache-2.0"
] | hanielburton/oci-ansible-collection | plugins/modules/oci_identity_mfa_totp_device_facts.py | 8,111 | Python |
from django.views.generic.base import TemplateResponseMixin, View
from django.http import HttpResponseRedirect
from django.forms.formsets import formset_factory
from django.forms.models import modelformset_factory, inlineformset_factory
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
from django.forms.models import BaseInlineFormSet
class BaseFormSetMixin(object):
"""
Base class for constructing a FormSet within a view
"""
initial = []
form_class = None
formset_class = None
success_url = None
extra = 2
max_num = None
can_order = False
can_delete = False
def construct_formset(self):
return self.get_formset()(initial=self.get_initial(), **self.get_formset_kwargs())
def get_initial(self):
return self.initial
def get_formset_class(self):
return self.formset_class
def get_form_class(self):
return self.form_class
def get_formset(self):
return formset_factory(self.get_form_class(), **self.get_factory_kwargs())
def get_formset_kwargs(self):
kwargs = {}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_factory_kwargs(self):
kwargs = {
'extra': self.extra,
'max_num': self.max_num,
'can_order': self.can_order,
'can_delete': self.can_delete,
}
if self.get_formset_class():
kwargs['formset'] = self.get_formset_class()
return kwargs
class FormSetMixin(BaseFormSetMixin):
def get_context_data(self, **kwargs):
return kwargs
def get_success_url(self):
if self.success_url:
url = self.success_url
else:
# Default to returning to the same page
url = self.request.get_full_path()
return url
def formset_valid(self, formset):
return HttpResponseRedirect(self.get_success_url())
def formset_invalid(self, formset):
return self.render_to_response(self.get_context_data(formset=formset))
class ModelFormSetMixin(FormSetMixin, MultipleObjectMixin):
exclude = None
fields = None
formfield_callback = None
def get_context_data(self, **kwargs):
context = kwargs
if self.object_list:
context['object_list'] = self.object_list
context_object_name = self.get_context_object_name(self.get_queryset())
if context_object_name:
context[context_object_name] = self.object_list
return context
def construct_formset(self):
return self.get_formset()(queryset=self.get_queryset(), **self.get_formset_kwargs())
def get_factory_kwargs(self):
kwargs = super(ModelFormSetMixin, self).get_factory_kwargs()
kwargs.update({
'exclude': self.exclude,
'fields': self.fields,
'formfield_callback': self.formfield_callback,
})
if self.get_form_class():
kwargs['form'] = self.get_form_class()
if self.get_formset_class():
kwargs['formset'] = self.get_formset_class()
return kwargs
def get_formset(self):
return modelformset_factory(self.model, **self.get_factory_kwargs())
def formset_valid(self, formset):
self.object_list = formset.save()
return super(ModelFormSetMixin, self).formset_valid(formset)
class BaseInlineFormSetMixin(BaseFormSetMixin):
model = None
inline_model = None
fk_name = None
formset_class = BaseInlineFormSet
exclude = None
fields = None
formfield_callback = None
can_delete = True
def get_context_data(self, **kwargs):
context = kwargs
if self.object:
context['object'] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
return context
def construct_formset(self):
return self.get_formset()(instance=self.object, **self.get_formset_kwargs())
def get_inline_model(self):
return self.inline_model
def get_factory_kwargs(self):
kwargs = super(BaseInlineFormSetMixin, self).get_factory_kwargs()
kwargs.update({
'exclude': self.exclude,
'fields': self.fields,
'formfield_callback': self.formfield_callback,
'fk_name': self.fk_name,
})
if self.get_form_class():
kwargs['form'] = self.get_form_class()
if self.get_formset_class():
kwargs['formset'] = self.get_formset_class()
return kwargs
def get_formset(self):
return inlineformset_factory(self.model, self.get_inline_model(), **self.get_factory_kwargs())
class InlineFormSetMixin(BaseInlineFormSetMixin, FormSetMixin, SingleObjectMixin):
def formset_valid(self, formset):
self.object_list = formset.save()
return super(BaseInlineFormSetMixin, self).formset_valid(formset)
class ProcessFormSetView(View):
"""
    A mixin that processes a formset on POST.
"""
def get(self, request, *args, **kwargs):
formset = self.construct_formset()
return self.render_to_response(self.get_context_data(formset=formset))
def post(self, request, *args, **kwargs):
formset = self.construct_formset()
if formset.is_valid():
return self.formset_valid(formset)
else:
return self.formset_invalid(formset)
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
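# The concrete views below wire these mixins together. A minimal usage sketch
# for FormSetView (``AddressForm`` and the template name are illustrative, not
# part of this module):
#
#     class AddressFormSetView(FormSetView):
#         form_class = AddressForm
#         template_name = 'address_formset.html'
#         extra = 3
#
# GET renders ``formset`` in the template context; a valid POST redirects to
# ``get_success_url()`` (the current path unless ``success_url`` is set).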
class BaseFormSetView(FormSetMixin, ProcessFormSetView):
"""
A base view for displaying a formset
"""
class FormSetView(TemplateResponseMixin, BaseFormSetView):
"""
A view for displaying a formset, and rendering a template response
"""
class BaseModelFormSetView(ModelFormSetMixin, ProcessFormSetView):
"""
A base view for displaying a modelformset
"""
def get(self, request, *args, **kwargs):
self.object_list = self.get_queryset()
return super(BaseModelFormSetView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object_list = self.get_queryset()
return super(BaseModelFormSetView, self).post(request, *args, **kwargs)
class ModelFormSetView(MultipleObjectTemplateResponseMixin, BaseModelFormSetView):
"""
A view for displaying a modelformset, and rendering a template response
"""
class BaseInlineFormSetView(InlineFormSetMixin, ProcessFormSetView):
"""
A base view for displaying a modelformset for a queryset belonging to a parent model
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseInlineFormSetView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseInlineFormSetView, self).post(request, *args, **kwargs)
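# A minimal usage sketch for the view below (``Contact``/``Address`` are
# illustrative models, with ``Address`` holding a ForeignKey to ``Contact``):
#
#     class ContactAddressesView(InlineFormSetView):
#         model = Contact
#         inline_model = Address
#         template_name = 'contact_addresses.html'
#
# The parent ``Contact`` is looked up via SingleObjectMixin (pk/slug from the
# URL), an inline formset of its addresses is rendered on GET, and a valid
# POST saves the formset and redirects to ``get_success_url()``.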
class InlineFormSetView(SingleObjectTemplateResponseMixin, BaseInlineFormSetView):
"""
A view for displaying a modelformset for a queryset belonging to a parent model
"""
| 31.898734 | 102 | 0.661111 | [
"MIT"
] | hovel/django-extra-views | extra_views/formsets.py | 7,560 | Python |
from __future__ import unicode_literals
from flask import Flask,render_template,url_for,request
from text_summarization import text_summarizer
import time
import spacy
nlp = spacy.load('en_core_web_sm')
app = Flask(__name__)
# Web Scraping Pkg
from bs4 import BeautifulSoup
# from urllib.request import urlopen
from urllib.request import urlopen
# Reading Time
def readingTime(mytext):
total_words = len([ token.text for token in nlp(mytext)])
estimatedTime = total_words/200.0
return estimatedTime
# Fetch Text From Url
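# (downloads the page and joins the text of every <p> tag into one string)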
def get_text(url):
page = urlopen(url)
soup = BeautifulSoup(page)
fetched_text = ' '.join(map(lambda p:p.text,soup.find_all('p')))
return fetched_text
@app.route('/')
def index():
return render_template('index.html')
@app.route('/analyze',methods=['GET','POST'])
def analyze():
start = time.time()
if request.method == 'POST':
rawtext = request.form['rawtext']
final_reading_time = readingTime(rawtext)
final_summary = text_summarizer(rawtext)
summary_reading_time = readingTime(final_summary)
end = time.time()
final_time = end-start
return render_template('index.html',ctext=rawtext,final_summary=final_summary,final_time=final_time,final_reading_time=final_reading_time,summary_reading_time=summary_reading_time)
@app.route('/analyze_url',methods=['GET','POST'])
def analyze_url():
start = time.time()
if request.method == 'POST':
raw_url = request.form['raw_url']
rawtext = get_text(raw_url)
final_reading_time = readingTime(rawtext)
final_summary = text_summarizer(rawtext)
summary_reading_time = readingTime(final_summary)
end = time.time()
final_time = end-start
return render_template('index.html',ctext=rawtext,final_summary=final_summary,final_time=final_time,final_reading_time=final_reading_time,summary_reading_time=summary_reading_time)
@app.route('/about')
def about():
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True) | 31.793651 | 182 | 0.751872 | [
"MIT"
] | ramachandra742/Text-Summarization-projects | Automatic extractive Text Summarization using RoBERTa/Deploy Flask app/app.py | 2,003 | Python |
import os
import sys
import time
import shutil
import random
import subprocess
from itertools import starmap
from tempfile import mkdtemp, NamedTemporaryFile
from .. import current, FlowSpec
from ..metaflow_config import DATATOOLS_S3ROOT
from ..util import is_stringish,\
to_bytes,\
to_unicode,\
to_fileobj,\
url_quote,\
url_unquote
from ..exception import MetaflowException
from ..debug import debug
from . import s3op
try:
# python2
from urlparse import urlparse
except:
# python3
from urllib.parse import urlparse
from metaflow.datastore.util.s3util import get_s3_client
from botocore.exceptions import ClientError
NUM_S3OP_RETRIES = 8
class MetaflowS3InvalidObject(MetaflowException):
headline = 'Not a string-like object'
class MetaflowS3URLException(MetaflowException):
headline = 'Invalid address'
class MetaflowS3Exception(MetaflowException):
headline = 'S3 access failed'
class MetaflowS3NotFound(MetaflowException):
headline = 'S3 object not found'
class MetaflowS3AccessDenied(MetaflowException):
headline = 'S3 access denied'
class S3Object(object):
"""
This object represents a path or an object in S3,
with an optional local copy.
    Get or list calls return one or more S3Objects.
"""
def __init__(self, prefix, url, path, size=None):
# all fields of S3Object should return a unicode object
def ensure_unicode(x):
return None if x is None else to_unicode(x)
prefix, url, path = map(ensure_unicode, (prefix, url, path))
self._size = size
self._url = url
self._path = path
self._key = None
if path:
self._size = os.stat(self._path).st_size
if prefix is None or prefix == url:
self._key = url
self._prefix = None
else:
self._key = url[len(prefix.rstrip('/')) + 1:].rstrip('/')
self._prefix = prefix
@property
def exists(self):
"""
Does this key correspond to an object in S3?
"""
return self._size is not None
@property
def downloaded(self):
"""
Has this object been downloaded?
"""
return bool(self._path)
@property
def url(self):
"""
S3 location of the object
"""
return self._url
@property
def prefix(self):
"""
Prefix requested that matches the object.
"""
return self._prefix
@property
def key(self):
"""
Key corresponds to the key given to the get call that produced
this object. This may be a full S3 URL or a suffix based on what
was requested.
"""
return self._key
@property
def path(self):
"""
Path to the local file corresponding to the object downloaded.
        This file gets deleted automatically when an S3 scope exits.
Returns None if this S3Object has not been downloaded.
"""
return self._path
@property
def blob(self):
"""
Contents of the object as a byte string.
Returns None if this S3Object has not been downloaded.
"""
if self._path:
with open(self._path, 'rb') as f:
return f.read()
@property
def text(self):
"""
Contents of the object as a Unicode string.
Returns None if this S3Object has not been downloaded.
"""
if self._path:
return self.blob.decode('utf-8', errors='replace')
@property
def size(self):
"""
Size of the object in bytes.
Returns None if the key does not correspond to an object in S3.
"""
return self._size
def __str__(self):
if self._path:
return '<S3Object %s (%d bytes, local)>' % (self._url, self._size)
elif self._size:
return '<S3Object %s (%d bytes, in S3)>' % (self._url, self._size)
else:
return '<S3Object %s (object does not exist)>' % self._url
def __repr__(self):
return str(self)
class S3(object):
def __init__(self,
tmproot='.',
bucket=None,
prefix=None,
run=None,
s3root=None):
"""
        Initialize a new context for S3 operations. This object is meant to be
        used as a context manager for a with statement.
        There are two ways to initialize this object, depending on whether you
        want to bind paths to a Metaflow run or not.
1. With a run object:
run: (required) Either a FlowSpec object (typically 'self') or a
Run object corresponding to an existing Metaflow run. These
are used to add a version suffix in the S3 path.
bucket: (optional) S3 bucket.
prefix: (optional) S3 prefix.
2. Without a run object:
s3root: (optional) An S3 root URL for all operations. If this is
not specified, all operations require a full S3 URL.
These options are supported in both the modes:
tmproot: (optional) Root path for temporary files (default: '.')
"""
if run:
# 1. use a (current) run ID with optional customizations
parsed = urlparse(DATATOOLS_S3ROOT)
if not bucket:
bucket = parsed.netloc
if not prefix:
prefix = parsed.path
if isinstance(run, FlowSpec):
if current.is_running_flow:
prefix = os.path.join(prefix,
current.flow_name,
current.run_id)
else:
raise MetaflowS3URLException(\
"Initializing S3 with a FlowSpec outside of a running "
"flow is not supported.")
else:
prefix = os.path.join(prefix, run.parent.id, run.id)
self._s3root = u's3://%s' % os.path.join(bucket, prefix.strip('/'))
elif s3root:
# 2. use an explicit S3 prefix
parsed = urlparse(to_unicode(s3root))
if parsed.scheme != 's3':
                raise MetaflowS3URLException(\
                    "s3root needs to be an S3 URL prefixed with s3://.")
self._s3root = s3root.rstrip('/')
else:
# 3. use the client only with full URLs
self._s3root = None
self._tmpdir = mkdtemp(dir=tmproot, prefix='metaflow.s3.')
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
"""
Delete all temporary files downloaded in this context.
"""
try:
if not debug.s3client:
shutil.rmtree(self._tmpdir)
except:
pass
def _url(self, key):
        # NOTE: All URLs are handled as Unicode objects (unicode in py2,
        # string in py3) internally. We expect that all URLs passed to this
        # class are either Unicode or UTF-8 encoded byte strings. All URLs
# returned are Unicode.
if self._s3root is None:
parsed = urlparse(to_unicode(key))
if parsed.scheme == 's3' and parsed.path:
return key
else:
if current.is_running_flow:
raise MetaflowS3URLException(\
"Specify S3(run=self) when you use S3 inside a running "
"flow. Otherwise you have to use S3 with full "
"s3:// urls.")
else:
raise MetaflowS3URLException(\
"Initialize S3 with an 's3root' or 'run' if you don't "
"want to specify full s3:// urls.")
elif key:
if key.startswith('s3://'):
raise MetaflowS3URLException(\
"Don't use absolute S3 URLs when the S3 client is "
"initialized with a prefix. URL: %s" % key)
return os.path.join(self._s3root, key)
else:
return self._s3root
def list_paths(self, keys=None):
"""
List the next level of paths in S3. If multiple keys are
specified, listings are done in parallel. The returned
S3Objects have .exists == False if the url refers to a
prefix, not an existing S3 object.
Args:
keys: (required) a list of suffixes for paths to list.
Returns:
a list of S3Objects (not downloaded)
Example:
Consider the following paths in S3:
A/B/C
D/E
            In this case, list_paths(['A', 'D']) returns ['A/B', 'D/E']. The
first S3Object has .exists == False, since it does not refer to an
object in S3. It is just a prefix.
"""
def _list(keys):
if keys is None:
keys = [None]
urls = (self._url(key).rstrip('/') + '/' for key in keys)
res = self._read_many_files('list', urls)
for s3prefix, s3url, size in res:
if size:
yield s3prefix, s3url, None, int(size)
else:
yield s3prefix, s3url, None, None
return list(starmap(S3Object, _list(keys)))
def list_recursive(self, keys=None):
"""
List objects in S3 recursively. If multiple keys are
specified, listings are done in parallel. The returned
S3Objects have always .exists == True, since they refer
to existing objects in S3.
Args:
keys: (required) a list of suffixes for paths to list.
Returns:
a list of S3Objects (not downloaded)
Example:
Consider the following paths in S3:
A/B/C
D/E
            In this case, list_recursive(['A', 'D']) returns ['A/B/C', 'D/E'].
"""
def _list(keys):
if keys is None:
keys = [None]
res = self._read_many_files('list',
map(self._url, keys),
recursive=True)
for s3prefix, s3url, size in res:
yield s3prefix, s3url, None, int(size)
return list(starmap(S3Object, _list(keys)))
def get(self, key=None, return_missing=False):
"""
Get a single object from S3.
Args:
key: (optional) a suffix identifying the object.
return_missing: (optional, default False) if set to True, do
not raise an exception for a missing key but
return it as an S3Object with .exists == False.
Returns:
an S3Object corresponding to the object requested.
"""
url = self._url(key)
src = urlparse(url)
def _download(s3, tmp):
s3.download_file(src.netloc, src.path.lstrip('/'), tmp)
return url
try:
path = self._one_boto_op(_download, url)
except MetaflowS3NotFound:
if return_missing:
path = None
else:
raise
return S3Object(self._s3root, url, path)
def get_many(self, keys, return_missing=False):
"""
Get many objects from S3 in parallel.
Args:
keys: (required) a list of suffixes identifying the objects.
return_missing: (optional, default False) if set to True, do
not raise an exception for a missing key but
return it as an S3Object with .exists == False.
Returns:
a list of S3Objects corresponding to the objects requested.
"""
def _get():
res = self._read_many_files('get',
map(self._url, keys),
allow_missing=return_missing,
verify=True,
verbose=False,
listing=True)
for s3prefix, s3url, fname in res:
if fname:
yield self._s3root, s3url, os.path.join(self._tmpdir, fname)
else:
# missing entries per return_missing=True
yield self._s3root, s3prefix, None, None
return list(starmap(S3Object, _get()))
def get_recursive(self, keys):
"""
Get many objects from S3 recursively in parallel.
Args:
keys: (required) a list of suffixes for paths to download
recursively.
Returns:
a list of S3Objects corresponding to the objects requested.
"""
def _get():
res = self._read_many_files('get',
map(self._url, keys),
recursive=True,
verify=True,
verbose=False,
listing=True)
for s3prefix, s3url, fname in res:
yield s3prefix, s3url, os.path.join(self._tmpdir, fname)
return list(starmap(S3Object, _get()))
def get_all(self):
"""
Get all objects from S3 recursively (in parallel). This request
only works if S3 is initialized with a run or a s3root prefix.
Returns:
a list of S3Objects corresponding to the objects requested.
"""
if self._s3root is None:
raise MetaflowS3URLException(\
"Can't get_all() when S3 is initialized without a prefix")
else:
return self.get_recursive([None])
def put(self, key, obj, overwrite=True):
"""
Put an object to S3.
Args:
key: (required) suffix for the object.
obj: (required) a bytes, string, or a unicode object to
be stored in S3.
overwrite: (optional) overwrites the key with obj, if it exists
Returns:
an S3 URL corresponding to the object stored.
"""
if not is_stringish(obj):
raise MetaflowS3InvalidObject(\
"Object corresponding to the key '%s' is not a string "
"or a bytes object." % key)
url = self._url(key)
src = urlparse(url)
def _upload(s3, tmp):
# we need to recreate the StringIO object for retries since
# apparently upload_fileobj will/may close() it
blob = to_fileobj(obj)
s3.upload_fileobj(blob, src.netloc, src.path.lstrip('/'))
if overwrite:
self._one_boto_op(_upload, url)
return url
else:
def _head(s3, tmp):
s3.head_object(Bucket=src.netloc, Key=src.path.lstrip('/'))
try:
self._one_boto_op(_head, url)
except MetaflowS3NotFound as err:
self._one_boto_op(_upload, url)
return url
def put_many(self, key_objs, overwrite=True):
"""
Put objects to S3 in parallel.
Args:
key_objs: (required) an iterator of (key, value) tuples. Value must
be a string, bytes, or a unicode object.
overwrite: (optional) overwrites the key with obj, if it exists
Returns:
a list of (key, S3 URL) tuples corresponding to the files sent.
"""
def _store():
for key, obj in key_objs:
if is_stringish(obj):
with NamedTemporaryFile(dir=self._tmpdir,
delete=False,
mode='wb',
prefix='metaflow.s3.put_many.') as tmp:
tmp.write(to_bytes(obj))
tmp.close()
yield tmp.name, self._url(key), key
else:
raise MetaflowS3InvalidObject(
"Object corresponding to the key '%s' is not a string "
"or a bytes object." % key)
return self._put_many_files(_store(), overwrite)
def put_files(self, key_paths, overwrite=True):
"""
Put files to S3 in parallel.
Args:
key_paths: (required) an iterator of (key, path) tuples.
overwrite: (optional) overwrites the key with obj, if it exists
Returns:
a list of (key, S3 URL) tuples corresponding to the files sent.
"""
def _check():
for key, path in key_paths:
if not os.path.exists(path):
raise MetaflowS3NotFound("Local file not found: %s" % path)
yield path, self._url(key), key
return self._put_many_files(_check(), overwrite)
def _one_boto_op(self, op, url):
error = ''
for i in range(NUM_S3OP_RETRIES):
tmp = NamedTemporaryFile(dir=self._tmpdir,
prefix='metaflow.s3.one_file.',
delete=False)
try:
s3, _ = get_s3_client()
op(s3, tmp.name)
return tmp.name
except ClientError as err:
error_code = s3op.normalize_client_error(err)
if error_code == 404:
raise MetaflowS3NotFound(url)
elif error_code == 403:
raise MetaflowS3AccessDenied(url)
elif error_code == 'NoSuchBucket':
raise MetaflowS3URLException("Specified S3 bucket doesn't exist.")
error = str(err)
except Exception as ex:
# TODO specific error message for out of disk space
error = str(ex)
os.unlink(tmp.name)
# add some jitter to make sure retries are not synchronized
time.sleep(2**i + random.randint(0, 10))
raise MetaflowS3Exception("S3 operation failed.\n"\
"Key requested: %s\n"\
"Error: %s" % (url, error))
# NOTE: re: _read_many_files and _put_many_files
# All file IO is through binary files - we write bytes, we read
# bytes. All inputs and outputs from these functions are Unicode.
# Conversion between bytes and unicode is done through url_quote
# and url_unquote.
def _read_many_files(self, op, prefixes, **options):
with NamedTemporaryFile(dir=self._tmpdir,
mode='wb',
delete=not debug.s3client,
prefix='metaflow.s3.inputs.') as inputfile:
inputfile.write(b'\n'.join(map(url_quote, prefixes)))
inputfile.flush()
stdout, stderr = self._s3op_with_retries(op,
inputs=inputfile.name,
**options)
if stderr:
raise MetaflowS3Exception("Getting S3 files failed.\n"\
"First prefix requested: %s\n"\
"Error: %s" % (prefixes[0], stderr))
else:
for line in stdout.splitlines():
yield tuple(map(url_unquote, line.strip(b'\n').split(b' ')))
def _put_many_files(self, url_files, overwrite):
url_files = list(url_files)
with NamedTemporaryFile(dir=self._tmpdir,
mode='wb',
delete=not debug.s3client,
prefix='metaflow.s3.put_inputs.') as inputfile:
lines = (b' '.join(map(url_quote, (os.path.realpath(local), url)))
for local, url, _ in url_files)
inputfile.write(b'\n'.join(lines))
inputfile.flush()
stdout, stderr = self._s3op_with_retries('put',
filelist=inputfile.name,
verbose=False,
overwrite=overwrite,
listing=True)
if stderr:
raise MetaflowS3Exception("Uploading S3 files failed.\n"\
"First key: %s\n"\
"Error: %s" % (url_files[0][2],
stderr))
else:
urls = set()
for line in stdout.splitlines():
url, _, _ = map(url_unquote, line.strip(b'\n').split(b' '))
urls.add(url)
return [(key, url) for _, url, key in url_files if url in urls]
def _s3op_with_retries(self, mode, **options):
cmdline = [sys.executable, os.path.abspath(s3op.__file__), mode]
for key, value in options.items():
key = key.replace('_', '-')
if isinstance(value, bool):
if value:
cmdline.append('--%s' % key)
else:
cmdline.append('--no-%s' % key)
else:
cmdline.extend(('--%s' % key, value))
for i in range(NUM_S3OP_RETRIES):
with NamedTemporaryFile(dir=self._tmpdir,
mode='wb+',
delete=not debug.s3client,
prefix='metaflow.s3op.stderr') as stderr:
try:
debug.s3client_exec(cmdline)
stdout = subprocess.check_output(cmdline,
cwd=self._tmpdir,
stderr=stderr.file)
return stdout, None
except subprocess.CalledProcessError as ex:
stderr.seek(0)
err_out = stderr.read().decode('utf-8', errors='replace')
stderr.seek(0)
if ex.returncode == s3op.ERROR_URL_NOT_FOUND:
raise MetaflowS3NotFound(err_out)
elif ex.returncode == s3op.ERROR_URL_ACCESS_DENIED:
raise MetaflowS3AccessDenied(err_out)
time.sleep(2**i + random.randint(0, 10))
return None, err_out
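# A minimal usage sketch (bucket, prefix and keys below are illustrative):
#
#     with S3(s3root='s3://my-bucket/some/prefix') as s3:
#         url = s3.put('greeting', 'hello world')   # returns the full S3 URL
#         obj = s3.get('greeting')                  # S3Object, downloaded locally
#         print(obj.text, obj.size, obj.url)
#
# Inside a running flow, S3(run=self) scopes keys under the current flow name
# and run id instead of requiring full s3:// URLs; temporary downloads are
# removed when the with-block exits.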
| 35.472136 | 86 | 0.514728 | [
"Apache-2.0"
] | anthonypreza/metaflow | metaflow/datatools/s3.py | 22,915 | Python |
#© 2017-2020, ETH Zurich, D-INFK, [email protected]
from rpcudp.protocol import RPCProtocol
from twisted.internet import reactor
from talosstorage.checks import QueryToken
from talosstorage.chunkdata import CloudChunk
class RPCServer(RPCProtocol):
# Any methods starting with "rpc_" are available to clients.
def rpc_sayhi(self, sender, chunk, token):
token = QueryToken.from_json(token)
# This could return a Deferred as well. sender is (ip,port)
chunk_orig = CloudChunk.decode(chunk)
return "Tag is %s you live at %s:%i and token is %s" % (chunk_orig.get_tag_hex(), sender[0], sender[1], token.owner)
# start a server on UDP port 1234
reactor.listenUDP(1234, RPCServer())
reactor.run() | 36.6 | 125 | 0.726776 | [
"Apache-2.0"
] | chunchuan-wang/droplet-engine | talosblockchain/global_tests/test_udprpc_server.py | 733 | Python |
import datetime
import os
import re
from peewee import *
from playhouse.reflection import *
from .base import IS_SQLITE_OLD
from .base import ModelTestCase
from .base import TestModel
from .base import db
from .base import requires_models
from .base import requires_sqlite
from .base import skip_if
from .base_models import Tweet
from .base_models import User
class ColTypes(TestModel):
f1 = BigIntegerField(index=True)
f2 = BlobField()
f3 = BooleanField()
f4 = CharField(max_length=50)
f5 = DateField()
f6 = DateTimeField()
f7 = DecimalField()
f8 = DoubleField()
f9 = FloatField()
f10 = IntegerField(unique=True)
f11 = AutoField()
f12 = TextField()
f13 = TimeField()
class Meta:
indexes = (
(('f10', 'f11'), True),
(('f11', 'f8', 'f13'), False),
)
class Nullable(TestModel):
nullable_cf = CharField(null=True)
nullable_if = IntegerField(null=True)
class RelModel(TestModel):
col_types = ForeignKeyField(ColTypes, backref='foo')
col_types_nullable = ForeignKeyField(ColTypes, null=True)
class FKPK(TestModel):
col_types = ForeignKeyField(ColTypes, primary_key=True)
class Underscores(TestModel):
_id = AutoField()
_name = CharField()
class Category(TestModel):
name = CharField(max_length=10)
parent = ForeignKeyField('self', null=True)
class Nugget(TestModel):
category_id = ForeignKeyField(Category, column_name='category_id')
category = CharField()
class BaseReflectionTestCase(ModelTestCase):
def setUp(self):
super(BaseReflectionTestCase, self).setUp()
self.introspector = Introspector.from_database(self.database)
class TestReflection(BaseReflectionTestCase):
requires = [ColTypes, Nullable, RelModel, FKPK, Underscores, Category,
Nugget]
def test_generate_models(self):
models = self.introspector.generate_models()
self.assertTrue(set((
'category',
'col_types',
'fkpk',
'nugget',
'nullable',
'rel_model',
'underscores')).issubset(set(models)))
def assertIsInstance(obj, klass):
self.assertTrue(isinstance(obj, klass))
category = models['category']
self.assertEqual(
sorted(category._meta.fields),
['id', 'name', 'parent'])
assertIsInstance(category.id, AutoField)
assertIsInstance(category.name, CharField)
assertIsInstance(category.parent, ForeignKeyField)
self.assertEqual(category.parent.rel_model, category)
fkpk = models['fkpk']
self.assertEqual(sorted(fkpk._meta.fields), ['col_types'])
assertIsInstance(fkpk.col_types, ForeignKeyField)
self.assertEqual(fkpk.col_types.rel_model, models['col_types'])
self.assertTrue(fkpk.col_types.primary_key)
relmodel = models['rel_model']
self.assertEqual(
sorted(relmodel._meta.fields),
['col_types', 'col_types_nullable', 'id'])
assertIsInstance(relmodel.col_types, ForeignKeyField)
assertIsInstance(relmodel.col_types_nullable, ForeignKeyField)
self.assertFalse(relmodel.col_types.null)
self.assertTrue(relmodel.col_types_nullable.null)
self.assertEqual(relmodel.col_types.rel_model,
models['col_types'])
self.assertEqual(relmodel.col_types_nullable.rel_model,
models['col_types'])
@requires_sqlite
def test_generate_models_indexes(self):
models = self.introspector.generate_models()
self.assertEqual(models['fkpk']._meta.indexes, [])
self.assertEqual(models['rel_model']._meta.indexes, [])
self.assertEqual(models['category']._meta.indexes, [])
col_types = models['col_types']
indexed = set(['f1'])
unique = set(['f10'])
for field in col_types._meta.sorted_fields:
self.assertEqual(field.index, field.name in indexed)
self.assertEqual(field.unique, field.name in unique)
indexes = col_types._meta.indexes
self.assertEqual(sorted(indexes), [
(['f10', 'f11'], True),
(['f11', 'f8', 'f13'], False),
])
def test_table_subset(self):
models = self.introspector.generate_models(table_names=[
'category',
'col_types',
'foobarbaz'])
self.assertEqual(sorted(models.keys()), ['category', 'col_types'])
@requires_sqlite
def test_sqlite_fk_re(self):
user_id_tests = [
'FOREIGN KEY("user_id") REFERENCES "users"("id")',
'FOREIGN KEY(user_id) REFERENCES users(id)',
'FOREIGN KEY ([user_id]) REFERENCES [users] ([id])',
'"user_id" NOT NULL REFERENCES "users" ("id")',
'user_id not null references users (id)',
]
fk_pk_tests = [
('"col_types_id" INTEGER NOT NULL PRIMARY KEY REFERENCES '
'"coltypes" ("f11")'),
'FOREIGN KEY ("col_types_id") REFERENCES "coltypes" ("f11")',
]
regex = SqliteMetadata.re_foreign_key
for test in user_id_tests:
match = re.search(regex, test, re.I)
self.assertEqual(match.groups(), (
'user_id', 'users', 'id',
))
for test in fk_pk_tests:
match = re.search(regex, test, re.I)
self.assertEqual(match.groups(), (
'col_types_id', 'coltypes', 'f11',
))
def test_make_column_name(self):
# Tests for is_foreign_key=False.
tests = (
('Column', 'column'),
('Foo_iD', 'foo_id'),
('foo_id', 'foo_id'),
('foo_id_id', 'foo_id_id'),
('foo', 'foo'),
('_id', '_id'),
('a123', 'a123'),
('and', 'and_'),
('Class', 'class_'),
('Class_ID', 'class_id'),
)
for col_name, expected in tests:
self.assertEqual(
self.introspector.make_column_name(col_name), expected)
# Tests for is_foreign_key=True.
tests = (
('Foo_iD', 'foo'),
('foo_id', 'foo'),
('foo_id_id', 'foo_id'),
('foo', 'foo'),
('_id', '_id'),
('a123', 'a123'),
('and', 'and_'),
('Class', 'class_'),
('Class_ID', 'class_'),
)
for col_name, expected in tests:
self.assertEqual(
self.introspector.make_column_name(col_name, True), expected)
def test_make_model_name(self):
tests = (
('Table', 'Table'),
('table', 'Table'),
('table_baz', 'TableBaz'),
('foo__bar__baz2', 'FooBarBaz2'),
('foo12_3', 'Foo123'),
)
for table_name, expected in tests:
self.assertEqual(
self.introspector.make_model_name(table_name), expected)
def test_col_types(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
expected = (
('col_types', (
('f1', (BigIntegerField, IntegerField), False),
# There do not appear to be separate constants for the blob and
# text field types in MySQL's drivers. See GH#1034.
('f2', (BlobField, TextField), False),
('f3', (BooleanField, IntegerField), False),
('f4', CharField, False),
('f5', DateField, False),
('f6', DateTimeField, False),
('f7', DecimalField, False),
('f8', (DoubleField, FloatField), False),
('f9', FloatField, False),
('f10', IntegerField, False),
('f11', AutoField, False),
('f12', TextField, False),
('f13', TimeField, False))),
('rel_model', (
('col_types_id', ForeignKeyField, False),
('col_types_nullable_id', ForeignKeyField, True))),
('nugget', (
('category_id', ForeignKeyField, False),
('category', CharField, False))),
('nullable', (
('nullable_cf', CharField, True),
('nullable_if', IntegerField, True))),
('fkpk', (
('col_types_id', ForeignKeyField, False),)),
('underscores', (
('_id', AutoField, False),
('_name', CharField, False))),
('category', (
('name', CharField, False),
('parent_id', ForeignKeyField, True))),
)
for table_name, expected_columns in expected:
introspected_columns = columns[table_name]
for field_name, field_class, is_null in expected_columns:
if not isinstance(field_class, (list, tuple)):
field_class = (field_class,)
column = introspected_columns[field_name]
self.assertTrue(column.field_class in field_class,
"%s in %s" % (column.field_class, field_class))
self.assertEqual(column.nullable, is_null)
def test_foreign_keys(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
self.assertEqual(foreign_keys['col_types'], [])
rel_model = foreign_keys['rel_model']
self.assertEqual(len(rel_model), 2)
fkpk = foreign_keys['fkpk']
self.assertEqual(len(fkpk), 1)
fkpk_fk = fkpk[0]
self.assertEqual(fkpk_fk.table, 'fkpk')
self.assertEqual(fkpk_fk.column, 'col_types_id')
self.assertEqual(fkpk_fk.dest_table, 'col_types')
self.assertEqual(fkpk_fk.dest_column, 'f11')
category = foreign_keys['category']
self.assertEqual(len(category), 1)
category_fk = category[0]
self.assertEqual(category_fk.table, 'category')
self.assertEqual(category_fk.column, 'parent_id')
self.assertEqual(category_fk.dest_table, 'category')
self.assertEqual(category_fk.dest_column, 'id')
def test_table_names(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
names = (
('col_types', 'ColTypes'),
('nullable', 'Nullable'),
('rel_model', 'RelModel'),
('fkpk', 'Fkpk'))
for k, v in names:
self.assertEqual(model_names[k], v)
def test_column_meta(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
rel_model = columns['rel_model']
col_types_id = rel_model['col_types_id']
self.assertEqual(col_types_id.get_field_parameters(), {
'column_name': "'col_types_id'",
'model': 'ColTypes',
'field': "'f11'",
})
col_types_nullable_id = rel_model['col_types_nullable_id']
self.assertEqual(col_types_nullable_id.get_field_parameters(), {
'column_name': "'col_types_nullable_id'",
'null': True,
'backref': "'col_types_col_types_nullable_set'",
'model': 'ColTypes',
'field': "'f11'",
})
fkpk = columns['fkpk']
self.assertEqual(fkpk['col_types_id'].get_field_parameters(), {
'column_name': "'col_types_id'",
'model': 'ColTypes',
'primary_key': True,
'field': "'f11'"})
category = columns['category']
parent_id = category['parent_id']
self.assertEqual(parent_id.get_field_parameters(), {
'column_name': "'parent_id'",
'null': True,
'model': "'self'",
'field': "'id'",
})
nugget = columns['nugget']
category_fk = nugget['category_id']
self.assertEqual(category_fk.name, 'category_id')
self.assertEqual(category_fk.get_field_parameters(), {
'field': "'id'",
'model': 'Category',
'column_name': "'category_id'",
})
category = nugget['category']
self.assertEqual(category.name, 'category')
def test_get_field(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
expected = (
('col_types', (
('f1', ('f1 = BigIntegerField(index=True)',
'f1 = IntegerField(index=True)')),
('f2', ('f2 = BlobField()', 'f2 = TextField()')),
('f4', 'f4 = CharField()'),
('f5', 'f5 = DateField()'),
('f6', 'f6 = DateTimeField()'),
('f7', 'f7 = DecimalField()'),
('f10', 'f10 = IntegerField(unique=True)'),
('f11', 'f11 = AutoField()'),
('f12', ('f12 = TextField()', 'f12 = BlobField()')),
('f13', 'f13 = TimeField()'),
)),
('nullable', (
('nullable_cf', 'nullable_cf = '
'CharField(null=True)'),
('nullable_if', 'nullable_if = IntegerField(null=True)'),
)),
('fkpk', (
('col_types_id', 'col_types = ForeignKeyField('
"column_name='col_types_id', field='f11', model=ColTypes, "
'primary_key=True)'),
)),
('nugget', (
('category_id', 'category_id = ForeignKeyField('
"column_name='category_id', field='id', model=Category)"),
('category', 'category = CharField()'),
)),
('rel_model', (
('col_types_id', 'col_types = ForeignKeyField('
"column_name='col_types_id', field='f11', model=ColTypes)"),
('col_types_nullable_id', 'col_types_nullable = '
"ForeignKeyField(backref='col_types_col_types_nullable_set', "
"column_name='col_types_nullable_id', field='f11', "
'model=ColTypes, null=True)'),
)),
('underscores', (
('_id', '_id = AutoField()'),
('_name', '_name = CharField()'),
)),
('category', (
('name', 'name = CharField()'),
('parent_id', 'parent = ForeignKeyField('
"column_name='parent_id', field='id', model='self', "
'null=True)'),
)),
)
for table, field_data in expected:
for field_name, fields in field_data:
if not isinstance(fields, tuple):
fields = (fields,)
actual = columns[table][field_name].get_field()
self.assertTrue(actual in fields,
'%s not in %s' % (actual, fields))
class EventLog(TestModel):
data = CharField(constraints=[SQL('DEFAULT \'\'')])
timestamp = DateTimeField(constraints=[SQL('DEFAULT current_timestamp')])
flags = IntegerField(constraints=[SQL('DEFAULT 0')])
misc = TextField(constraints=[SQL('DEFAULT \'foo\'')])
class DefaultVals(TestModel):
key = CharField(constraints=[SQL('DEFAULT \'foo\'')])
value = IntegerField(constraints=[SQL('DEFAULT 0')])
class Meta:
primary_key = CompositeKey('key', 'value')
class TestReflectDefaultValues(BaseReflectionTestCase):
requires = [DefaultVals, EventLog]
@requires_sqlite
def test_default_values(self):
models = self.introspector.generate_models()
default_vals = models['default_vals']
create_table = (
'CREATE TABLE IF NOT EXISTS "default_vals" ('
'"key" VARCHAR(255) NOT NULL DEFAULT \'foo\', '
'"value" INTEGER NOT NULL DEFAULT 0, '
'PRIMARY KEY ("key", "value"))')
# Re-create table using the introspected schema.
self.assertSQL(default_vals._schema._create_table(), create_table, [])
default_vals.drop_table()
default_vals.create_table()
# Verify that the introspected schema has not changed.
models = self.introspector.generate_models()
default_vals = models['default_vals']
self.assertSQL(default_vals._schema._create_table(), create_table, [])
@requires_sqlite
def test_default_values_extended(self):
models = self.introspector.generate_models()
eventlog = models['event_log']
create_table = (
'CREATE TABLE IF NOT EXISTS "event_log" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"data" VARCHAR(255) NOT NULL DEFAULT \'\', '
'"timestamp" DATETIME NOT NULL DEFAULT current_timestamp, '
'"flags" INTEGER NOT NULL DEFAULT 0, '
'"misc" TEXT NOT NULL DEFAULT \'foo\')')
# Re-create table using the introspected schema.
self.assertSQL(eventlog._schema._create_table(), create_table, [])
eventlog.drop_table()
eventlog.create_table()
# Verify that the introspected schema has not changed.
models = self.introspector.generate_models()
eventlog = models['event_log']
self.assertSQL(eventlog._schema._create_table(), create_table, [])
class TestReflectionDependencies(BaseReflectionTestCase):
requires = [User, Tweet]
def test_generate_dependencies(self):
models = self.introspector.generate_models(table_names=['tweet'])
self.assertEqual(set(models), set(('users', 'tweet')))
IUser = models['users']
ITweet = models['tweet']
self.assertEqual(set(ITweet._meta.fields), set((
'id', 'user', 'content', 'timestamp')))
self.assertEqual(set(IUser._meta.fields), set(('id', 'username')))
self.assertTrue(ITweet.user.rel_model is IUser)
self.assertTrue(ITweet.user.rel_field is IUser.id)
def test_ignore_backrefs(self):
models = self.introspector.generate_models(table_names=['users'])
self.assertEqual(set(models), set(('users',)))
class Note(TestModel):
content = TextField()
timestamp = DateTimeField(default=datetime.datetime.now)
status = IntegerField()
class TestReflectViews(BaseReflectionTestCase):
requires = [Note]
def setUp(self):
super(TestReflectViews, self).setUp()
self.database.execute_sql('CREATE VIEW notes_public AS '
'SELECT content, timestamp FROM note '
'WHERE status = 1 ORDER BY timestamp DESC')
def tearDown(self):
self.database.execute_sql('DROP VIEW notes_public')
super(TestReflectViews, self).tearDown()
def test_views_ignored_default(self):
models = self.introspector.generate_models()
self.assertFalse('notes_public' in models)
def test_introspect_view(self):
models = self.introspector.generate_models(include_views=True)
self.assertTrue('notes_public' in models)
NotesPublic = models['notes_public']
self.assertEqual(sorted(NotesPublic._meta.fields),
['content', 'timestamp'])
self.assertTrue(isinstance(NotesPublic.content, TextField))
self.assertTrue(isinstance(NotesPublic.timestamp, DateTimeField))
@skip_if(IS_SQLITE_OLD)
def test_introspect_view_integration(self):
for i, (ct, st) in enumerate([('n1', 1), ('n2', 2), ('n3', 1)]):
Note.create(content=ct, status=st,
timestamp=datetime.datetime(2018, 1, 1 + i))
NP = self.introspector.generate_models(
table_names=['notes_public'], include_views=True)['notes_public']
self.assertEqual([(np.content, np.timestamp) for np in NP.select()], [
('n3', datetime.datetime(2018, 1, 3)),
('n1', datetime.datetime(2018, 1, 1))])
class Event(TestModel):
key = TextField()
timestamp = DateTimeField(index=True)
metadata = TextField(default='')
class TestInteractiveHelpers(ModelTestCase):
requires = [Category, Event]
def test_generate_models(self):
M = generate_models(self.database)
self.assertTrue('category' in M)
self.assertTrue('event' in M)
def assertFields(m, expected):
actual = [(f.name, f.field_type) for f in m._meta.sorted_fields]
self.assertEqual(actual, expected)
assertFields(M['category'], [('id', 'AUTO'), ('name', 'VARCHAR'),
('parent', 'INT')])
assertFields(M['event'], [
('id', 'AUTO'),
('key', 'TEXT'),
('timestamp', 'DATETIME'),
('metadata', 'TEXT')])
| 35.283557 | 79 | 0.567169 | [
"MIT"
] | Abhishek5101/peewee | tests/reflection.py | 21,029 | Python |
#!/usr/bin/env python
import datetime
import logging
import os
import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# https://www2.ed.gov/about/offices/list/oig/areports.html
# Oldest report: 1995
# options:
# standard since/year options for a year range to fetch from.
# report_id: limit to a single report
#
# Notes for IG's web team:
# - Fix the row for A17C0008 on
# http://www2.ed.gov/about/offices/list/oig/areports2003.html
# - Fix the published date for A06K0003
# on http://www2.ed.gov/about/offices/list/oig/areports2011.html
# - Multiple reports on http://www2.ed.gov/about/offices/list/oig/ireports.html
# say that they were published in 'Decemver' or 'Deccember' instead of 'December'
AUDIT_REPORTS_URL = "https://www2.ed.gov/about/offices/list/oig/areports{}.html"
SEMIANNUAL_REPORTS_URL = "https://www2.ed.gov/about/offices/list/oig/sarpages.html"
INSPECTION_REPORTS_URL = "https://www2.ed.gov/about/offices/list/oig/aireports.html"
INVESTIGATIVE_REPORTS_URL = "https://www2.ed.gov/about/offices/list/oig/ireports.html"
CONGRESSIONAL_TESTIMONY_URL = "https://www2.ed.gov/about/offices/list/oig/testimon.html"
SPECIAL_REPORTS_URL = "https://www2.ed.gov/about/offices/list/oig/specialreportstocongress.html"
OTHER_REPORTS_URL = "https://www2.ed.gov/about/offices/list/oig/otheroigproducts.html"
OTHER_REPORTS_URL = [OTHER_REPORTS_URL, SPECIAL_REPORTS_URL, CONGRESSIONAL_TESTIMONY_URL, INVESTIGATIVE_REPORTS_URL, INSPECTION_REPORTS_URL]
REPORT_PUBLISHED_MAP = {
"statelocal032002": datetime.datetime(2002, 3, 21),
"statloc082001": datetime.datetime(2001, 8, 3),
"A17B0006": datetime.datetime(2002, 2, 27),
"A17A0002": datetime.datetime(2001, 2, 28),
"A1790019": datetime.datetime(2000, 2, 28), # Approximation
"A17C0008": datetime.datetime(2003, 1, 31),
"PESMemo": datetime.datetime(2001, 1, 1), # Approximation
"s1370001": datetime.datetime(1999, 3, 18),
"oigqualitystandardsforalternativeproducts": datetime.datetime(2010, 3, 11),
}
def run(options):
year_range = inspector.year_range(options)
# optional: limit to a single report
report_id = options.get("report_id")
# Get the audit reports
for year in year_range:
url = audit_url_for(year)
doc = beautifulsoup_from_url(url)
agency_tables = doc.find_all("table", {"border": 1})
for agency_table in agency_tables:
results = agency_table.select("tr")
for index, result in enumerate(results):
if not index:
# First row is the header
continue
report = audit_report_from(result, url, year_range)
if report:
# optional: filter to a single report
if report_id and (report_id != report['report_id']):
continue
inspector.save_report(report)
# Get semiannual reports
doc = beautifulsoup_from_url(SEMIANNUAL_REPORTS_URL)
table = doc.find("table", {"border": 1})
for index, result in enumerate(table.select("tr")):
if index < 2:
# The first two rows are headers
continue
report = semiannual_report_from(result, SEMIANNUAL_REPORTS_URL, year_range)
if report:
# optional: filter to a single report
if report_id and (report_id != report['report_id']):
continue
inspector.save_report(report)
# Get other reports
for url in OTHER_REPORTS_URL:
doc = beautifulsoup_from_url(url)
results = doc.select("div.contentText ul li")
for result in results:
report = report_from(result, url, year_range)
if report:
# optional: filter to a single report
if report_id and (report_id != report['report_id']):
continue
inspector.save_report(report)
def audit_report_from(result, page_url, year_range):
if not result.text.strip():
# Just an empty row
return
title = result.select("td")[0].text.strip()
report_url = urljoin(page_url, result.select("td a")[0].get('href'))
report_id = None
if len(result.select("td")) != 3:
report_id = result.select("td")[1].text.strip()
if not report_id:
report_filename = report_url.split("/")[-1]
report_id, extension = os.path.splitext(report_filename)
if report_id in REPORT_PUBLISHED_MAP:
published_on = REPORT_PUBLISHED_MAP[report_id]
else:
# See notes to the IG Web team for some of this
published_on_text = result.select("td")[2].text.strip().replace(")", "").replace("//", "/")
date_formats = ['%m/%d/%Y', '%m/%d/%y', '%m/%Y']
published_on = None
for date_format in date_formats:
try:
published_on = datetime.datetime.strptime(published_on_text, date_format)
except ValueError:
pass
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'education',
'inspector_url': 'https://www2.ed.gov/about/offices/list/oig/',
'agency': 'education',
'agency_name': "Department of Education",
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
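# Audit listings live on one page per year; years before 1998 fall back to the
# 1998 page, 2001 uses a capitalized "Areports" URL, and the current year is on
# the main audit page.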
def audit_url_for(year):
if year < 1998:
# This is the first listed year
year = 1998
if year == 2001:
# This one needs a capital A. Yup.
return "https://www2.ed.gov/about/offices/list/oig/Areports2001.html"
if year == datetime.datetime.today().year:
# The current year is on the main page
return AUDIT_REPORTS_URL.format("")
else:
return AUDIT_REPORTS_URL.format(year)
def semiannual_report_from(result, page_url, year_range):
report_url = urljoin(page_url, result.select("a")[0].get('href'))
report_filename = report_url.split("/")[-1]
report_id, extension = os.path.splitext(report_filename)
date_range_text = result.select("td")[1].text
title = "Semiannual Report - {}".format(date_range_text)
published_on_text = date_range_text.split("-")[-1].strip()
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
report = {
'inspector': 'education',
'inspector_url': 'https://www2.ed.gov/about/offices/list/oig/',
'agency': 'education',
'agency_name': "Department of Education",
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
def report_from(result, url, year_range):
report_url = urljoin(url, result.select("a")[0].get('href'))
report_filename = report_url.split("/")[-1]
report_id, extension = os.path.splitext(report_filename)
title = result.text.split(" ACN:")[0]
if report_id in REPORT_PUBLISHED_MAP:
published_on = REPORT_PUBLISHED_MAP[report_id]
else:
result_text = result.text.replace(",", "").replace("\n", " ")
result_text = " ".join(result_text.split()) # Remove any double spaces
result_text = result_text.replace("Decemver", "December").replace("Deccember", "December") # See note to IG Web team
try:
published_on_text = "/".join(re.search("(\d+)/(\d+)/(\d+)", result_text).groups())
published_on = datetime.datetime.strptime(published_on_text, '%m/%d/%Y')
except AttributeError:
try:
published_on_text = "/".join(re.search("(\d+)/(\d+)", result_text).groups())
published_on = datetime.datetime.strptime(published_on_text, '%m/%Y')
except AttributeError:
try:
published_on_text = "/".join(re.search("(\w+) (\d+) (\d+)", result_text).groups())
published_on = datetime.datetime.strptime(published_on_text, '%B/%d/%Y')
except AttributeError:
published_on_text = "/".join(re.search("(\w+) (\d+)", result_text).groups())
published_on = datetime.datetime.strptime(published_on_text, '%B/%Y')
report = {
'inspector': 'education',
'inspector_url': 'https://www2.ed.gov/about/offices/list/oig/',
'agency': 'education',
'agency_name': "Department of Education",
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
def beautifulsoup_from_url(url):
body = utils.download(url)
return BeautifulSoup(body)
utils.run(run) if (__name__ == "__main__") else None | 36.734513 | 140 | 0.687907 | [
"CC0-1.0"
] | crdunwel/inspectors-general | inspectors/education.py | 8,302 | Python |
import os
from io import StringIO
from pathlib import Path
from quom import Quom
from quom.__main__ import main
FILE_MAIN_HPP = """
int foo = 3;
int foo();
"""
FILE_MAIN_CPP = """
int foo() { return 42; }
"""
RESULT = """
int foo = 3;
int foo();
int foo() { return 42; }
"""
def test_source_directory(fs):
os.makedirs('project/')
os.chdir('project/')
os.makedirs('include/')
os.makedirs('src/')
with open('include/main.hpp', 'w+') as file:
file.write(FILE_MAIN_HPP)
with open('src/main.cpp', 'w+') as file:
file.write(FILE_MAIN_CPP)
dst = StringIO()
Quom(Path('include/main.hpp'), dst)
assert dst.getvalue() != RESULT
dst = StringIO()
Quom(Path('include/main.hpp'), dst, relative_source_directories=[Path('../src')])
assert dst.getvalue() == RESULT
dst = StringIO()
Quom(Path('include/main.hpp'), dst, source_directories=[Path('src').resolve()])
assert dst.getvalue() == RESULT
dst = StringIO()
Quom(Path('include/main.hpp'), dst, source_directories=[Path('/project/src')])
assert dst.getvalue() == RESULT
main(['include/main.hpp', 'result.hpp', '-S', './../src'])
assert Path('result.hpp').read_text() == RESULT
main(['include/main.hpp', 'result.hpp', '-S', 'src'])
assert Path('result.hpp').read_text() == RESULT
main(['include/main.hpp', 'result.hpp', '-S', '/project/src'])
assert Path('result.hpp').read_text() == RESULT
| 23.111111 | 85 | 0.618132 | [
"MIT"
] | Chaoses-Ib/quom | tests/test_quom/test_source_directory.py | 1,456 | Python |
from combat import Combat, RecursiveCombat
with open("test_input.txt") as f:
game = Combat.parse_from_file(f)
f.seek(0)
recursive_game = RecursiveCombat.parse_from_file(f)
# 1st round
round_1 = game[1]
assert round_1.player_1==[2, 6, 3, 1, 9, 5]
assert round_1.player_2==[8, 4, 7, 10]
# 28th round
round_28 = game[27]
assert round_28.player_1==[4, 1]
assert round_28.player_2==[9, 7, 3, 2, 10, 6, 8, 5]
# error if checking score/winner before end
caught_error = False
try:
game[12].score
except ValueError:
caught_error = True
assert caught_error
caught_error = False
try:
game[8].score
except ValueError:
caught_error = True
assert caught_error
# end
end = game.play()
assert end.player_1==[]
assert end.player_2==[3, 2, 10, 6, 8, 5, 9, 4, 7, 1]
assert end.score==306
assert end.winner==1
# recursive game
end = recursive_game.play()
assert end.score==291
| 18.326531 | 55 | 0.699332 | [
"MIT"
] | DallogFheir/aoc-2020 | day-22/test.py | 898 | Python |
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Replaces GN files in tree with files from here that
make the build use system libraries.
"""
from __future__ import print_function
import argparse
import os
import shutil
import sys
REPLACEMENTS = {
'ffmpeg': 'third_party/ffmpeg/BUILD.gn',
'flac': 'third_party/flac/BUILD.gn',
'harfbuzz-ng': 'third_party/harfbuzz-ng/BUILD.gn',
'icu': 'third_party/icu/BUILD.gn',
'libevent': 'base/third_party/libevent/BUILD.gn',
'libjpeg': 'build/secondary/third_party/libjpeg_turbo/BUILD.gn',
'libpng': 'third_party/libpng/BUILD.gn',
'libvpx': 'third_party/libvpx/BUILD.gn',
'libwebp': 'third_party/libwebp/BUILD.gn',
'libxml': 'third_party/libxml/BUILD.gn',
'libxslt': 'third_party/libxslt/BUILD.gn',
're2': 'third_party/re2/BUILD.gn',
'snappy': 'third_party/snappy/BUILD.gn',
'yasm': 'third_party/yasm/yasm_assemble.gni',
'zlib': 'third_party/zlib/BUILD.gn',
}
def DoMain(argv):
my_dirname = os.path.dirname(__file__)
source_tree_root = os.path.abspath(
os.path.join(my_dirname, '..', '..', '..'))
parser = argparse.ArgumentParser()
parser.add_argument('--system-libraries', nargs='*', default=[])
parser.add_argument('--undo', action='store_true')
args = parser.parse_args(argv)
handled_libraries = set()
for lib, path in REPLACEMENTS.items():
if lib not in args.system_libraries:
continue
handled_libraries.add(lib)
if args.undo:
# Restore original file, and also remove the backup.
# This is meant to restore the source tree to its original state.
os.rename(os.path.join(source_tree_root, path + '.orig'),
os.path.join(source_tree_root, path))
else:
# Create a backup copy for --undo.
shutil.copyfile(os.path.join(source_tree_root, path),
os.path.join(source_tree_root, path + '.orig'))
# Copy the GN file from directory of this script to target path.
shutil.copyfile(os.path.join(my_dirname, '%s.gn' % lib),
os.path.join(source_tree_root, path))
unhandled_libraries = set(args.system_libraries) - handled_libraries
if unhandled_libraries:
print('Unrecognized system libraries requested: %s' % ', '.join(
sorted(unhandled_libraries)), file=sys.stderr)
return 1
return 0
if __name__ == '__main__':
sys.exit(DoMain(sys.argv[1:]))
| 31.5 | 72 | 0.687698 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | Cela-Inc/WebARonARCore | build/linux/unbundle/replace_gn_files.py | 2,520 | Python |
from random import choice, randint, sample, shuffle
from ga4stpg import graph
from ga4stpg.edgeset import EdgeSet
from ga4stpg.graph import UGraph
from ga4stpg.graph.disjointsets import DisjointSets
from ga4stpg.graph.priorityqueue import PriorityQueue
class MutatitionReplaceByLowerEdge:
def __init__(self, stpg):
self.stpg = stpg
def __call__(self, chromosome : EdgeSet):
assert isinstance(chromosome, EdgeSet), f'Chromosome must be EdgeSet type: Given was <{type(chromosome)}>'
graph = self.stpg.graph
disjoints = DisjointSets()
candidates = PriorityQueue()
result = EdgeSet()
for v in chromosome.vertices:
disjoints.make_set(v)
index = randint(0, len(chromosome))
for i, edge in enumerate(chromosome):
u, v = edge
if i == index:
candidates.push(graph.weight(u,v), (u,v))
else:
disjoints.union(u, v)
result.add(u, v)
components = disjoints.get_disjoint_sets()
lesser_idx = min(components, key=lambda item: len(components[item]))
keys = components.keys() - set([lesser_idx])
for key in keys:
for v in components[lesser_idx]:
for w in graph.adjacent_to(v):
if w in components[key]:
candidates.push(graph.weight(w, v), (v, w))
        # reconnect until a single component remains or no candidate edges are left
        while len(disjoints.get_disjoint_sets()) >= 2 and candidates:
w, v = candidates.pop()
if disjoints.find(w) != disjoints.find(v):
result.add(w, v)
disjoints.union(w,v)
return result
class MutationReplaceByRandomEdge:
def __init__(self, stpg) -> None:
self.stpg = stpg
def __call__(self, chromosome : EdgeSet):
assert isinstance(chromosome, EdgeSet), f'Chromosome must be EdgeSet type: Given was <{type(chromosome)}>'
graph = self.stpg.graph
disjoints = DisjointSets()
result = EdgeSet()
for v in chromosome.vertices:
disjoints.make_set(v)
index = randint(0, len(chromosome))
for i, edge in enumerate(chromosome):
if i != index :
v, u = edge
disjoints.union(v, u)
result.add(v, u)
components = disjoints.get_disjoint_sets()
lesser_idx = min(components, key=lambda item: len(components[item]))
lesser = components[lesser_idx]
keys = components.keys() - set([lesser_idx])
for key in keys:
candidates = list()
greater_component = components[key]
for v in lesser:
for w in graph.adjacent_to(v):
if w in greater_component:
candidates.append((v, w))
while candidates:
shuffle(candidates)
v, w = candidates.pop()
if disjoints.find(v) != disjoints.find(w):
result.add(v, w)
disjoints.union(v, w)
break
# if len(disjoints.get_disjoint_sets()) >= 2:
# result.add(selected_edge)
return result
| 32.480392 | 115 | 0.55086 | [
"MIT"
] | GiliardGodoi/ppgi-stpg-gpx | ga4stpg/edgeset/mutate.py | 3,313 | Python |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX runner for Kubeflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from typing import Callable, Dict, List, Optional, Text, Type, cast
from absl import logging
from kfp import compiler
from kfp import dsl
from kfp import gcp
from kubernetes import client as k8s_client
from tfx import version
from tfx.dsl.compiler import compiler as tfx_compiler
from tfx.dsl.components.base import base_component as tfx_base_component
from tfx.orchestration import data_types
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.kubeflow import base_component
from tfx.orchestration.kubeflow import utils
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
from tfx.orchestration.launcher import kubernetes_component_launcher
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import json_utils
from tfx.utils import telemetry_utils
# OpFunc represents the type of a function that takes as input a
# dsl.ContainerOp and returns the same object. Common operations such as adding
# k8s secrets, mounting volumes, specifying the use of TPUs and so on can be
# specified as an OpFunc.
# See example usage here:
# https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/gcp.py
OpFunc = Callable[[dsl.ContainerOp], dsl.ContainerOp]
# Default secret name for GCP credentials. This secret is installed as part of
# a typical Kubeflow installation when the component is GKE.
_KUBEFLOW_GCP_SECRET_NAME = 'user-gcp-sa'
# Default TFX container image to use in KubeflowDagRunner.
DEFAULT_KUBEFLOW_TFX_IMAGE = 'tensorflow/tfx:%s' % (version.__version__,)
def _mount_config_map_op(config_map_name: Text) -> OpFunc:
"""Mounts all key-value pairs found in the named Kubernetes ConfigMap.
All key-value pairs in the ConfigMap are mounted as environment variables.
Args:
config_map_name: The name of the ConfigMap resource.
Returns:
An OpFunc for mounting the ConfigMap.
"""
def mount_config_map(container_op: dsl.ContainerOp):
config_map_ref = k8s_client.V1ConfigMapEnvSource(
name=config_map_name, optional=True)
container_op.container.add_env_from(
k8s_client.V1EnvFromSource(config_map_ref=config_map_ref))
return mount_config_map
def _mount_secret_op(secret_name: Text) -> OpFunc:
"""Mounts all key-value pairs found in the named Kubernetes Secret.
All key-value pairs in the Secret are mounted as environment variables.
Args:
secret_name: The name of the Secret resource.
Returns:
An OpFunc for mounting the Secret.
"""
def mount_secret(container_op: dsl.ContainerOp):
    secret_ref = k8s_client.V1SecretEnvSource(
name=secret_name, optional=True)
container_op.container.add_env_from(
k8s_client.V1EnvFromSource(secret_ref=secret_ref))
return mount_secret
def get_default_pipeline_operator_funcs(
use_gcp_sa: bool = False) -> List[OpFunc]:
"""Returns a default list of pipeline operator functions.
Args:
use_gcp_sa: If true, mount a GCP service account secret to each pod, with
the name _KUBEFLOW_GCP_SECRET_NAME.
Returns:
A list of functions with type OpFunc.
"""
# Enables authentication for GCP services if needed.
gcp_secret_op = gcp.use_gcp_secret(_KUBEFLOW_GCP_SECRET_NAME)
# Mounts configmap containing Metadata gRPC server configuration.
mount_config_map_op = _mount_config_map_op('metadata-grpc-configmap')
if use_gcp_sa:
return [gcp_secret_op, mount_config_map_op]
else:
return [mount_config_map_op]
def get_default_kubeflow_metadata_config(
) -> kubeflow_pb2.KubeflowMetadataConfig:
"""Returns the default metadata connection config for Kubeflow.
Returns:
A config proto that will be serialized as JSON and passed to the running
container so the TFX component driver is able to communicate with MLMD in
a Kubeflow cluster.
"""
# The default metadata configuration for a Kubeflow Pipelines cluster is
# codified as a Kubernetes ConfigMap
# https://github.com/kubeflow/pipelines/blob/master/manifests/kustomize/base/metadata/metadata-grpc-configmap.yaml
config = kubeflow_pb2.KubeflowMetadataConfig()
# The environment variable to use to obtain the Metadata gRPC service host in
# the cluster that is backing Kubeflow Metadata. Note that the key in the
# config map and therefore environment variable used, are lower-cased.
config.grpc_config.grpc_service_host.environment_variable = 'METADATA_GRPC_SERVICE_HOST'
# The environment variable to use to obtain the Metadata grpc service port in
# the cluster that is backing Kubeflow Metadata.
config.grpc_config.grpc_service_port.environment_variable = 'METADATA_GRPC_SERVICE_PORT'
return config
def get_default_pod_labels() -> Dict[Text, Text]:
"""Returns the default pod label dict for Kubeflow."""
# KFP default transformers add pod env:
# https://github.com/kubeflow/pipelines/blob/0.1.32/sdk/python/kfp/compiler/_default_transformers.py
result = {
'add-pod-env': 'true',
telemetry_utils.LABEL_KFP_SDK_ENV: 'tfx'
}
return result
def get_default_output_filename(pipeline_name: str) -> str:
return pipeline_name + '.tar.gz'
class KubeflowDagRunnerConfig(pipeline_config.PipelineConfig):
"""Runtime configuration parameters specific to execution on Kubeflow."""
def __init__(
self,
pipeline_operator_funcs: Optional[List[OpFunc]] = None,
tfx_image: Optional[Text] = None,
kubeflow_metadata_config: Optional[
kubeflow_pb2.KubeflowMetadataConfig] = None,
# TODO(b/143883035): Figure out the best practice to put the
# SUPPORTED_LAUNCHER_CLASSES
supported_launcher_classes: List[Type[
base_component_launcher.BaseComponentLauncher]] = None,
**kwargs):
"""Creates a KubeflowDagRunnerConfig object.
The user can use pipeline_operator_funcs to apply modifications to
ContainerOps used in the pipeline. For example, to ensure the pipeline
steps mount a GCP secret, and a Persistent Volume, one can create config
object like so:
from kfp import gcp, onprem
      mount_secret_op = gcp.use_gcp_secret('my-secret-name')
mount_volume_op = onprem.mount_pvc(
"my-persistent-volume-claim",
"my-volume-name",
"/mnt/volume-mount-path")
config = KubeflowDagRunnerConfig(
pipeline_operator_funcs=[mount_secret_op, mount_volume_op]
)
Args:
pipeline_operator_funcs: A list of ContainerOp modifying functions that
will be applied to every container step in the pipeline.
tfx_image: The TFX container image to use in the pipeline.
kubeflow_metadata_config: Runtime configuration to use to connect to
Kubeflow metadata.
supported_launcher_classes: A list of component launcher classes that are
supported by the current pipeline. List sequence determines the order in
which launchers are chosen for each component being run.
**kwargs: keyword args for PipelineConfig.
"""
supported_launcher_classes = supported_launcher_classes or [
in_process_component_launcher.InProcessComponentLauncher,
kubernetes_component_launcher.KubernetesComponentLauncher,
]
super(KubeflowDagRunnerConfig, self).__init__(
supported_launcher_classes=supported_launcher_classes, **kwargs)
self.pipeline_operator_funcs = (
pipeline_operator_funcs or get_default_pipeline_operator_funcs())
self.tfx_image = tfx_image or DEFAULT_KUBEFLOW_TFX_IMAGE
self.kubeflow_metadata_config = (
kubeflow_metadata_config or get_default_kubeflow_metadata_config())
class KubeflowDagRunner(tfx_runner.TfxRunner):
"""Kubeflow Pipelines runner.
Constructs a pipeline definition YAML file based on the TFX logical pipeline.
"""
def __init__(
self,
output_dir: Optional[Text] = None,
output_filename: Optional[Text] = None,
config: Optional[KubeflowDagRunnerConfig] = None,
pod_labels_to_attach: Optional[Dict[Text, Text]] = None
):
"""Initializes KubeflowDagRunner for compiling a Kubeflow Pipeline.
Args:
output_dir: An optional output directory into which to output the pipeline
definition files. Defaults to the current working directory.
output_filename: An optional output file name for the pipeline definition
file. Defaults to pipeline_name.tar.gz when compiling a TFX pipeline.
Currently supports .tar.gz, .tgz, .zip, .yaml, .yml formats. See
https://github.com/kubeflow/pipelines/blob/181de66cf9fa87bcd0fe9291926790c400140783/sdk/python/kfp/compiler/compiler.py#L851
for format restriction.
config: An optional KubeflowDagRunnerConfig object to specify runtime
configuration when running the pipeline under Kubeflow.
      pod_labels_to_attach: Optional set of pod labels to attach to the GKE pod
        spun up for this pipeline. Defaults to the 3 labels:
1. add-pod-env: true,
2. pipeline SDK type,
3. pipeline unique ID,
where 2 and 3 are instrumentation of usage tracking.
"""
if config and not isinstance(config, KubeflowDagRunnerConfig):
raise TypeError('config must be type of KubeflowDagRunnerConfig.')
super(KubeflowDagRunner, self).__init__(config or KubeflowDagRunnerConfig())
self._config = cast(KubeflowDagRunnerConfig, self._config)
self._output_dir = output_dir or os.getcwd()
self._output_filename = output_filename
self._compiler = compiler.Compiler()
self._tfx_compiler = tfx_compiler.Compiler()
self._params = [] # List of dsl.PipelineParam used in this pipeline.
self._deduped_parameter_names = set() # Set of unique param names used.
if pod_labels_to_attach is None:
self._pod_labels_to_attach = get_default_pod_labels()
else:
self._pod_labels_to_attach = pod_labels_to_attach
def _parse_parameter_from_component(
      self, component: tfx_base_component.BaseComponent) -> None:
"""Extract embedded RuntimeParameter placeholders from a component.
Extract embedded RuntimeParameter placeholders from a component, then append
the corresponding dsl.PipelineParam to KubeflowDagRunner.
Args:
component: a TFX component.
"""
serialized_component = json_utils.dumps(component)
placeholders = re.findall(data_types.RUNTIME_PARAMETER_PATTERN,
serialized_component)
for placeholder in placeholders:
placeholder = placeholder.replace('\\', '') # Clean escapes.
placeholder = utils.fix_brackets(placeholder) # Fix brackets if needed.
parameter = json_utils.loads(placeholder)
# Escape pipeline root because it will be added later.
if parameter.name == tfx_pipeline.ROOT_PARAMETER.name:
continue
if parameter.name not in self._deduped_parameter_names:
self._deduped_parameter_names.add(parameter.name)
# TODO(b/178436919): Create a test to cover default value rendering
# and move the external code reference over there.
# The default needs to be serialized then passed to dsl.PipelineParam.
# See
# https://github.com/kubeflow/pipelines/blob/f65391309650fdc967586529e79af178241b4c2c/sdk/python/kfp/dsl/_pipeline_param.py#L154
dsl_parameter = dsl.PipelineParam(
name=parameter.name, value=str(parameter.default))
self._params.append(dsl_parameter)
def _parse_parameter_from_pipeline(self,
pipeline: tfx_pipeline.Pipeline) -> None:
"""Extract all the RuntimeParameter placeholders from the pipeline."""
for component in pipeline.components:
self._parse_parameter_from_component(component)
def _construct_pipeline_graph(self, pipeline: tfx_pipeline.Pipeline,
pipeline_root: dsl.PipelineParam):
"""Constructs a Kubeflow Pipeline graph.
Args:
pipeline: The logical TFX pipeline to base the construction on.
pipeline_root: dsl.PipelineParam representing the pipeline root.
"""
component_to_kfp_op = {}
tfx_ir = self._generate_tfx_ir(pipeline)
# Assumption: There is a partial ordering of components in the list, i.e.,
# if component A depends on component B and C, then A appears after B and C
# in the list.
for component in pipeline.components:
# Keep track of the set of upstream dsl.ContainerOps for this component.
depends_on = set()
for upstream_component in component.upstream_nodes:
depends_on.add(component_to_kfp_op[upstream_component])
kfp_component = base_component.BaseComponent(
component=component,
depends_on=depends_on,
pipeline=pipeline,
pipeline_root=pipeline_root,
tfx_image=self._config.tfx_image,
kubeflow_metadata_config=self._config.kubeflow_metadata_config,
pod_labels_to_attach=self._pod_labels_to_attach,
tfx_ir=tfx_ir)
for operator in self._config.pipeline_operator_funcs:
kfp_component.container_op.apply(operator)
component_to_kfp_op[component] = kfp_component.container_op
def _generate_tfx_ir(
self, pipeline: tfx_pipeline.Pipeline) -> Optional[pipeline_pb2.Pipeline]:
result = self._tfx_compiler.compile(pipeline)
logging.info('Generated pipeline:\n %s', result)
return result
def run(self, pipeline: tfx_pipeline.Pipeline):
"""Compiles and outputs a Kubeflow Pipeline YAML definition file.
Args:
pipeline: The logical TFX pipeline to use when building the Kubeflow
pipeline.
"""
for component in pipeline.components:
# TODO(b/187122662): Pass through pip dependencies as a first-class
# component flag.
if isinstance(component, tfx_base_component.BaseComponent):
component._resolve_pip_dependencies( # pylint: disable=protected-access
pipeline.pipeline_info.pipeline_root)
# KFP DSL representation of pipeline root parameter.
dsl_pipeline_root = dsl.PipelineParam(
name=tfx_pipeline.ROOT_PARAMETER.name,
value=pipeline.pipeline_info.pipeline_root)
self._params.append(dsl_pipeline_root)
def _construct_pipeline():
"""Constructs a Kubeflow pipeline.
Creates Kubeflow ContainerOps for each TFX component encountered in the
logical pipeline definition.
"""
self._construct_pipeline_graph(pipeline, dsl_pipeline_root)
# Need to run this first to get self._params populated. Then KFP compiler
# can correctly match default value with PipelineParam.
self._parse_parameter_from_pipeline(pipeline)
file_name = self._output_filename or get_default_output_filename(
pipeline.pipeline_info.pipeline_name)
# Create workflow spec and write out to package.
self._compiler._create_and_write_workflow( # pylint: disable=protected-access
pipeline_func=_construct_pipeline,
pipeline_name=pipeline.pipeline_info.pipeline_name,
params_list=self._params,
package_path=os.path.join(self._output_dir, file_name))
| 40.895408 | 136 | 0.748487 | [
"Apache-2.0"
] | TimoKerr/tfx | tfx/orchestration/kubeflow/kubeflow_dag_runner.py | 16,031 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from refinery.units.pattern import PatternExtractor
from refinery.units import RefineryCriticalException
from refinery.lib.patterns import wallets
class xtw(PatternExtractor):
"""
Extract Wallets: Extracts anything that looks like a cryptocurrency wallet address.
    This works similarly to the `refinery.xtp` unit.
"""
def __init__(self, stripspace=False, duplicates=False, longest=False, take=None):
self.superinit(super(), **vars(), ascii=True, utf16=True)
def process(self, data):
pattern = '|'.join(F'(?P<{p.name}>{p.value})' for p in wallets).encode('latin1')
def check(match):
for name, value in match.groupdict().items():
if value is not None:
break
else:
raise RefineryCriticalException('Received empty match.')
return self.labelled(value, kind=name)
yield from self.matches_filtered(memoryview(data), pattern, check)
| 35.137931 | 88 | 0.651619 | [
"BSD-3-Clause"
] | binre/refinery | refinery/units/pattern/xtw.py | 1,019 | Python |
# pylint: disable=missing-class-docstring,missing-module-docstring
# Discord Packages
from discord.ext.commands.errors import CommandError
class NoDM(CommandError):
pass
class NoToken(Exception):
pass
| 17.75 | 66 | 0.784038 | [
"MIT"
] | Roxedus/PoengBott | cogs/utils/my_errors.py | 213 | Python |
# -*- coding: utf-8 -*-
'''
feature: v1, 2, 3, 4, 10, 11
feature: v1, 2, 3, 4, 11, 13, 14, 17, 18, 19, 22, 23
model: v10
'''
import itertools
import json
import gc
import glob
import os
import time
import cv2
import re
import nltk
import torch
import imagehash
import lightgbm as lgb
import xgboost as xgb
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import scipy as sp
from scipy.stats import rankdata
from PIL import Image
from pymagnitude import Magnitude
from gensim.models import word2vec, KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from contextlib import contextmanager
from functools import partial
from itertools import combinations
from logging import getLogger, Formatter, StreamHandler, FileHandler, INFO
from keras.applications.densenet import preprocess_input as preprocess_input_dense
from keras.applications.densenet import DenseNet121
from keras.applications.inception_resnet_v2 import preprocess_input as preprocess_input_incep
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras import backend as K
from keras.layers import GlobalAveragePooling2D, Input, Lambda, AveragePooling1D
from keras.models import Model
from keras.preprocessing.text import text_to_word_sequence
from sklearn.decomposition import LatentDirichletAllocation, TruncatedSVD, NMF
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics import cohen_kappa_score, mean_squared_error
from sklearn.model_selection import GroupKFold, StratifiedKFold, train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.feature_extraction.text import _document_frequency
# ===============
# Constants
# ===============
COMPETITION_NAME = 'petfinder-adoption-prediction'
MODEL_NAME = 'v001'
logger = getLogger(COMPETITION_NAME)
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
target = 'AdoptionSpeed'
len_train = 14993
len_test = 3948
T_flag = True
K_flag = True
G_flag = True
debug = False
# ===============
# Params
# ===============
seed = 777
kaeru_seed = 1337
n_splits = 5
np.random.seed(seed)
# feature engineering
n_components = 5
n_components_gege_img = 32
n_components_gege_txt = 16
img_size = 256
batch_size = 256
# model
MODEL_PARAMS = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'rmse',
'learning_rate': 0.01,
'num_leaves': 63,
'subsample': 0.9,
'subsample_freq': 1,
'colsample_bytree': 0.6,
'max_depth': 9,
'max_bin': 127,
'reg_alpha': 0.11,
'reg_lambda': 0.01,
'min_child_weight': 0.2,
'min_child_samples': 20,
'min_gain_to_split': 0.02,
'min_data_in_bin': 3,
'bin_construct_sample_cnt': 5000,
'cat_l2': 10,
'verbose': -1,
'nthread': -1,
'seed': 777,
}
KAERU_PARAMS = {'application': 'regression',
'boosting': 'gbdt',
'metric': 'rmse',
'num_leaves': 70,
'max_depth': 9,
'learning_rate': 0.01,
'max_bin': 32,
'bagging_freq': 2,
'bagging_fraction': 0.85,
'feature_fraction': 0.8,
'min_split_gain': 0.02,
'min_child_samples': 150,
'min_child_weight': 0.02,
'lambda_l2': 0.0475,
'verbosity': -1,
'seed': kaeru_seed}
ADV_PARAMS = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'num_leaves': 64,
'learning_rate': 0.02,
'verbose': 0,
'lambda_l1': 0.1,
'seed': 1213
}
MODEL_PARAMS_XGB = {
'eval_metric': 'rmse',
'seed': 1337,
'eta': 0.01,
'subsample': 0.8,
'colsample_bytree': 0.85,
'tree_method': 'gpu_hist',
'device': 'gpu',
'silent': 1,
}
FIT_PARAMS = {
'num_boost_round': 5000,
'early_stopping_rounds': 100,
'verbose_eval': 5000,
}
# define
maxvalue_dict = {}
categorical_features = [
'Breed1',
'Breed2',
'Color1',
'Color2',
'Color3',
'Dewormed',
'FurLength',
'Gender',
'Health',
'MaturitySize',
'State',
'Sterilized',
'Type',
'Vaccinated',
'Type_main_breed',
'BreedName_main_breed',
'Type_second_breed',
'BreedName_second_breed',
'BreedName_main_breed_all',
]
contraction_mapping = {u"ain’t": u"is not", u"aren’t": u"are not", u"can’t": u"cannot", u"’cause": u"because",
u"could’ve": u"could have", u"couldn’t": u"could not", u"didn’t": u"did not",
u"doesn’t": u"does not", u"don’t": u"do not", u"hadn’t": u"had not",
u"hasn’t": u"has not", u"haven’t": u"have not", u"he’d": u"he would",
u"he’ll": u"he will", u"he’s": u"he is", u"how’d": u"how did", u"how’d’y": u"how do you",
u"how’ll": u"how will", u"how’s": u"how is", u"I’d": u"I would",
u"I’d’ve": u"I would have", u"I’ll": u"I will", u"I’ll’ve": u"I will have",
u"I’m": u"I am", u"I’ve": u"I have", u"i’d": u"i would", u"i’d’ve": u"i would have",
u"i’ll": u"i will", u"i’ll’ve": u"i will have", u"i’m": u"i am", u"i’ve": u"i have",
u"isn’t": u"is not", u"it’d": u"it would", u"it’d’ve": u"it would have",
u"it’ll": u"it will", u"it’ll’ve": u"it will have", u"it’s": u"it is",
u"let’s": u"let us", u"ma’am": u"madam", u"mayn’t": u"may not",
u"might’ve": u"might have", u"mightn’t": u"might not", u"mightn’t’ve": u"might not have",
u"must’ve": u"must have", u"mustn’t": u"must not", u"mustn’t’ve": u"must not have",
u"needn’t": u"need not", u"needn’t’ve": u"need not have", u"o’clock": u"of the clock",
u"oughtn’t": u"ought not", u"oughtn’t’ve": u"ought not have", u"shan’t": u"shall not",
u"sha’n’t": u"shall not", u"shan’t’ve": u"shall not have", u"she’d": u"she would",
u"she’d’ve": u"she would have", u"she’ll": u"she will", u"she’ll’ve": u"she will have",
u"she’s": u"she is", u"should’ve": u"should have", u"shouldn’t": u"should not",
u"shouldn’t’ve": u"should not have", u"so’ve": u"so have", u"so’s": u"so as",
u"this’s": u"this is", u"that’d": u"that would", u"that’d’ve": u"that would have",
u"that’s": u"that is", u"there’d": u"there would", u"there’d’ve": u"there would have",
u"there’s": u"there is", u"here’s": u"here is", u"they’d": u"they would",
u"they’d’ve": u"they would have", u"they’ll": u"they will",
u"they’ll’ve": u"they will have", u"they’re": u"they are", u"they’ve": u"they have",
u"to’ve": u"to have", u"wasn’t": u"was not", u"we’d": u"we would",
u"we’d’ve": u"we would have", u"we’ll": u"we will", u"we’ll’ve": u"we will have",
u"we’re": u"we are", u"we’ve": u"we have", u"weren’t": u"were not",
u"what’ll": u"what will", u"what’ll’ve": u"what will have", u"what’re": u"what are",
u"what’s": u"what is", u"what’ve": u"what have", u"when’s": u"when is",
u"when’ve": u"when have", u"where’d": u"where did", u"where’s": u"where is",
u"where’ve": u"where have", u"who’ll": u"who will", u"who’ll’ve": u"who will have",
u"who’s": u"who is", u"who’ve": u"who have", u"why’s": u"why is", u"why’ve": u"why have",
u"will’ve": u"will have", u"won’t": u"will not", u"won’t’ve": u"will not have",
u"would’ve": u"would have", u"wouldn’t": u"would not", u"wouldn’t’ve": u"would not have",
u"y’all": u"you all", u"y’all’d": u"you all would", u"y’all’d’ve": u"you all would have",
u"y’all’re": u"you all are", u"y’all’ve": u"you all have", u"you’d": u"you would",
u"you’d’ve": u"you would have", u"you’ll": u"you will", u"you’ll’ve": u"you will have",
u"you’re": u"you are", u"you’ve": u"you have", u"cat’s": u"cat is", u" whatapp ": u" whatapps ",
u" whatssapp ": u" whatapps ", u" whatssap ": u" whatapps ", u" whatspp ": u" whatapps ",
u" whastapp ": u" whatapps ", u" whatsap ": u" whatapps ", u" whassap ": u" whatapps ",
u" watapps ": u" whatapps ", u"wetfood": u"wet food", u"intetested": u"interested",
u"领养条件,": u"领养条件", u"谢谢。": u"谢谢",
u"别打我,记住,我有反抗的牙齿,但我不会咬你。remember": u"别打我,记住,我有反抗的牙齿,但我不会咬你。",
u"有你。do": u"有你。", u"名字name": u"名字", u"year,": u"year", u"work,your": u"work your",
u"too,will": u"too will", u"timtams": u"timtam", u"spay。": u"spay", u"shoulder,a": u"shoulder a",
u"sherpherd": u"shepherd", u"sherphed": u"shepherd", u"sherperd": u"shepherd",
u"sherpard": u"shepherd", u"serious。": u"serious", u"remember,i": u"remember i",
u"recover,": u"recover", u"refundable指定期限内结扎后会全数奉还": u"refundable",
u"puchong区,有没有人有增添家庭成员?": u"puchong", u"puchong救的": u"puchong",
u"puchong,": u"puchong", u"month。": u"month", u"month,": u"month",
u"microchip(做狗牌一定要有主人的电话号码)": u"microchip", u"maju。": u"maju", u"maincoone": u"maincoon",
u"lumpur。": u"lumpur", u"location:阿里玛,大山脚": u"location", u"life🐾🐾": u"life",
u"kibble,": u"kibble", u"home…": u"home", u"hand,but": u"hand but", u"hair,a": u"hair a",
u"grey、brown": u"grey brown", u"gray,": u"gray", u"free免费": u"free", u"food,or": u"food or",
u"dog/dog": u"dog", u"dijumpa": u"dijumpai", u"dibela": u"dibelai",
u"beauuuuuuuuutiful": u"beautiful", u"adopt🙏": u"adopt", u"addopt": u"adopt",
u"enxiety": u"anxiety", u"vaksin": u"vaccine"}
numerical_features = []
text_features = ['Name', 'Description', 'Description_Emb', 'Description_bow']
meta_text = ['BreedName_main_breed', 'BreedName_second_breed', 'annots_top_desc', 'sentiment_text',
'annots_top_desc_pick', 'sentiment_entities']
remove = ['index', 'seq_text', 'PetID', 'Name', 'Description', 'RescuerID', 'StateName', 'annots_top_desc',
'sentiment_text',
'sentiment_entities', 'Description_Emb', 'Description_bow', 'annots_top_desc_pick']
kaeru_drop_cols = ["2017GDPperCapita", "Bumiputra", "Chinese", "HDI", "Indian", "Latitude", "Longitude",
'color_red_score_mean_mean', 'color_red_score_mean_sum', 'color_blue_score_mean_mean',
'color_blue_score_mean_sum', 'color_green_score_mean_mean', 'color_green_score_mean_sum',
'dog_cat_scores_mean_mean', 'dog_cat_scores_mean_sum', 'dog_cat_topics_mean_mean',
'dog_cat_topics_mean_sum', 'is_dog_or_cat_mean_mean', 'is_dog_or_cat_mean_sum',
'len_text_mean_mean', 'len_text_mean_sum', 'StateID']
gege_drop_cols = ['2017GDPperCapita', 'Breed1_equals_Breed2', 'Bumiputra', 'Chinese',
'HDI', 'Indian', 'Latitude', 'Longitude', 'Pop_density', 'Urban_pop', 'Breed1_equals_Breed2',
'fix_Breed1', 'fix_Breed2', 'single_Breed', 'color_red_score_mean_mean', 'color_red_score_mean_sum',
'color_red_score_mean_var', 'color_blue_score_mean_mean', 'color_blue_score_mean_sum',
'color_blue_score_mean_var', 'color_green_score_mean_mean', 'color_green_score_mean_sum',
'color_green_score_mean_var', 'dog_cat_scores_mean_mean', 'dog_cat_scores_mean_sum',
'dog_cat_scores_mean_var', 'dog_cat_topics_mean_mean', 'dog_cat_topics_mean_sum',
'dog_cat_topics_mean_var', 'is_dog_or_cat_mean_mean', 'is_dog_or_cat_mean_sum',
'is_dog_or_cat_mean_var', 'len_text_mean_mean', 'len_text_mean_sum', 'len_text_mean_var']
use_cols = pd.read_csv("../input/pet-usecols/importance10.csv")
# use_cols = pd.read_csv("importance9.csv")
use_cols["gain"] = use_cols["gain"] / use_cols["gain"].sum()
use_cols = list(use_cols[use_cols.gain > 0.0002].feature.values)
ps = nltk.stem.PorterStemmer()
lc = nltk.stem.lancaster.LancasterStemmer()
sb = nltk.stem.snowball.SnowballStemmer('english')
# ===============
# Utility Functions
# ===============
def to_category(train, cat=None):
if cat is None:
cat = [col for col in train.columns if train[col].dtype == 'object']
for c in cat:
train[c], uniques = pd.factorize(train[c])
maxvalue_dict[c] = train[c].max() + 1
return train
def init_logger():
# Add handlers
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler('{}.log'.format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger.setLevel(INFO)
logger.addHandler(handler)
logger.addHandler(fh_handler)
@contextmanager
def timer(name):
t0 = time.time()
yield
logger.info(f'[{name}] done in {time.time() - t0:.0f} s')
def load_image_and_hash(paths):
funcs = [
imagehash.average_hash,
imagehash.phash,
imagehash.dhash,
imagehash.whash,
# lambda x: imagehash.whash(x, mode='db4'),
]
petids = []
hashes = []
for path in paths:
image = Image.open(path)
imageid = path.split('/')[-1].split('.')[0][:-2]
petids.append(imageid)
hashes.append(np.array([f(image).hash for f in funcs]).reshape(256))
return petids, np.array(hashes).astype(np.int32)
def find_duplicates_all():
train_paths = glob.glob('../input/petfinder-adoption-prediction/train_images/*-1.jpg')
train_paths += glob.glob('../input/petfinder-adoption-prediction/train_images/*-2.jpg')
test_paths = glob.glob('../input/petfinder-adoption-prediction/test_images/*-1.jpg')
test_paths += glob.glob('../input/petfinder-adoption-prediction/test_images/*-2.jpg')
train_petids, train_hashes = load_image_and_hash(train_paths)
test_petids, test_hashes = load_image_and_hash(test_paths)
# sims = np.array([(train_hashes[i] == test_hashes).sum(axis=1)/256 for i in range(train_hashes.shape[0])])
train_hashes = torch.Tensor(train_hashes).cuda()
test_hashes = torch.Tensor(test_hashes).cuda()
sims = np.array(
[(train_hashes[i] == test_hashes).sum(dim=1).cpu().numpy() / 256 for i in range(train_hashes.shape[0])])
indices1 = np.where(sims > 0.9)
indices2 = np.where(indices1[0] != indices1[1])
petids1 = [train_petids[i] for i in indices1[0][indices2]]
petids2 = [test_petids[i] for i in indices1[1][indices2]]
dups = {tuple(sorted([petid1, petid2])): True for petid1, petid2 in zip(petids1, petids2)}
logger.info('found %d duplicates' % len(dups))
return dups
def submission_with_postprocess(y_pred):
df_sub = pd.read_csv('../input/petfinder-adoption-prediction/test/sample_submission.csv')
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
df_sub["AdoptionSpeed"] = y_pred
# postprocess
duplicated = find_duplicates_all()
duplicated = pd.DataFrame(duplicated, index=range(0)).T.reset_index()
duplicated.columns = ['pet_id_0', 'pet_id_1']
duplicated_0 = duplicated.merge(train[['PetID', 'AdoptionSpeed']], how='left', left_on='pet_id_0',
right_on='PetID').dropna()
df_sub = df_sub.merge(duplicated_0[['pet_id_1', 'AdoptionSpeed']],
how='left', left_on='PetID', right_on='pet_id_1', suffixes=('_original', ''))
df_sub['AdoptionSpeed'].fillna(df_sub['AdoptionSpeed_original'], inplace=True)
df_sub = df_sub[['PetID', 'AdoptionSpeed']]
duplicated_1 = duplicated.merge(train[['PetID', 'AdoptionSpeed']],
how='left', left_on='pet_id_1', right_on='PetID').dropna()
df_sub = df_sub.merge(duplicated_1[['pet_id_0', 'AdoptionSpeed']],
how='left', left_on='PetID', right_on='pet_id_0', suffixes=('_original', ''))
df_sub['AdoptionSpeed'].fillna(df_sub['AdoptionSpeed_original'], inplace=True)
df_sub = df_sub[['PetID', 'AdoptionSpeed']]
df_sub['AdoptionSpeed'] = df_sub['AdoptionSpeed'].astype('int32')
# submission
df_sub.to_csv('submission.csv', index=False)
def submission(y_pred):
logger.info('making submission file...')
df_sub = pd.read_csv('../input/petfinder-adoption-prediction/test/sample_submission.csv')
df_sub[target] = y_pred
df_sub.to_csv('submission.csv', index=False)
def analyzer_bow(text):
stop_words = ['i', 'a', 'an', 'the', 'to', 'and', 'or', 'if', 'is', 'are', 'am', 'it', 'this', 'that', 'of', 'from',
'in', 'on']
    text = text.lower()  # lowercase
    text = text.replace('\n', '')  # remove newlines
    text = text.replace('\t', '')  # remove tabs
puncts = r',.":)(-!?|;\'$&/[]>%=#*+\\•~@£·_{}©^®`<→°€™›♥←×§″′Â█½à…“★”–●â►−¢²¬░¶↑±¿▾═¦║―¥▓—‹─▒:¼⊕▼▪†■’▀¨▄♫☆é¯♦¤▲踾Ã⋅‘∞∙)↓、│(»,♪╩╚³・╦╣╔╗▬❤ïØ¹≤‡√。【】'
for punct in puncts:
text = text.replace(punct, f' {punct} ')
for bad_word in contraction_mapping:
if bad_word in text:
text = text.replace(bad_word, contraction_mapping[bad_word])
    text = text.split(' ')  # split on spaces
text = [sb.stem(t) for t in text]
words = []
for word in text:
        if (re.compile(r'^.*[0-9]+.*$').fullmatch(word) is not None):  # split tokens that contain digits
            for w in re.findall(r'(\d+|\D+)', word):
                words.append(w)
            continue
        if word in stop_words:  # drop stop words
            continue
        if len(word) < 2:  # drop 1-character and empty tokens
continue
words.append(word)
return " ".join(words)
def analyzer_embed(text):
    text = text.lower()  # lowercase
    text = text.replace('\n', '')  # remove newlines
    text = text.replace('\t', '')  # remove tabs
puncts = r',.":)(-!?|;\'$&/[]>%=#*+\\•~@£·_{}©^®`<→°€™›♥←×§″′Â█½à…“★”–●â►−¢²¬░¶↑±¿▾═¦║―¥▓—‹─▒:¼⊕▼▪†■’▀¨▄♫☆é¯♦¤▲踾Ã⋅‘∞∙)↓、│(»,♪╩╚³・╦╣╔╗▬❤ïØ¹≤‡√。【】'
for punct in puncts:
text = text.replace(punct, f' {punct} ')
for bad_word in contraction_mapping:
if bad_word in text:
text = text.replace(bad_word, contraction_mapping[bad_word])
    text = text.split(' ')  # split on spaces
words = []
for word in text:
        if (re.compile(r'^.*[0-9]+.*$').fullmatch(word) is not None):  # split tokens that contain digits
            for w in re.findall(r'(\d+|\D+)', word):
                words.append(w)
            continue
        if len(word) < 1:  # drop empty tokens
continue
words.append(word)
return " ".join(words)
def analyzer_k(text):
stop_words = ['i', 'a', 'an', 'the', 'to', 'and', 'or', 'if', 'is', 'are', 'am', 'it', 'this', 'that', 'of', 'from',
'in', 'on']
    text = text.lower()  # lowercase
    text = text.replace('\n', '')  # remove newlines
    text = text.replace('\t', '')  # remove tabs
    text = re.sub(re.compile(r'[!-\/:-@[-`{-~]'), ' ', text)  # replace ASCII punctuation with spaces
    text = text.split(' ')  # split on spaces
words = []
for word in text:
        if (re.compile(r'^.*[0-9]+.*$').fullmatch(word) is not None):  # drop tokens that contain digits
            continue
        if word in stop_words:  # drop stop words
            continue
        if len(word) < 2:  # drop 1-character and empty tokens
continue
words.append(word)
return words
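# --- Illustrative usage sketch (added; not part of the original pipeline) ---
# analyzer_bow returns a cleaned, stemmed string, so its output is normally handed to a
# vectorizer as plain text, while analyzer_k returns a token list and can be plugged in
# directly as a custom sklearn analyzer. The function below is a hypothetical, uncalled example.
def _example_analyzer_usage():
    docs = ["Playful 2-month-old kitten!", "Friendly dog looking for a new home."]
    # Token-list analyzer plugged straight into TfidfVectorizer.
    tfidf_tokens = TfidfVectorizer(analyzer=analyzer_k).fit_transform(docs)
    # String analyzer applied up front, then vectorized with default tokenization.
    tfidf_text = TfidfVectorizer().fit_transform([analyzer_bow(d) for d in docs])
    return tfidf_tokens, tfidf_text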
# ===============
# Feature Engineering
# ===============
class GroupbyTransformer():
def __init__(self, param_dict=None):
self.param_dict = param_dict
def _get_params(self, p_dict):
key = p_dict['key']
if 'var' in p_dict.keys():
var = p_dict['var']
else:
var = self.var
if 'agg' in p_dict.keys():
agg = p_dict['agg']
else:
agg = self.agg
if 'on' in p_dict.keys():
on = p_dict['on']
else:
on = key
return key, var, agg, on
def _aggregate(self, dataframe):
self.features = []
for param_dict in self.param_dict:
key, var, agg, on = self._get_params(param_dict)
all_features = list(set(key + var))
new_features = self._get_feature_names(key, var, agg)
features = dataframe[all_features].groupby(key)[
var].agg(agg).reset_index()
features.columns = key + new_features
self.features.append(features)
return self
def _merge(self, dataframe, merge=True):
for param_dict, features in zip(self.param_dict, self.features):
key, var, agg, on = self._get_params(param_dict)
if merge:
dataframe = dataframe.merge(features, how='left', on=on)
else:
new_features = self._get_feature_names(key, var, agg)
dataframe = pd.concat([dataframe, features[new_features]], axis=1)
return dataframe
def transform(self, dataframe):
self._aggregate(dataframe)
return self._merge(dataframe, merge=True)
def _get_feature_names(self, key, var, agg):
_agg = []
for a in agg:
if not isinstance(a, str):
_agg.append(a.__name__)
else:
_agg.append(a)
return ['_'.join([a, v, 'groupby'] + key) for v in var for a in _agg]
def get_feature_names(self):
self.feature_names = []
for param_dict in self.param_dict:
key, var, agg, on = self._get_params(param_dict)
self.feature_names += self._get_feature_names(key, var, agg)
return self.feature_names
def get_numerical_features(self):
return self.get_feature_names()
class DiffGroupbyTransformer(GroupbyTransformer):
def _aggregate(self):
raise NotImplementedError
def _merge(self):
raise NotImplementedError
def transform(self, dataframe):
for param_dict in self.param_dict:
key, var, agg, on = self._get_params(param_dict)
for a in agg:
for v in var:
new_feature = '_'.join(['diff', a, v, 'groupby'] + key)
base_feature = '_'.join([a, v, 'groupby'] + key)
dataframe[new_feature] = dataframe[base_feature] - dataframe[v]
return dataframe
def _get_feature_names(self, key, var, agg):
_agg = []
for a in agg:
if not isinstance(a, str):
_agg.append(a.__name__)
else:
_agg.append(a)
return ['_'.join(['diff', a, v, 'groupby'] + key) for v in var for a in _agg]
class RatioGroupbyTransformer(GroupbyTransformer):
def _aggregate(self):
raise NotImplementedError
def _merge(self):
raise NotImplementedError
def transform(self, dataframe):
for param_dict in self.param_dict:
key, var, agg, on = self._get_params(param_dict)
for a in agg:
for v in var:
new_feature = '_'.join(['ratio', a, v, 'groupby'] + key)
base_feature = '_'.join([a, v, 'groupby'] + key)
dataframe[new_feature] = dataframe[v] / dataframe[base_feature]
return dataframe
def _get_feature_names(self, key, var, agg):
_agg = []
for a in agg:
if not isinstance(a, str):
_agg.append(a.__name__)
else:
_agg.append(a)
return ['_'.join(['ratio', a, v, 'groupby'] + key) for v in var for a in _agg]
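# --- Illustrative usage sketch (added; not part of the original pipeline) ---
# GroupbyTransformer aggregates the `var` columns by the `key` columns with the given
# `agg` functions and merges the result back onto the dataframe; the Diff/Ratio variants
# then derive differences/ratios against those aggregates. The param_dict below is a
# hypothetical example using columns that exist in the PetFinder data.
def _example_groupby_transformer(df):
    param_dict = [
        {'key': ['RescuerID'], 'var': ['Age', 'Quantity'], 'agg': ['mean', 'max']},
    ]
    df = GroupbyTransformer(param_dict=param_dict).transform(df)  # adds e.g. mean_Age_groupby_RescuerID
    df = DiffGroupbyTransformer(param_dict=param_dict).transform(df)  # diff_mean_Age_groupby_RescuerID
    df = RatioGroupbyTransformer(param_dict=param_dict).transform(df)  # ratio_mean_Age_groupby_RescuerID
    return df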
class CategoryVectorizer():
def __init__(self, categorical_columns, n_components,
vectorizer=CountVectorizer(),
transformer=LatentDirichletAllocation(),
name='CountLDA'):
self.categorical_columns = categorical_columns
self.n_components = n_components
self.vectorizer = vectorizer
self.transformer = transformer
self.name = name + str(self.n_components)
def transform(self, dataframe):
features = []
for (col1, col2) in self.get_column_pairs():
try:
sentence = self.create_word_list(dataframe, col1, col2)
sentence = self.vectorizer.fit_transform(sentence)
feature = self.transformer.fit_transform(sentence)
feature = self.get_feature(dataframe, col1, col2, feature, name=self.name)
features.append(feature)
except:
pass
features = pd.concat(features, axis=1)
return features
def create_word_list(self, dataframe, col1, col2):
col1_size = int(dataframe[col1].values.max() + 1)
col2_list = [[] for _ in range(col1_size)]
for val1, val2 in zip(dataframe[col1].values, dataframe[col2].values):
col2_list[int(val1)].append(col2 + str(val2))
return [' '.join(map(str, ls)) for ls in col2_list]
def get_feature(self, dataframe, col1, col2, latent_vector, name=''):
features = np.zeros(
shape=(len(dataframe), self.n_components), dtype=np.float32)
self.columns = ['_'.join([name, col1, col2, str(i)])
for i in range(self.n_components)]
for i, val1 in enumerate(dataframe[col1]):
features[i, :self.n_components] = latent_vector[val1]
return pd.DataFrame(data=features, columns=self.columns)
def get_column_pairs(self):
return [(col1, col2) for col1, col2 in itertools.product(self.categorical_columns, repeat=2) if col1 != col2]
def get_numerical_features(self):
return self.columns
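# --- Illustrative usage sketch (added; not part of the original pipeline) ---
# CategoryVectorizer builds a co-occurrence "document" per value of one categorical column
# from the values of another, vectorizes those documents and embeds them with the given
# decomposition (counts + LDA by default). Inputs must already be label encoded as
# integers (see to_category). The call below is a hypothetical example.
def _example_category_vectorizer(df):
    cv = CategoryVectorizer(['Breed1', 'Color1', 'State'], n_components=5,
                            vectorizer=CountVectorizer(),
                            transformer=LatentDirichletAllocation(n_components=5),
                            name='CountLDA')
    return cv.transform(df)  # columns like CountLDA5_Breed1_Color1_0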
class BM25Transformer(BaseEstimator, TransformerMixin):
"""
Parameters
----------
use_idf : boolean, optional (default=True)
k1 : float, optional (default=2.0)
b : float, optional (default=0.75)
References
----------
Okapi BM25: a non-binary model - Introduction to Information Retrieval
http://nlp.stanford.edu/IR-book/html/htmledition/okapi-bm25-a-non-binary-model-1.html
"""
def __init__(self, use_idf=True, k1=2.0, b=0.75):
self.use_idf = use_idf
self.k1 = k1
self.b = b
def fit(self, X):
"""
Parameters
----------
X : sparse matrix, [n_samples, n_features] document-term matrix
"""
if not sp.sparse.issparse(X):
X = sp.sparse.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
idf = np.log((n_samples - df + 0.5) / (df + 0.5))
self._idf_diag = sp.sparse.spdiags(idf, diags=0, m=n_features, n=n_features)
doc_len = X.sum(axis=1)
self._average_document_len = np.average(doc_len)
return self
def transform(self, X, copy=True):
"""
Parameters
----------
X : sparse matrix, [n_samples, n_features] document-term matrix
copy : boolean, optional (default=True)
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.sparse.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.sparse.csr_matrix(X, dtype=np.float, copy=copy)
n_samples, n_features = X.shape
# Document length (number of terms) in each row
# Shape is (n_samples, 1)
doc_len = X.sum(axis=1)
# Number of non-zero elements in each row
# Shape is (n_samples, )
sz = X.indptr[1:] - X.indptr[0:-1]
# In each row, repeat `doc_len` for `sz` times
# Shape is (sum(sz), )
# Example
# -------
# dl = [4, 5, 6]
# sz = [1, 2, 3]
# rep = [4, 5, 5, 6, 6, 6]
rep = np.repeat(np.asarray(doc_len), sz)
# Compute BM25 score only for non-zero elements
nom = self.k1 + 1
denom = X.data + self.k1 * (1 - self.b + self.b * rep / self._average_document_len)
data = X.data * nom / denom
X = sp.sparse.csr_matrix((data, X.indices, X.indptr), shape=X.shape)
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
X = X * self._idf_diag
return X
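# --- Illustrative usage sketch (added; not part of the original pipeline) ---
# BM25Transformer is used like sklearn's TfidfTransformer: it re-weights a document-term
# count matrix, here followed by an SVD to get dense text features. The pipeline below is
# a hypothetical example; min_df is an assumed parameter choice.
def _example_bm25(descriptions):
    pipeline = make_pipeline(
        CountVectorizer(analyzer=analyzer_k, min_df=2),
        BM25Transformer(use_idf=True, k1=2.0, b=0.75),
        TruncatedSVD(n_components=n_components, random_state=seed),
    )
    return pipeline.fit_transform(descriptions)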
# ===============
# For pet
# ===============
def merge_state_info(train):
states = pd.read_csv('../input/petfinder-adoption-prediction/state_labels.csv')
state_info = pd.read_csv('../input/state-info/state_info.csv')
state_info.rename(columns={
'Area (km2)': 'Area',
'Pop. density': 'Pop_density',
'Urban pop.(%)': 'Urban_pop',
'Bumiputra (%)': 'Bumiputra',
'Chinese (%)': 'Chinese',
'Indian (%)': 'Indian'
}, inplace=True)
state_info['Population'] = state_info['Population'].str.replace(',', '').astype('int32')
state_info['Area'] = state_info['Area'].str.replace(',', '').astype('int32')
state_info['Pop_density'] = state_info['Pop_density'].str.replace(',', '').astype('int32')
state_info['2017GDPperCapita'] = state_info['2017GDPperCapita'].str.replace(',', '').astype('float32')
state_info['StateName'] = state_info['StateName'].str.replace('FT ', '')
state_info['StateName'] = state_info['StateName'].str.replace('Malacca', 'Melaka')
state_info['StateName'] = state_info['StateName'].str.replace('Penang', 'Pulau Pinang')
states = states.merge(state_info, how='left', on='StateName')
train = train.merge(states, how='left', left_on='State', right_on='StateID')
return train
def merge_breed_name(train):
breeds = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')
with open("../input/cat-and-dog-breeds-parameters/rating.json", 'r', encoding='utf-8') as f:
breed_data = json.load(f)
cat_breed = pd.DataFrame.from_dict(breed_data['cat_breeds']).T
dog_breed = pd.DataFrame.from_dict(breed_data['dog_breeds']).T
df = pd.concat([dog_breed, cat_breed], axis=0).reset_index().rename(columns={'index': 'BreedName'})
df.BreedName.replace(
{
'Siamese Cat': 'Siamese',
'Chinese Crested': 'Chinese Crested Dog',
'Australian Cattle Dog': 'Australian Cattle Dog/Blue Heeler',
'Yorkshire Terrier': 'Yorkshire Terrier Yorkie',
'Pembroke Welsh Corgi': 'Welsh Corgi',
'Sphynx': 'Sphynx (hairless cat)',
'Plott': 'Plott Hound',
'Korean Jindo Dog': 'Jindo',
'Anatolian Shepherd Dog': 'Anatolian Shepherd',
'Belgian Malinois': 'Belgian Shepherd Malinois',
'Belgian Sheepdog': 'Belgian Shepherd Dog Sheepdog',
'Belgian Tervuren': 'Belgian Shepherd Tervuren',
'Bengal Cats': 'Bengal',
'Bouvier des Flandres': 'Bouvier des Flanders',
'Brittany': 'Brittany Spaniel',
'Caucasian Shepherd Dog': 'Caucasian Sheepdog (Caucasian Ovtcharka)',
'Dandie Dinmont Terrier': 'Dandi Dinmont Terrier',
'Bulldog': 'English Bulldog',
'American English Coonhound': 'English Coonhound',
'Small Munsterlander Pointer': 'Munsterlander',
'Entlebucher Mountain Dog': 'Entlebucher',
'Exotic': 'Exotic Shorthair',
'Flat-Coated Retriever': 'Flat-coated Retriever',
'English Foxhound': 'Foxhound',
'Alaskan Klee Kai': 'Klee Kai',
'Newfoundland': 'Newfoundland Dog',
'Norwegian Forest': 'Norwegian Forest Cat',
'Nova Scotia Duck Tolling Retriever': 'Nova Scotia Duck-Tolling Retriever',
'American Pit Bull Terrier': 'Pit Bull Terrier',
'Ragdoll Cats': 'Ragdoll',
'Standard Schnauzer': 'Schnauzer',
'Scottish Terrier': 'Scottish Terrier Scottie',
'Chinese Shar-Pei': 'Shar Pei',
'Shetland Sheepdog': 'Shetland Sheepdog Sheltie',
'West Highland White Terrier': 'West Highland White Terrier Westie',
'Soft Coated Wheaten Terrier': 'Wheaten Terrier',
'Wirehaired Pointing Griffon': 'Wire-haired Pointing Griffon',
'Xoloitzcuintli': 'Wirehaired Terrier',
'Cane Corso': 'Cane Corso Mastiff',
'Havana Brown': 'Havana',
}, inplace=True
)
breeds = breeds.merge(df, how='left', on='BreedName')
breeds1_dic, breeds2_dic = {}, {}
for c in breeds.columns:
if c == "BreedID":
continue
breeds1_dic[c] = c + "_main_breed_all"
breeds2_dic[c] = c + "_second_breed_all"
train = train.merge(breeds.rename(columns=breeds1_dic), how='left', left_on='Breed1', right_on='BreedID')
train.drop(['BreedID'], axis=1, inplace=True)
train = train.merge(breeds.rename(columns=breeds2_dic), how='left', left_on='Breed2', right_on='BreedID')
train.drop(['BreedID'], axis=1, inplace=True)
return train
def merge_breed_name_sub(train):
breeds = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')
df = pd.read_json('../input/cat-and-dog-breeds-parameters/rating.json')
cat_df = df.cat_breeds.dropna(0).reset_index().rename(columns={'index': 'BreedName'})
dog_df = df.dog_breeds.dropna(0).reset_index().rename(columns={'index': 'BreedName'})
cat = cat_df['cat_breeds'].apply(lambda x: pd.Series(x))
cat_df = pd.concat([cat_df, cat], axis=1).drop(['cat_breeds'], axis=1)
dog = dog_df['dog_breeds'].apply(lambda x: pd.Series(x))
    dog_df = pd.concat([dog_df, dog], axis=1).drop(['dog_breeds'], axis=1)
df = pd.concat([dog_df, cat_df])
df.BreedName.replace(
{
'Siamese Cat': 'Siamese',
'Chinese Crested': 'Chinese Crested Dog',
'Australian Cattle Dog': 'Australian Cattle Dog/Blue Heeler',
'Yorkshire Terrier': 'Yorkshire Terrier Yorkie',
'Pembroke Welsh Corgi': 'Welsh Corgi',
'Sphynx': 'Sphynx (hairless cat)',
'Plott': 'Plott Hound',
'Korean Jindo Dog': 'Jindo',
'Anatolian Shepherd Dog': 'Anatolian Shepherd',
'Belgian Malinois': 'Belgian Shepherd Malinois',
'Belgian Sheepdog': 'Belgian Shepherd Dog Sheepdog',
'Belgian Tervuren': 'Belgian Shepherd Tervuren',
'Bengal Cats': 'Bengal',
'Bouvier des Flandres': 'Bouvier des Flanders',
'Brittany': 'Brittany Spaniel',
'Caucasian Shepherd Dog': 'Caucasian Sheepdog (Caucasian Ovtcharka)',
'Dandie Dinmont Terrier': 'Dandi Dinmont Terrier',
'Bulldog': 'English Bulldog',
'American English Coonhound': 'English Coonhound',
'Small Munsterlander Pointer': 'Munsterlander',
'Entlebucher Mountain Dog': 'Entlebucher',
'Exotic': 'Exotic Shorthair',
'Flat-Coated Retriever': 'Flat-coated Retriever',
'English Foxhound': 'Foxhound',
'Alaskan Klee Kai': 'Klee Kai',
'Newfoundland': 'Newfoundland Dog',
'Norwegian Forest': 'Norwegian Forest Cat',
'Nova Scotia Duck Tolling Retriever': 'Nova Scotia Duck-Tolling Retriever',
'American Pit Bull Terrier': 'Pit Bull Terrier',
'Ragdoll Cats': 'Ragdoll',
'Standard Schnauzer': 'Schnauzer',
'Scottish Terrier': 'Scottish Terrier Scottie',
'Chinese Shar-Pei': 'Shar Pei',
'Shetland Sheepdog': 'Shetland Sheepdog Sheltie',
'West Highland White Terrier': 'West Highland White Terrier Westie',
'Soft Coated Wheaten Terrier': 'Wheaten Terrier',
'Wirehaired Pointing Griffon': 'Wire-haired Pointing Griffon',
'Xoloitzcuintli': 'Wirehaired Terrier',
'Cane Corso': 'Cane Corso Mastiff',
'Havana Brown': 'Havana',
}, inplace=True
)
breeds = breeds.merge(df, how='left', on='BreedName')
train = train.merge(breeds.rename(columns={'BreedName': 'BreedName_main_breed'}), how='left', left_on='Breed1',
right_on='BreedID', suffixes=('', '_main_breed'))
train.drop(['BreedID'], axis=1, inplace=True)
train = train.merge(breeds.rename(columns={'BreedName': 'BreedName_second_breed'}), how='left', left_on='Breed2',
right_on='BreedID', suffixes=('', '_second_breed'))
train.drop(['BreedID'], axis=1, inplace=True)
return train
def merge_breed_ranking(train):
breeds = pd.read_csv('../input/breed-labels-with-ranks/breed_labels_with_ranks.csv').drop("BreedName", axis=1)
train = train.merge(breeds, how="left", left_on="fix_Breed1", right_on="BreedID")
train = train.rename(columns={"BreedCatRank": "BreedCatRank_main", "BreedDogRank": "BreedDogRank_main"})
train = train.merge(breeds, how="left", left_on="fix_Breed2", right_on="BreedID")
train = train.rename(columns={"BreedCatRank": "BreedCatRank_second", "BreedDogRank": "BreedDogRank_second"})
return train
def breed_mismatch(train):
breed_labels = pd.read_csv('../input/petfinder-adoption-prediction/breed_labels.csv')
dog_breed_labels_set = list(breed_labels[breed_labels['Type'] == 1]['BreedID'])
dog_breed_labels_set.remove(307)
train['breeds_mismatch'] = list((train['Type'] == 2) & (
(train['fix_Breed1'].isin(dog_breed_labels_set)) | (train['fix_Breed2'].isin(dog_breed_labels_set))))
train['breeds_mismatch'] = train['breeds_mismatch'].astype(int)
return train
def breed_mismatch_desc(train):
train['desc_contain_dog'] = train['Description'].str.lower().str.contains(' dog | dogs ')
train['desc_contain_cat'] = train['Description'].str.lower().str.contains(' cat | cats ')
train['desc_miss_match'] = list((train['Type'] == 1) & (train['desc_contain_cat']))
train['desc_miss_match'] = train['desc_miss_match'].astype(int)
return train
def breed_mismatch_meta(train):
train['annot_contain_dog'] = train['annots_top_desc'].str.lower().str.contains(' dog | dogs ')
train['annot_contain_cat'] = train['annots_top_desc'].str.lower().str.contains(' cat | cats ')
train['annot_miss_match'] = list((train['Type'] == 1) & (train['annot_contain_cat']))
train['annot_miss_match'] = train['annot_miss_match'].astype(int)
return train
def extract_emojis(text, emoji_list):
return ' '.join(c for c in text if c in emoji_list)
def merge_emoji(train):
emoji = pd.read_csv('../input/emoji-sentiment-data/Emoji_Sentiment_Data_v1.0.csv')
emoji2 = pd.read_csv('../input/emoji-sentiment-data/Emojitracker_20150604.csv')
emoji = emoji.merge(emoji2, how='left', on='Emoji', suffixes=('', '_tracker'))
emoji_list = emoji['Emoji'].values
train_emoji = train['Description'].apply(extract_emojis, emoji_list=emoji_list)
train_emoji = pd.DataFrame([train['PetID'], train_emoji]).T.set_index('PetID')
train_emoji = train_emoji['Description'].str.extractall('(' + ')|('.join(emoji_list) + ')')
train_emoji = train_emoji.fillna(method='bfill', axis=1).iloc[:, 0].reset_index().rename(columns={0: 'Emoji'})
train_emoji = train_emoji.merge(emoji, how='left', on='Emoji')
emoji_columns = ['Occurrences', 'Position', 'Negative', 'Neutral', 'Positive', 'Occurrences_tracker']
stats = ['mean', 'max', 'min', 'median', 'std']
g = train_emoji.groupby('PetID')[emoji_columns].agg(stats)
g.columns = [c + '_' + stat for c in emoji_columns for stat in stats]
train = train.merge(g, how='left', on='PetID')
return train
def get_interactions(train):
interaction_features = ['Age', 'Quantity']
for (c1, c2) in combinations(interaction_features, 2):
train[c1 + '_mul_' + c2] = train[c1] * train[c2]
train[c1 + '_div_' + c2] = train[c1] / train[c2]
return train
def get_text_features(train):
train['Length_Description'] = train['Description'].map(len)
train['Length_annots_top_desc'] = train['annots_top_desc'].map(len)
train['Lengths_sentiment_text'] = train['sentiment_text'].map(len)
train['Lengths_sentiment_entities'] = train['sentiment_entities'].map(len)
return train
def get_name_features(train):
train['num_name_chars'] = train['Name'].apply(len)
train['num_name_capitals'] = train['Name'].apply(lambda x: sum(1 for c in x if c.isupper()))
train['name_caps_vs_length'] = train.apply(lambda row: row['num_name_capitals'] / (row['num_name_chars'] + 1e-5),
axis=1)
train['num_name_exclamation_marks'] = train['Name'].apply(lambda x: x.count('!'))
train['num_name_question_marks'] = train['Name'].apply(lambda x: x.count('?'))
train['num_name_punctuation'] = train['Name'].apply(lambda x: sum(x.count(w) for w in '.,;:'))
train['num_name_symbols'] = train['Name'].apply(lambda x: sum(x.count(w) for w in '*&$%'))
train['num_name_words'] = train['Name'].apply(lambda x: len(x.split()))
return train
class MetaDataParser(object):
def __init__(self):
# sentiment files
train_sentiment_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_sentiment/*.json'))
test_sentiment_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_sentiment/*.json'))
sentiment_files = train_sentiment_files + test_sentiment_files
self.sentiment_files = pd.DataFrame(sentiment_files, columns=['sentiment_filename'])
self.sentiment_files['PetID'] = self.sentiment_files['sentiment_filename'].apply(
lambda x: x.split('/')[-1].split('.')[0])
# metadata files
train_metadata_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_metadata/*.json'))
test_metadata_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_metadata/*.json'))
metadata_files = train_metadata_files + test_metadata_files
self.metadata_files = pd.DataFrame(metadata_files, columns=['metadata_filename'])
self.metadata_files['PetID'] = self.metadata_files['metadata_filename'].apply(
lambda x: x.split('/')[-1].split('-')[0])
def open_json_file(self, filename):
with open(filename, 'r', encoding="utf-8") as f:
metadata_file = json.load(f)
return metadata_file
def get_stats(self, array, name):
stats = [np.mean, np.max, np.min, np.sum, np.var]
result = {}
if len(array):
for stat in stats:
result[name + '_' + stat.__name__] = stat(array)
else:
for stat in stats:
result[name + '_' + stat.__name__] = 0
return result
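    # Illustrative example: get_stats([0.2, 0.4], 'score') yields keys such as
    # 'score_mean', 'score_amax', 'score_amin', 'score_sum' and 'score_var' --
    # the suffix is each numpy function's __name__ (np.max/np.min typically
    # report 'amax'/'amin', though the exact names depend on the numpy version).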
def parse_sentiment_file(self, file):
file_sentiment = file['documentSentiment']
file_entities = [x['name'] for x in file['entities']]
file_entities = ' '.join(file_entities)
file_sentences_text = [x['text']['content'] for x in file['sentences']]
file_sentences_text = ' '.join(file_sentences_text)
file_sentences_sentiment = [x['sentiment'] for x in file['sentences']]
file_sentences_sentiment_sum = pd.DataFrame.from_dict(
file_sentences_sentiment, orient='columns').sum()
file_sentences_sentiment_sum = file_sentences_sentiment_sum.add_prefix('document_sum_').to_dict()
file_sentences_sentiment_mean = pd.DataFrame.from_dict(
file_sentences_sentiment, orient='columns').mean()
file_sentences_sentiment_mean = file_sentences_sentiment_mean.add_prefix('document_mean_').to_dict()
        file_sentences_sentiment_var = pd.DataFrame.from_dict(
            file_sentences_sentiment, orient='columns').var()
        file_sentences_sentiment_var = file_sentences_sentiment_var.add_prefix('document_var_').to_dict()
file_sentiment.update(file_sentences_sentiment_mean)
file_sentiment.update(file_sentences_sentiment_sum)
file_sentiment.update(file_sentences_sentiment_var)
file_sentiment.update({"sentiment_text": file_sentences_text})
file_sentiment.update({"sentiment_entities": file_entities})
return pd.Series(file_sentiment)
def parse_metadata(self, file):
file_keys = list(file.keys())
if 'labelAnnotations' in file_keys:
label_annotations = file['labelAnnotations']
file_top_score = [x['score'] for x in label_annotations]
pick_value = int(len(label_annotations) * 0.3)
if pick_value == 0: pick_value = 1
file_top_score_pick = [x['score'] for x in label_annotations[:pick_value]]
file_top_desc = [x['description'] for x in label_annotations]
file_top_desc_pick = [x['description'] for x in label_annotations[:pick_value]]
dog_cat_scores = []
dog_cat_topics = []
is_dog_or_cat = []
for label in label_annotations:
if label['description'] == 'dog' or label['description'] == 'cat':
dog_cat_scores.append(label['score'])
dog_cat_topics.append(label['topicality'])
is_dog_or_cat.append(1)
else:
is_dog_or_cat.append(0)
else:
file_top_score = []
file_top_desc = []
dog_cat_scores = []
dog_cat_topics = []
is_dog_or_cat = []
file_top_score_pick = []
file_top_desc_pick = []
if 'faceAnnotations' in file_keys:
file_face = file['faceAnnotations']
n_faces = len(file_face)
else:
n_faces = 0
if 'textAnnotations' in file_keys:
text_annotations = file['textAnnotations']
file_n_text_annotations = len(text_annotations)
file_len_text = [len(text['description']) for text in text_annotations]
else:
file_n_text_annotations = 0
file_len_text = []
file_colors = file['imagePropertiesAnnotation']['dominantColors']['colors']
file_crops = file['cropHintsAnnotation']['cropHints']
file_color_score = [x['score'] for x in file_colors]
file_color_pixelfrac = [x['pixelFraction'] for x in file_colors]
file_color_red = [x['color']['red'] if 'red' in x['color'].keys() else 0 for x in file_colors]
file_color_blue = [x['color']['blue'] if 'blue' in x['color'].keys() else 0 for x in file_colors]
file_color_green = [x['color']['green'] if 'green' in x['color'].keys() else 0 for x in file_colors]
file_crop_conf = np.mean([x['confidence'] for x in file_crops])
file_crop_x = np.mean([x['boundingPoly']['vertices'][1]['x'] for x in file_crops])
file_crop_y = np.mean([x['boundingPoly']['vertices'][3]['y'] for x in file_crops])
if 'importanceFraction' in file_crops[0].keys():
file_crop_importance = np.mean([x['importanceFraction'] for x in file_crops])
else:
file_crop_importance = 0
metadata = {
'annots_top_desc': ' '.join(file_top_desc),
'annots_top_desc_pick': ' '.join(file_top_desc_pick),
'annots_score_pick_mean': np.mean(file_top_score_pick),
'n_faces': n_faces,
'n_text_annotations': file_n_text_annotations,
'crop_conf': file_crop_conf,
'crop_x': file_crop_x,
'crop_y': file_crop_y,
'crop_importance': file_crop_importance,
}
metadata.update(self.get_stats(file_top_score, 'annots_score_normal'))
metadata.update(self.get_stats(file_color_score, 'color_score'))
metadata.update(self.get_stats(file_color_pixelfrac, 'color_pixel_score'))
metadata.update(self.get_stats(file_color_red, 'color_red_score'))
metadata.update(self.get_stats(file_color_blue, 'color_blue_score'))
metadata.update(self.get_stats(file_color_green, 'color_green_score'))
metadata.update(self.get_stats(dog_cat_scores, 'dog_cat_scores'))
metadata.update(self.get_stats(dog_cat_topics, 'dog_cat_topics'))
metadata.update(self.get_stats(is_dog_or_cat, 'is_dog_or_cat'))
metadata.update(self.get_stats(file_len_text, 'len_text'))
metadata.update({"color_red_score_first": file_color_red[0] if len(file_color_red) > 0 else -1})
metadata.update({"color_blue_score_first": file_color_blue[0] if len(file_color_blue) > 0 else -1})
metadata.update({"color_green_score_first": file_color_green[0] if len(file_color_green) > 0 else -1})
metadata.update({"color_pixel_score_first": file_color_pixelfrac[0] if len(file_color_pixelfrac) > 0 else -1})
metadata.update({"color_score_first": file_color_score[0] if len(file_color_score) > 0 else -1})
metadata.update({"label_score_first": file_top_score[0] if len(file_top_score) > 0 else -1})
return pd.Series(metadata)
def _transform(self, path, sentiment=True):
file = self.open_json_file(path)
if sentiment:
result = self.parse_sentiment_file(file)
else:
result = self.parse_metadata(file)
return result
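# Typical use of MetaDataParser (this mirrors the __main__ block further below):
#   parser = MetaDataParser()
#   sentiment = parser.sentiment_files['sentiment_filename'].apply(
#       lambda x: parser._transform(x, sentiment=True))
#   metadata = parser.metadata_files['metadata_filename'].apply(
#       lambda x: parser._transform(x, sentiment=False))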
def pretrained_w2v(train_text, model, name):
train_corpus = [text_to_word_sequence(text) for text in train_text]
result = []
for text in train_corpus:
n_skip = 0
vec = np.zeros(model.vector_size)
for n_w, word in enumerate(text):
if word in model: # 0.9906
vec = vec + model.wv[word]
continue
word_ = word.upper()
if word_ in model: # 0.9909
vec = vec + model.wv[word_]
continue
word_ = word.capitalize()
if word_ in model: # 0.9925
vec = vec + model.wv[word_]
continue
word_ = ps.stem(word)
if word_ in model: # 0.9927
vec = vec + model.wv[word_]
continue
word_ = lc.stem(word)
if word_ in model: # 0.9932
vec = vec + model.wv[word_]
continue
word_ = sb.stem(word)
if word_ in model: # 0.9933
vec = vec + model.wv[word_]
continue
else:
n_skip += 1
continue
vec = vec / (n_w - n_skip + 1)
result.append(vec)
w2v_cols = ["{}{}".format(name, i) for i in range(1, model.vector_size + 1)]
result = pd.DataFrame(result)
result.columns = w2v_cols
return result
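# pretrained_w2v averages word vectors per description, falling back to the
# upper-cased, capitalised and stemmed forms (ps/lc/sb are assumed to be the
# Porter/Lancaster/Snowball stemmers defined earlier in the script) before
# counting a word as out-of-vocabulary. Usage sketch, as in the GoogleNews
# embedding block below:
#   model = KeyedVectors.load_word2vec_format(embedding_path, binary=True)
#   X = pretrained_w2v(train["Description_Emb"], model, name="gnvec")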
def w2v_pymagnitude(train_text, model, name):
train_corpus = [text_to_word_sequence(text) for text in train_text]
result = []
for text in train_corpus:
vec = np.zeros(model.dim)
for n_w, word in enumerate(text):
if word in model: # 0.9906
vec = vec + model.query(word)
continue
word_ = word.upper()
if word_ in model: # 0.9909
vec = vec + model.query(word_)
continue
word_ = word.capitalize()
if word_ in model: # 0.9925
vec = vec + model.query(word_)
continue
word_ = ps.stem(word)
if word_ in model: # 0.9927
vec = vec + model.query(word_)
continue
word_ = lc.stem(word)
if word_ in model: # 0.9932
vec = vec + model.query(word_)
continue
word_ = sb.stem(word)
if word_ in model: # 0.9933
vec = vec + model.query(word_)
continue
vec = vec + model.query(word)
vec = vec / (n_w + 1)
result.append(vec)
w2v_cols = ["{}{}".format(name, i) for i in range(1, model.dim + 1)]
result = pd.DataFrame(result)
result.columns = w2v_cols
return result
def doc2vec(description_k, d2v_param):
corpus = [TaggedDocument(words=analyzer_k(text), tags=[i]) for i, text in enumerate(description_k)]
doc2vecs = Doc2Vec(
documents=corpus, dm=1,
**d2v_param
) # dm == 1 -> dmpv, dm != 1 -> DBoW
doc2vecs = np.array([doc2vecs.infer_vector(analyzer_k(text)) for text in description_k])
doc2vec_df = pd.DataFrame()
doc2vec_df['d2v_mean'] = np.mean(doc2vecs, axis=1)
doc2vec_df['d2v_sum'] = np.sum(doc2vecs, axis=1)
doc2vec_df['d2v_max'] = np.max(doc2vecs, axis=1)
doc2vec_df['d2v_min'] = np.min(doc2vecs, axis=1)
doc2vec_df['d2v_median'] = np.median(doc2vecs, axis=1)
doc2vec_df['d2v_var'] = np.var(doc2vecs, axis=1)
return doc2vec_df
def resize_to_square(im):
old_size = im.shape[:2] # old_size is in (height, width) format
ratio = float(img_size) / max(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
# new_size should be in (width, height) format
im = cv2.resize(im, (new_size[1], new_size[0]))
delta_w = img_size - new_size[1]
delta_h = img_size - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = [0, 0, 0]
new_im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
return new_im
def load_image(path, preprocessing):
    image = cv2.imread(path)
    new_image = resize_to_square(image)
    new_image = preprocessing(new_image)
    return new_image
def get_age_feats(df):
df["Age_year"] = (df["Age"] / 12).astype(np.int32)
over_1year_flag = df["Age"] / 12 >= 1
df.loc[over_1year_flag, "over_1year"] = 1
df.loc[~over_1year_flag, "over_1year"] = 0
return df
def freq_encoding(df, freq_cols):
for c in freq_cols:
count_df = df.groupby([c])['PetID'].count().reset_index()
count_df.columns = [c, '{}_freq'.format(c)]
df = df.merge(count_df, how='left', on=c)
return df
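# Frequency encoding example (illustrative): if a value of column c occurs
# 5,000 times in the combined train+test frame, every row carrying that value
# gets c_freq = 5000.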
def getSize(filename):
st = os.stat(filename)
return st.st_size
def getDimensions(filename):
img_size = Image.open(filename).size
return img_size
def is_zh(in_str):
"""
    If converting the string to SJIS drops characters, it contains simplified
    Chinese characters, so treat the text as Chinese.
"""
return (set(in_str) - set(in_str.encode('sjis', 'ignore').decode('sjis'))) != set([])
# ===============
# Model
# ===============
def get_score(y_true, y_pred):
return cohen_kappa_score(y_true, y_pred, weights='quadratic')
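# get_score is the competition metric: quadratic weighted kappa (QWK) between
# the true and predicted AdoptionSpeed classes.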
def get_y():
return pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv', usecols=[target]).values.flatten()
def run_model(X_train, y_train, X_valid, y_valid, X_test,
categorical_features,
predictors, maxvalue_dict, fold_id, params, model_name):
train = lgb.Dataset(X_train, y_train,
categorical_feature=categorical_features,
feature_name=predictors)
valid = lgb.Dataset(X_valid, y_valid,
categorical_feature=categorical_features,
feature_name=predictors)
evals_result = {}
model = lgb.train(
params,
train,
valid_sets=[valid],
valid_names=['valid'],
evals_result=evals_result,
**FIT_PARAMS
)
logger.info(f'Best Iteration: {model.best_iteration}')
# train score
y_pred_train = model.predict(X_train)
train_rmse = np.sqrt(mean_squared_error(y_train, y_pred_train))
# validation score
y_pred_valid = model.predict(X_valid)
valid_rmse = np.sqrt(mean_squared_error(y_valid, y_pred_valid))
y_pred_valid = rankdata(y_pred_valid) / len(y_pred_valid)
# save model
model.save_model(f'{model_name}_fold{fold_id}.txt')
# predict test
y_pred_test = model.predict(X_test)
y_pred_test = rankdata(y_pred_test) / len(y_pred_test)
# save predictions
np.save(f'{model_name}_train_fold{fold_id}.npy', y_pred_valid)
np.save(f'{model_name}_test_fold{fold_id}.npy', y_pred_test)
return y_pred_valid, y_pred_test, train_rmse, valid_rmse
def run_xgb_model(X_train, y_train, X_valid, y_valid, X_test,
predictors, maxvalue_dict, fold_id, params, model_name):
d_train = xgb.DMatrix(data=X_train, label=y_train, feature_names=predictors)
d_valid = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=predictors)
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
model = xgb.train(dtrain=d_train, evals=watchlist, params=params, **FIT_PARAMS)
# train score
y_pred_train = model.predict(d_train, ntree_limit=model.best_ntree_limit)
train_rmse = np.sqrt(mean_squared_error(y_train, y_pred_train))
# validation score
y_pred_valid = model.predict(d_valid, ntree_limit=model.best_ntree_limit)
valid_rmse = np.sqrt(mean_squared_error(y_valid, y_pred_valid))
y_pred_valid = rankdata(y_pred_valid) / len(y_pred_valid)
# save model
model.save_model(f'{model_name}_fold{fold_id}.txt')
# predict test
y_pred_test = model.predict(xgb.DMatrix(data=X_test, feature_names=predictors), ntree_limit=model.best_ntree_limit)
y_pred_test = rankdata(y_pred_test) / len(y_pred_test)
# save predictions
np.save(f'{model_name}_train_fold{fold_id}.npy', y_pred_valid)
np.save(f'{model_name}_test_fold{fold_id}.npy', y_pred_test)
return y_pred_valid, y_pred_test, train_rmse, valid_rmse
def plot_mean_feature_importances(feature_importances, max_num=50, importance_type='gain', path=None):
mean_gain = feature_importances[[importance_type, 'feature']].groupby('feature').mean()
feature_importances['mean_' + importance_type] = feature_importances['feature'].map(mean_gain[importance_type])
if path is not None:
data = feature_importances.sort_values('mean_' + importance_type, ascending=False).iloc[:max_num, :]
plt.clf()
plt.figure(figsize=(16, 8))
sns.barplot(x=importance_type, y='feature', data=data)
plt.tight_layout()
plt.savefig(path)
return feature_importances
def to_bins(x, borders):
for i in range(len(borders)):
if x <= borders[i]:
return i
return len(borders)
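# Worked example: with borders = [0.2, 0.4, 0.6, 0.8], to_bins(0.15, borders) -> 0,
# to_bins(0.55, borders) -> 2 and to_bins(0.95, borders) -> 4, i.e. a continuous
# prediction is mapped onto the five AdoptionSpeed classes.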
class OptimizedRounder(object):
def __init__(self):
self.coef_ = 0
def _loss(self, coef, X, y, idx):
X_p = np.array([to_bins(pred, coef) for pred in X])
ll = -get_score(y, X_p)
return ll
def fit(self, X, y):
coef = [0.2, 0.4, 0.6, 0.8]
golden1 = 0.618
golden2 = 1 - golden1
ab_start = [(0.01, 0.3), (0.15, 0.56), (0.35, 0.75), (0.6, 0.9)]
for it1 in range(10):
for idx in range(4):
# golden section search
a, b = ab_start[idx]
# calc losses
coef[idx] = a
la = self._loss(coef, X, y, idx)
coef[idx] = b
lb = self._loss(coef, X, y, idx)
for it in range(20):
# choose value
if la > lb:
a = b - (b - a) * golden1
coef[idx] = a
la = self._loss(coef, X, y, idx)
else:
b = b - (b - a) * golden2
coef[idx] = b
lb = self._loss(coef, X, y, idx)
self.coef_ = {'x': coef}
def predict(self, X, coef):
X_p = np.array([to_bins(pred, coef) for pred in X])
return X_p
def coefficients(self):
return self.coef_['x']
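# OptimizedRounder tunes the four class boundaries with a golden-section search
# so that the QWK of the binned predictions is maximised. Usage sketch (this
# mirrors the 'optimize threshold' block at the end of the script):
#   optR = OptimizedRounder()
#   optR.fit(oof_predictions, y_true)
#   classes = optR.predict(oof_predictions, optR.coefficients())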
class StratifiedGroupKFold():
def __init__(self, n_splits=5):
self.n_splits = n_splits
def split(self, X, y=None, groups=None):
fold = pd.DataFrame([X, y, groups]).T
fold.columns = ['X', 'y', 'groups']
fold['y'] = fold['y'].astype(int)
g = fold.groupby('groups')['y'].agg('mean').reset_index()
fold = fold.merge(g, how='left', on='groups', suffixes=('', '_mean'))
fold['y_mean'] = fold['y_mean'].apply(np.round)
fold['fold_id'] = 0
for unique_y in fold['y_mean'].unique():
mask = fold.y_mean == unique_y
selected = fold[mask].reset_index(drop=True)
            cv = GroupKFold(n_splits=self.n_splits)
for i, (train_index, valid_index) in enumerate(
cv.split(range(len(selected)), y=None, groups=selected['groups'])):
selected.loc[valid_index, 'fold_id'] = i
fold.loc[mask, 'fold_id'] = selected['fold_id'].values
for i in range(self.n_splits):
indices = np.arange(len(fold))
train_index = indices[fold['fold_id'] != i]
valid_index = indices[fold['fold_id'] == i]
yield train_index, valid_index
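# StratifiedGroupKFold approximates stratification under a group constraint:
# each group (RescuerID in this script) is assigned its rounded mean target,
# and a GroupKFold is run separately within every such stratum, so rescuers
# never straddle folds while the per-fold target distribution stays roughly
# balanced.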
if __name__ == '__main__':
init_logger()
t_cols, k_cols, g_cols = [], [], []
# load
train = pd.read_csv('../input/petfinder-adoption-prediction/train/train.csv')
test = pd.read_csv('../input/petfinder-adoption-prediction/test/test.csv')
train = pd.concat([train, test], sort=True)
train[['Description', 'Name']] = train[['Description', 'Name']].astype(str)
train["Description_Emb"] = [analyzer_embed(text) for text in train["Description"]]
train["Description_bow"] = [analyzer_bow(text) for text in train["Description"]]
train['fix_Breed1'] = train['Breed1']
train['fix_Breed2'] = train['Breed2']
train.loc[train['Breed1'] == 0, 'fix_Breed1'] = train[train['Breed1'] == 0]['Breed2']
train.loc[train['Breed1'] == 0, 'fix_Breed2'] = train[train['Breed1'] == 0]['Breed1']
train['Breed1_equals_Breed2'] = (train['Breed1'] == train['Breed2']).astype(int)
train['single_Breed'] = (train['Breed1'] * train['Breed2'] == 0).astype(int)
train.drop(["Breed1", "Breed2"], axis=1)
train.rename(columns={"fix_Breed1": "Breed1", "fix_Breed2": "Breed2"})
logger.info(f'DataFrame shape: {train.shape}')
with timer('common features'):
with timer('merge additional state files'):
train = merge_state_info(train)
common_cols = list(train.columns)
with timer('merge additional breed rating files'):
orig_cols = list(train.columns)
train = merge_breed_name_sub(train)
t_cols += [c for c in train.columns if c not in orig_cols]
k_cols += [c for c in train.columns if c not in orig_cols]
orig_cols = list(train.columns)
train = merge_breed_name(train)
g_cols += [c for c in train.columns if c not in orig_cols and "_main_breed_all" in c] + [
"Type_second_breed"]
with timer('preprocess category features'):
train = to_category(train, cat=categorical_features)
            train[text_features] = train[text_features].fillna('missing')
        with timer('preprocess metadata'):  # the columns used differ between kaeru and takuoko: kaeru uses all *_first features, takuoko uses none of them
# TODO: parallelization
meta_parser = MetaDataParser()
sentiment_features = meta_parser.sentiment_files['sentiment_filename'].apply(
lambda x: meta_parser._transform(x, sentiment=True))
meta_parser.sentiment_files = pd.concat([meta_parser.sentiment_files, sentiment_features], axis=1,
sort=False)
meta_features = meta_parser.metadata_files['metadata_filename'].apply(
lambda x: meta_parser._transform(x, sentiment=False))
meta_parser.metadata_files = pd.concat([meta_parser.metadata_files, meta_features], axis=1, sort=False)
stats = ['mean']
columns = [c for c in sentiment_features.columns if c not in ['sentiment_text', 'sentiment_entities']]
g = meta_parser.sentiment_files[list(sentiment_features.columns) + ['PetID']].groupby('PetID').agg(stats)
g.columns = [c + '_' + stat for c in columns for stat in stats]
train = train.merge(g, how='left', on='PetID')
            k_cols += [c for c in g.columns if re.match(r"\w*_mean_\w*mean", c)] + ["magnitude_mean", "score_mean"]
            t_cols += [c for c in g.columns if re.match(r"\w*_sum_\w*mean", c)] + ["magnitude_mean", "score_mean"]
g_cols += list(g.columns)
stats = ['mean', 'min', 'max', 'median', 'var', 'sum', 'first']
columns = [c for c in meta_features.columns if c not in ['annots_top_desc', 'annots_top_desc_pick']]
g = meta_parser.metadata_files[columns + ['PetID']].groupby('PetID').agg(stats)
g.columns = [c + '_' + stat for c in columns for stat in stats]
train = train.merge(g, how='left', on='PetID')
k_cols += [c for c in g.columns if
("mean_mean" in c or "mean_sum" in c or "first_first" in c) and "annots_score_normal" not in c] + \
['crop_conf_first', 'crop_x_first', 'crop_y_first', 'crop_importance_first', 'crop_conf_mean',
'crop_conf_sum', 'crop_importance_mean', 'crop_importance_sum']
            t_cols += [c for c in g.columns
                       if ((re.match(r"\w*_sum_\w*(?<!sum)$", c) and "first" not in c)
                           or ("sum" not in c and "first" not in c))
                       and "annots_score_pick" not in c]
g_cols += [c for c in g.columns if
"mean_mean" in c or "mean_sum" in c or "mean_var" in c and "annots_score_pick" not in c] + \
['crop_conf_mean', 'crop_conf_sum', 'crop_conf_var', 'crop_importance_mean',
'crop_importance_sum', 'crop_importance_var']
with timer('preprocess metatext'):
meta_features = meta_parser.metadata_files[['PetID', 'annots_top_desc', 'annots_top_desc_pick']]
meta_features_all = meta_features.groupby('PetID')['annots_top_desc'].apply(
lambda x: " ".join(x)).reset_index()
train = train.merge(meta_features_all, how='left', on='PetID')
meta_features_pick = meta_features.groupby('PetID')['annots_top_desc_pick'].apply(
lambda x: " ".join(x)).reset_index()
train = train.merge(meta_features_pick, how='left', on='PetID')
sentiment_features = meta_parser.sentiment_files[['PetID', 'sentiment_text', 'sentiment_entities']]
sentiment_features_txt = sentiment_features.groupby('PetID')['sentiment_text'].apply(
lambda x: " ".join(x)).reset_index()
train = train.merge(sentiment_features_txt, how='left', on='PetID')
sentiment_features_entities = sentiment_features.groupby('PetID')['sentiment_entities'].apply(
lambda x: " ".join(x)).reset_index()
train = train.merge(sentiment_features_entities, how='left', on='PetID')
train[meta_text] = train[meta_text].astype(str)
            train[meta_text] = train[meta_text].fillna("missing")
del meta_features_all, meta_features_pick, meta_features, sentiment_features;
gc.collect()
with timer('make image features'):
train_image_files = sorted(glob.glob('../input/petfinder-adoption-prediction/train_images/*.jpg'))
test_image_files = sorted(glob.glob('../input/petfinder-adoption-prediction/test_images/*.jpg'))
image_files = train_image_files + test_image_files
train_images = pd.DataFrame(image_files, columns=['image_filename'])
train_images['PetID'] = train_images['image_filename'].apply(lambda x: x.split('/')[-1].split('-')[0])
with timer('breed mismatch features'):
train = breed_mismatch(train)
train = breed_mismatch_desc(train)
train = breed_mismatch_meta(train)
t_cols += ['breeds_mismatch', 'desc_contain_dog', 'desc_contain_cat', 'desc_miss_match',
'annot_contain_dog', 'annot_contain_cat', 'annot_miss_match']
k_cols += ['breeds_mismatch', 'desc_miss_match', 'annot_miss_match']
with timer('preprocess densenet'):
if debug:
import feather
X = feather.read_dataframe("feature/dense121_2_X.feather")
gp_img = X.groupby("PetID").mean().reset_index()
train = pd.merge(train, gp_img, how="left", on="PetID")
gp_dense_first = X.groupby("PetID").first().reset_index()
t_cols += list(gp_img.drop("PetID", axis=1).columns)
del gp_img;
gc.collect()
else:
pet_ids = train_images['PetID'].values
img_pathes = train_images['image_filename'].values
n_batches = len(pet_ids) // batch_size + 1
inp = Input((256, 256, 3))
backbone = DenseNet121(input_tensor=inp,
weights='../input/densenet121weights/densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',
include_top=False)
x = backbone.output
x = GlobalAveragePooling2D()(x)
x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
x = AveragePooling1D(4)(x)
out = Lambda(lambda x: x[:, :, 0])(x)
m = Model(inp, out)
features = []
for b in range(n_batches):
start = b * batch_size
end = (b + 1) * batch_size
batch_pets = pet_ids[start: end]
batch_path = img_pathes[start: end]
batch_images = np.zeros((len(batch_pets), img_size, img_size, 3))
for i, (pet_id, path) in enumerate(zip(batch_pets, batch_path)):
                        try:
                            batch_images[i] = load_image(path, preprocess_input_dense)
                        except Exception:
                            pass  # leave this slot as zeros if the image cannot be read
batch_preds = m.predict(batch_images)
for i, pet_id in enumerate(batch_pets):
features.append([pet_id] + list(batch_preds[i]))
X = pd.DataFrame(features,
columns=["PetID"] + ["dense121_2_{}".format(i) for i in range(batch_preds.shape[1])])
gp_img = X.groupby("PetID").mean().reset_index()
train = pd.merge(train, gp_img, how="left", on="PetID")
gp_dense_first = X.groupby("PetID").first().reset_index()
t_cols += list(gp_img.drop("PetID", axis=1).columns)
del m, gp_img;
gc.collect();
K.clear_session()
if T_flag:
with timer('takuoko features'):
orig_cols = train.columns
with timer('merge emoji files'):
train = merge_emoji(train)
with timer('preprocess breed files'):
train = merge_breed_ranking(train)
with timer('preprocess and simple features'):
train = get_interactions(train)
with timer('tfidf + svd / nmf / bm25'):
vectorizer = make_pipeline(
TfidfVectorizer(),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['tfidf_svd_{}'.format(i) for i in range(n_components)]
+ ['tfidf_nmf_{}'.format(i) for i in range(n_components)]
+ ['tfidf_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('count + svd / nmf / bm25'):
vectorizer = make_pipeline(
CountVectorizer(),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['count_svd_{}'.format(i) for i in range(n_components)]
+ ['count_nmf_{}'.format(i) for i in range(n_components)]
+ ['count_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('tfidf2 + svd / nmf / bm25'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=2, max_features=20000,
strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english'),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['tfidf2_svd_{}'.format(i) for i in range(n_components)]
+ ['tfidf2_nmf_{}'.format(i) for i in range(n_components)]
+ ['tfidf2_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('count2 + svd / nmf / bm25'):
vectorizer = make_pipeline(
CountVectorizer(min_df=2, max_features=20000,
strip_accents='unicode', analyzer='word', token_pattern=r'\w{1,}',
ngram_range=(1, 3), stop_words='english'),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['count2_svd_{}'.format(i) for i in range(n_components)]
+ ['count2_nmf_{}'.format(i) for i in range(n_components)]
+ ['count2_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('tfidf3 + svd / nmf / bm25'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=30, max_features=50000, binary=True,
strip_accents='unicode', analyzer='char', token_pattern=r'\w{1,}',
ngram_range=(3, 3), use_idf=1, smooth_idf=1, sublinear_tf=1, stop_words='english'),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['tfidf3_svd_{}'.format(i) for i in range(n_components)]
+ ['tfidf3_nmf_{}'.format(i) for i in range(n_components)]
+ ['tfidf3_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('count3 + svd / nmf / bm25'):
vectorizer = make_pipeline(
CountVectorizer(min_df=30, max_features=50000, binary=True,
strip_accents='unicode', analyzer='char', token_pattern=r'\w{1,}',
ngram_range=(3, 3), stop_words='english'),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['Description_bow'])
X = pd.DataFrame(X, columns=['count3_svd_{}'.format(i) for i in range(n_components)]
+ ['count3_nmf_{}'.format(i) for i in range(n_components)]
+ ['count3_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('meta text bow/tfidf->svd / nmf / bm25'):
train['desc'] = ''
for c in ['BreedName_main_breed', 'BreedName_second_breed', 'annots_top_desc', 'sentiment_text']:
train['desc'] += ' ' + train[c].astype(str)
train["desc_bow"] = [analyzer_bow(text) for text in train["desc"]]
vectorizer = make_pipeline(
TfidfVectorizer(),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['desc_bow'])
X = pd.DataFrame(X, columns=['meta_desc_tfidf_svd_{}'.format(i) for i in range(n_components)]
+ ['meta_desc_tfidf_nmf_{}'.format(i) for i in range(n_components)]
+ ['meta_desc_tfidf_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
vectorizer = make_pipeline(
CountVectorizer(),
make_union(
TruncatedSVD(n_components=n_components, random_state=seed),
NMF(n_components=n_components, random_state=seed),
make_pipeline(
BM25Transformer(use_idf=True, k1=2.0, b=0.75),
TruncatedSVD(n_components=n_components, random_state=seed)
),
n_jobs=1,
),
)
X = vectorizer.fit_transform(train['desc_bow'])
X = pd.DataFrame(X, columns=['meta_desc_count_svd_{}'.format(i) for i in range(n_components)]
+ ['meta_desc_count_nmf_{}'.format(i) for i in range(n_components)]
+ ['meta_desc_count_bm25_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
train.drop(['desc_bow', 'desc'], axis=1, inplace=True)
with timer('description fasttext'):
embedding = '../input/quora-embedding/GoogleNews-vectors-negative300.bin'
model = KeyedVectors.load_word2vec_format(embedding, binary=True)
X = pretrained_w2v(train["Description_Emb"], model, name="gnvec")
train = pd.concat([train, X], axis=1)
del model;
gc.collect()
with timer('description glove'):
embedding = "../input/pymagnitude-data/glove.840B.300d.magnitude"
model = Magnitude(embedding)
X = w2v_pymagnitude(train["Description_Emb"], model, name="glove_mag")
train = pd.concat([train, X], axis=1)
del model;
gc.collect()
with timer('image features'):
train['num_images'] = train['PetID'].apply(lambda x: sum(train_images.PetID == x))
train['num_images_per_pet'] = train['num_images'] / train['Quantity']
with timer('make inception resnet features'):
if debug:
import feather
X = feather.read_dataframe("feature/inception_resnet.feather")
train = pd.concat((train, X), axis=1)
else:
pet_ids = train_images['PetID'].values
img_pathes = train_images['image_filename'].values
n_batches = len(pet_ids) // batch_size + 1
inp = Input((256, 256, 3))
backbone = InceptionResNetV2(input_tensor=inp,
weights='../input/inceptionresnetv2/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5',
include_top=False)
x = backbone.output
x = GlobalAveragePooling2D()(x)
x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
x = AveragePooling1D(4)(x)
out = Lambda(lambda x: x[:, :, 0])(x)
m = Model(inp, out)
features = []
for b in range(n_batches):
start = b * batch_size
end = (b + 1) * batch_size
batch_pets = pet_ids[start: end]
batch_path = img_pathes[start: end]
batch_images = np.zeros((len(batch_pets), img_size, img_size, 3))
for i, (pet_id, path) in enumerate(zip(batch_pets, batch_path)):
                            try:
                                batch_images[i] = load_image(path, preprocess_input_incep)
                            except Exception:
                                pass  # leave this slot as zeros if the image cannot be read
batch_preds = m.predict(batch_images)
for i, pet_id in enumerate(batch_pets):
features.append([pet_id] + list(batch_preds[i]))
X = pd.DataFrame(features, columns=["PetID"] + ["inception_resnet_{}".format(i) for i in
range(batch_preds.shape[1])])
gp_img = X.groupby("PetID").mean().reset_index()
train = pd.merge(train, gp_img, how="left", on="PetID")
del m, gp_img;
gc.collect();
K.clear_session()
with timer('aggregation'):
stats = ['mean', 'sum', 'median', 'min', 'max', 'var']
groupby_dict = [
{
'key': ['Name'],
'var': ['Age'],
'agg': ['count']
},
{
'key': ['RescuerID'],
'var': ['Age'],
'agg': ['count']
},
{
'key': ['RescuerID', 'State'],
'var': ['Age'],
'agg': ['count']
},
{
'key': ['RescuerID', 'Type'],
'var': ['Age'],
'agg': ['count']
},
{
'key': ['RescuerID'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['RescuerID', 'State'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['RescuerID', 'Type'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['Type', 'Breed1', 'Breed2'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['Type', 'Breed1'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['State'],
'var': ['Age', 'Quantity', 'MaturitySize', 'Sterilized', 'Fee'],
'agg': stats
},
{
'key': ['MaturitySize'],
'var': ['Age', 'Quantity', 'Sterilized', 'Fee'],
'agg': stats
},
]
nunique_dict = [
{
'key': ['State'],
'var': ['RescuerID'],
'agg': ['nunique']
},
{
'key': ['Dewormed'],
'var': ['RescuerID'],
'agg': ['nunique']
},
{
'key': ['Type'],
'var': ['RescuerID'],
'agg': ['nunique']
},
{
'key': ['Type', 'Breed1'],
'var': ['RescuerID'],
'agg': ['nunique']
},
]
groupby = GroupbyTransformer(param_dict=nunique_dict)
train = groupby.transform(train)
groupby = GroupbyTransformer(param_dict=groupby_dict)
train = groupby.transform(train)
diff = DiffGroupbyTransformer(param_dict=groupby_dict)
train = diff.transform(train)
ratio = RatioGroupbyTransformer(param_dict=groupby_dict)
train = ratio.transform(train)
with timer('category embedding'):
train[['BreedName_main_breed', 'BreedName_second_breed']] = \
train[['BreedName_main_breed', 'BreedName_second_breed']].astype("int32")
for c in categorical_features:
train[c] = train[c].fillna(train[c].max() + 1)
cv = CategoryVectorizer(categorical_features, n_components,
vectorizer=CountVectorizer(),
transformer=LatentDirichletAllocation(n_components=n_components, n_jobs=-1,
learning_method='online',
random_state=777),
name='CountLDA')
features1 = cv.transform(train).astype(np.float32)
cv = CategoryVectorizer(categorical_features, n_components,
vectorizer=CountVectorizer(),
transformer=TruncatedSVD(n_components=n_components, random_state=777),
name='CountSVD')
features2 = cv.transform(train).astype(np.float32)
train = pd.concat([train, features1, features2], axis=1)
t_cols += [c for c in train.columns if c not in orig_cols]
if K_flag or G_flag:
with timer('kaeru and gege features'):
with timer('text stats features'):
train = get_text_features(train)
k_cols += ['Length_Description', 'Length_annots_top_desc', 'Lengths_sentiment_text']
g_cols += ['Length_Description', 'Length_annots_top_desc', 'Lengths_sentiment_entities']
if K_flag:
with timer('kaeru features'):
orig_cols = train.columns
            with timer('engineering age'):
train = get_age_feats(train)
with timer('frequency encoding'):
freq_cols = ['BreedName_main_breed', 'BreedName_second_breed']
train = freq_encoding(train, freq_cols)
with timer('kanji feature'):
train['in_kanji'] = train.Description.apply(lambda x: is_zh(x))
with timer('tfidf + svd / nmf'):
                # NOTE: TruncatedSVD cannot consume raw text, so the descriptions are
                # vectorised with a TfidfVectorizer first (consistent with the timer label).
                vectorizer = make_pipeline(TfidfVectorizer(),
                                           TruncatedSVD(n_components=n_components, random_state=kaeru_seed))
                X = vectorizer.fit_transform(train['Description'])
X = pd.DataFrame(X, columns=['tfidf_k_svd_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('description doc2vec'):
d2v_param = {
"features_num": 300,
"min_word_count": 10,
"context": 5,
"downsampling": 1e-3,
"epoch_num": 10
}
X = doc2vec(train["Description"], d2v_param)
train = pd.concat([train, X], axis=1)
with timer('annots_top_desc + svd / nmf'):
                # NOTE: as above, the raw text is vectorised before the SVD.
                vectorizer = make_pipeline(TfidfVectorizer(),
                                           TruncatedSVD(n_components=n_components, random_state=kaeru_seed))
                X = vectorizer.fit_transform(train['annots_top_desc_pick'])
X = pd.DataFrame(X, columns=['annots_top_desc_k_svd_{}'.format(i) for i in range(n_components)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('densenet features'):
vectorizer = TruncatedSVD(n_components=n_components, random_state=kaeru_seed)
X = vectorizer.fit_transform(gp_dense_first.drop(['PetID'], axis=1))
X = pd.DataFrame(X, columns=['densenet121_svd_{}'.format(i) for i in range(n_components)])
X["PetID"] = gp_dense_first["PetID"]
train = pd.merge(train, X, how="left", on="PetID")
del vectorizer;
gc.collect()
with timer('aggregation'):
stats = ['mean', 'sum', 'min', 'max']
var = ['Age_k', 'MaturitySize_k', 'FurLength_k', 'Fee_k', 'Health_k']
for c in ['Age', 'MaturitySize', 'FurLength', 'Fee', 'Health']:
train[c + "_k"] = train[c]
groupby_dict = [
{
'key': ['RescuerID'],
'var': ['Age_k'],
'agg': ['count']
},
{
'key': ['RescuerID'],
'var': ['Age_k', 'Length_Description', 'Length_annots_top_desc', 'Lengths_sentiment_text'],
'agg': stats + ["var"]
},
{
'key': ['RescuerID'],
'var': ['MaturitySize_k', 'FurLength_k', 'Fee_k', 'Health_k'],
'agg': stats
}
]
groupby = GroupbyTransformer(param_dict=groupby_dict)
train = groupby.transform(train)
train.drop(var, axis=1, inplace=True)
k_cols += [c for c in train.columns if c not in orig_cols if c not in kaeru_drop_cols]
if G_flag:
with timer('gege features'):
orig_cols = train.columns
with timer('densenet features'):
vectorizer = TruncatedSVD(n_components=n_components_gege_img, random_state=kaeru_seed)
X = vectorizer.fit_transform(gp_dense_first.drop(['PetID'], axis=1))
X = pd.DataFrame(X, columns=['densenet121_g_svd_{}'.format(i) for i in range(n_components_gege_img)])
X["PetID"] = gp_dense_first["PetID"]
train = pd.merge(train, X, how="left", on="PetID")
del vectorizer, gp_dense_first;
gc.collect()
with timer('frequency encoding'):
freq_cols = ['RescuerID', 'Breed1', 'Breed2', 'Color1', 'Color2', 'Color3', 'State']
train = freq_encoding(train, freq_cols)
with timer('tfidf + svd'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=2, max_features=None,
strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\b\w+\b',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1),
TruncatedSVD(n_components=n_components_gege_txt, random_state=kaeru_seed)
)
X = vectorizer.fit_transform(train['Description'])
X = pd.DataFrame(X, columns=['tfidf_g_svd_{}'.format(i) for i in range(n_components_gege_txt)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('annots tfidf + svd'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=2, max_features=None,
strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\b\w+\b',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1),
TruncatedSVD(n_components=n_components_gege_txt, random_state=kaeru_seed)
)
X = vectorizer.fit_transform(train['annots_top_desc'])
X = pd.DataFrame(X, columns=['annots_top_desc_tfidf_g_svd_{}'.format(i) for i in
range(n_components_gege_txt)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('sentiment entities tfidf + svd'):
vectorizer = make_pipeline(
TfidfVectorizer(min_df=2, max_features=None,
strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\b\w+\b',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1),
TruncatedSVD(n_components=n_components_gege_txt, random_state=kaeru_seed)
)
X = vectorizer.fit_transform(train['sentiment_entities'])
X = pd.DataFrame(X, columns=['sentiment_entities_tfidf_g_svd_{}'.format(i) for i in
range(n_components_gege_txt)])
train = pd.concat([train, X], axis=1)
del vectorizer;
gc.collect()
with timer('image basic features'):
train_images['image_size'] = train_images['image_filename'].apply(getSize)
train_images['temp_size'] = train_images['image_filename'].apply(getDimensions)
train_images['width'] = train_images['temp_size'].apply(lambda x: x[0])
train_images['height'] = train_images['temp_size'].apply(lambda x: x[1])
train_images = train_images.drop(['temp_size'], axis=1)
aggs = {
'image_size': ['sum', 'mean', 'var'],
'width': ['sum', 'mean', 'var'],
'height': ['sum', 'mean', 'var'],
}
gp = train_images.groupby('PetID').agg(aggs)
new_columns = [k + '_' + agg for k in aggs.keys() for agg in aggs[k]]
gp.columns = new_columns
train = train.merge(gp.reset_index(), how="left", on="PetID")
g_cols += [c for c in train.columns if c not in orig_cols]
dtype_cols = ['BreedName_main_breed', 'BreedName_second_breed', 'BreedName_main_breed_all']
train[dtype_cols] = train[dtype_cols].astype("int32")
logger.info(train.head())
train.to_feather("all_data.feather")
np.save("common_cols.npy", np.array(common_cols))
np.save("t_cols.npy", np.array(t_cols))
np.save("k_cols.npy", np.array(k_cols))
np.save("g_cols.npy", np.array(g_cols))
if T_flag:
with timer('takuoko feature info'):
categorical_features_t = list(set(categorical_features) - set(remove))
predictors = list(set(common_cols + t_cols + categorical_features_t) - set([target] + remove))
predictors = [c for c in predictors if c in use_cols]
categorical_features_t = [c for c in categorical_features_t if c in predictors]
logger.info(f'predictors / use_cols = {len(predictors)} / {len(use_cols)}')
train = train.loc[:, ~train.columns.duplicated()]
X = train.loc[:, predictors]
y = train.loc[:, target]
rescuer_id = train.loc[:, 'RescuerID'].iloc[:len_train]
X_test = X[len_train:]
X = X[:len_train]
y = y[:len_train]
X.to_feather("X_train_t.feather")
X_test.reset_index(drop=True).to_feather("X_test_t.feather")
with timer('takuoko modeling'):
y_pred_t = np.empty(len_train, )
y_test_t = []
train_losses, valid_losses = [], []
# cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1337)
# for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y)):
# cv = GroupKFold(n_splits=n_splits)
# for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=None, groups=rescuer_id)):
cv = StratifiedGroupKFold(n_splits=n_splits)
for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):
X_train = X.loc[train_index, :]
X_valid = X.loc[valid_index, :]
y_train = y[train_index]
y_valid = y[valid_index]
pred_val, pred_test, train_rmse, valid_rmse = run_model(X_train, y_train, X_valid, y_valid, X_test,
categorical_features_t, predictors,
maxvalue_dict, fold_id, MODEL_PARAMS,
MODEL_NAME + "_t")
y_pred_t[valid_index] = pred_val
y_test_t.append(pred_test)
train_losses.append(train_rmse)
valid_losses.append(valid_rmse)
y_test_t = np.mean(y_test_t, axis=0)
logger.info(f'train RMSE = {np.mean(train_losses)}')
logger.info(f'valid RMSE = {np.mean(valid_losses)}')
np.save("y_test_t.npy", y_test_t)
np.save("y_oof_t.npy", y_pred_t)
if K_flag:
with timer('kaeru feature info'):
kaeru_cat_cols = None
predictors = list(set(common_cols + k_cols) - set([target] + remove + kaeru_drop_cols))
X = train.loc[:, predictors]
y = train.loc[:, target]
rescuer_id = train.loc[:, 'RescuerID'].iloc[:len_train]
X_test = X[len_train:]
X = X[:len_train]
y = y[:len_train]
X.to_feather("X_train_k.feather")
X_test.reset_index(drop=True).to_feather("X_test_k.feather")
with timer('kaeru modeling'):
y_pred_k = np.empty(len_train, )
y_test_k = []
train_losses, valid_losses = [], []
# cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1337)
# for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y)):
cv = StratifiedGroupKFold(n_splits=n_splits)
for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):
X_train = X.loc[train_index, :]
X_valid = X.loc[valid_index, :]
y_train = y[train_index]
y_valid = y[valid_index]
pred_val, pred_test, train_rmse, valid_rmse = run_model(X_train, y_train, X_valid, y_valid, X_test,
kaeru_cat_cols, predictors, maxvalue_dict,
fold_id, KAERU_PARAMS, MODEL_NAME + "_k")
y_pred_k[valid_index] = pred_val
y_test_k.append(pred_test)
train_losses.append(train_rmse)
valid_losses.append(valid_rmse)
y_test_k = np.mean(y_test_k, axis=0)
logger.info(f'train RMSE = {np.mean(train_losses)}')
logger.info(f'valid RMSE = {np.mean(valid_losses)}')
np.save("y_test_k.npy", y_test_k)
np.save("y_oof_k.npy", y_pred_k)
if G_flag:
with timer('gege feature info'):
predictors = list(set(common_cols + g_cols) - set([target] + remove + gege_drop_cols))
categorical_features_g = [c for c in categorical_features if c in predictors]
X = train.loc[:, predictors]
y = train.loc[:, target]
rescuer_id = train.loc[:, 'RescuerID'].iloc[:len_train]
X_test = X[len_train:]
X = X[:len_train]
y = y[:len_train]
X.to_feather("X_train_g.feather")
X_test.reset_index(drop=True).to_feather("X_test_g.feather")
with timer('gege adversarial validation'):
train_idx = range(0, len_train)
X_adv = train.loc[:, predictors]
y_adv = np.array([0 for i in range(len(X))] + [1 for i in range(len(X_test))])
X_adv_tr, X_adv_tst, y_adv_tr, y_adv_tst = train_test_split(X_adv, y_adv, test_size=0.20, shuffle=True,
random_state=42)
lgtrain = lgb.Dataset(X_adv_tr, y_adv_tr,
categorical_feature=categorical_features_g,
feature_name=predictors)
lgvalid = lgb.Dataset(X_adv_tst, y_adv_tst,
categorical_feature=categorical_features_g,
feature_name=predictors)
lgb_adv = lgb.train(
ADV_PARAMS,
lgtrain,
num_boost_round=20000,
valid_sets=[lgtrain, lgvalid],
valid_names=['train', 'valid'],
early_stopping_rounds=500,
verbose_eval=20000
)
train_preds = lgb_adv.predict(X_adv.iloc[train_idx])
extract_idx = np.argsort(-train_preds)[:int(len(train_idx) * 0.85)]
del X_adv_tr, X_adv_tst, y_adv_tr, y_adv_tst, X_adv, y_adv, lgb_adv;
gc.collect()
with timer('gege modeling'):
X = X.iloc[extract_idx].reset_index(drop=True)
y = y[extract_idx].reset_index(drop=True)
rescuer_id = rescuer_id[extract_idx].reset_index(drop=True)
y_pred_g = np.empty(len(extract_idx), )
y_test_g = []
train_losses, valid_losses = [], []
cv = StratifiedGroupKFold(n_splits=n_splits)
for fold_id, (train_index, valid_index) in enumerate(cv.split(range(len(X)), y=y, groups=rescuer_id)):
X_train = X.loc[train_index, :]
X_valid = X.loc[valid_index, :]
y_train = y[train_index]
y_valid = y[valid_index]
pred_val, pred_test, train_rmse, valid_rmse = run_xgb_model(X_train, y_train,
X_valid, y_valid, X_test, predictors,
maxvalue_dict,
fold_id, MODEL_PARAMS_XGB,
MODEL_NAME + "_g")
y_pred_g[valid_index] = pred_val
y_test_g.append(pred_test)
train_losses.append(train_rmse)
valid_losses.append(valid_rmse)
y_test_g = np.mean(y_test_g, axis=0)
logger.info(f'train RMSE = {np.mean(train_losses)}')
logger.info(f'valid RMSE = {np.mean(valid_losses)}')
np.save("y_test_g.npy", y_test_g)
np.save("y_oof_g.npy", y_pred_g)
np.save("extract_idx.npy", extract_idx)
if T_flag and K_flag and G_flag:
y_pred = (y_pred_t[extract_idx] + y_pred_k[extract_idx] + y_pred_g) / 3
y_test = (y_test_t + y_test_k + y_test_g) / 3
elif T_flag and K_flag:
y_pred = y_pred_t * 0.5 + y_pred_k * 0.5
y_test = y_test_t * 0.5 + y_test_k * 0.5
elif T_flag and G_flag:
y_pred = y_pred_t[extract_idx] * 0.5 + y_pred_g * 0.5
y_test = y_test_t * 0.5 + y_test_g * 0.5
elif G_flag and K_flag:
y_pred = y_pred_g * 0.5 + y_pred_k[extract_idx] * 0.5
y_test = y_test_g * 0.5 + y_test_k * 0.5
elif T_flag:
y_pred = y_pred_t
y_test = y_test_t
elif K_flag:
y_pred = y_pred_k
y_test = y_test_k
elif G_flag:
y_pred = y_pred_g
y_test = y_test_g
with timer('optimize threshold'):
optR = OptimizedRounder()
optR.fit(y_pred, y)
coefficients = optR.coefficients()
y_pred = optR.predict(y_pred, coefficients)
score = get_score(y, y_pred)
logger.info(f'Coefficients = {coefficients}')
logger.info(f'QWK = {score}')
y_test = optR.predict(y_test, coefficients).astype(int)
with timer('postprocess'):
submission_with_postprocess(y_test)
| 45.611088 | 151 | 0.557434 | [
"MIT"
] | okotaku/pet_finder | code/exp/v18.py | 110,820 | Python |
class NeuralNet():
def __init__(self, game):
pass
def train(self, examples):
"""
This function trains the neural network with examples obtained from
self-play.
Input:
examples: a list of training examples, where each example is of form
(board, pi, v). pi is the MCTS informed policy vector for
the given board, and v is its value. The examples has
board in its canonical form.
"""
pass
def predict(self, board):
"""
Input:
board: current board in its canonical form.
Returns:
pi: a policy vector for the current board- a numpy array of length
game.getActionSize
v: a float in [-1,1] that gives the value of the current board
"""
pass
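    # Concrete networks are expected to subclass NeuralNet and override these
    # methods. Illustrative sketch only (MyNNet and self.model are hypothetical):
    #   class MyNNet(NeuralNet):
    #       def predict(self, board):
    #           pi, v = self.model.predict(board[np.newaxis])
    #           return pi[0], v[0]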
def save_checkpoint(self, folder, filename):
"""
Saves the current neural network (with its parameters) in
folder/filename
"""
pass
def load_checkpoint(self, folder, filename):
"""
Loads parameters of the neural network from folder/filename
"""
pass | 30.15 | 80 | 0.554726 | [
"Apache-2.0"
] | MaxU11/playground | pommerman/NN/neural_net.py | 1,206 | Python |
"""
File name: utils
Author: rameshpr
Date: 11/5/18
"""
import numpy as np
from ctypes import *
from typing import List, Tuple
import cv2
from pyyolo.darknet import c_array, IMAGE, METADATA, predict_image, get_network_boxes, \
do_nms_obj, do_nms_sort, free_image, free_detections, ndarray_image
import pyyolo.darknet
from pyyolo.yolo_data import BBox, YoloData
def load_image(filename, flags=None):
# type: (str, int) -> IMAGE
"""
This will call cv2.imread() with the given arguments and convert
the resulting numpy array to a darknet image
:param filename: Image file name
:param flags: imread flags
:return: Given image file as a darknet image
:rtype: IMAGE
"""
image = cv2.imread(filename, flags)
return array_to_image(image)
def array_to_image(arr):
# type: (np.ndarray) -> IMAGE
"""
    Convert the given numpy array to a darknet image.
    Remember to call the free_image(im) function after using this image.
:rtype: IMAGE
:param arr: numpy array
:return: darknet image
"""
data = arr.ctypes.data_as(POINTER(c_ubyte))
im = ndarray_image(data, arr.ctypes.shape, arr.ctypes.strides)
return im
def classify(net, meta, im):
    # type: (object, METADATA, IMAGE) -> List[Tuple[str, float]]
out = predict_image(net, im)
res = []
for i in range(meta.classes):
res.append((meta.names[i], out[i]))
res = sorted(res, key=lambda x: -x[1])
return res
def detect(net, meta, im, thresh=.2, hier_thresh=0, nms=.4):
# type: (object, METADATA, IMAGE, float, float, float) -> List[YoloData]
"""
    Detect the objects in the given image. free_image is called inside this
    function, so the input darknet image is not usable after calling it.
:param net:
:param meta:
:param im:
:param thresh:
:param hier_thresh:
:param nms:
:return:
"""
num = c_int(0)
pnum = pointer(num)
predict_image(net, im)
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
num = pnum[0]
if nms:
do_nms_sort(dets, num, meta.classes, nms)
res = []
for j in range(num):
for i in range(meta.classes):
if dets[j].prob[i] > 0:
b = dets[j].bbox
res.append(YoloData(id=i, name=meta.names[i], bbox=BBox(b.x - b.w/2.0, b.y - b.h/2.0, b.w, b.h, dets[j].prob[i])))
res = sorted(res, key=lambda x: -x.bbox.c)
free_image(im)
free_detections(dets, num)
return res
def load_net(cfg_filepath, weights_filepath, clear):
# type: (str, str, bool) -> object
"""
:param cfg_filepath: cfg file name
:param weights_filepath: weights file name
:param clear: True if you want to clear the weights otherwise False
:return: darknet network object
"""
return pyyolo.darknet.load_net(cfg_filepath, weights_filepath, clear)
def load_meta(meta_filepath):
# type: (str) -> METADATA
"""
Recommend using load_names(str) function instead.
:param meta_filepath: metadata file path
:return: darknet metadata object
"""
return pyyolo.darknet.load_meta(meta_filepath)
def load_names(names_filepath):
# type: (str) -> METADATA
"""
    Loading metadata from a data file (e.g. coco.data) is awkward because you also
    have to edit that file to point it at the names file. This function loads the
    names file (e.g. coco.names) directly as a METADATA object.
    The older load_meta function is still available if you need it.
:param names_filepath: Filepath of the names file. Eg: coco.names
:return: darknet metadata object
"""
data = None
with open(names_filepath) as f:
data = f.readlines()
if data is None:
raise ValueError("Names file not found.. %s" % names_filepath)
n_cls = len(data)
p_names = (c_char_p * n_cls)()
for cls in range(n_cls):
        name = data[cls].strip().encode('utf-8')  # strip the trailing newline robustly
        c_name = c_char_p()
        c_name.value = name
p_names[cls] = c_name
return METADATA(n_cls, cast(p_names, POINTER(c_char_p)))
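# Typical end-to-end usage (paths are illustrative; depending on the darknet
# bindings the cfg/weights paths may need to be bytes):
#   net = load_net('cfg/yolov3.cfg', 'yolov3.weights', False)
#   meta = load_names('coco.names')
#   im = load_image('dog.jpg')  # or array_to_image(some_numpy_frame)
#   for det in detect(net, meta, im):
#       print(det.name, det.bbox.x, det.bbox.y, det.bbox.w, det.bbox.h, det.bbox.c)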
| 29.148936 | 130 | 0.652798 | [
"Apache-2.0"
] | isarandi/pyyolo | pyyolo/utils.py | 4,110 | Python |
# coding: utf-8
"""
Cloudsmith API
The API to the Cloudsmith Service
OpenAPI spec version: v1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import cloudsmith_api
from cloudsmith_api.rest import ApiException
from cloudsmith_api.apis.rates_api import RatesApi
class TestRatesApi(unittest.TestCase):
""" RatesApi unit test stubs """
def setUp(self):
self.api = cloudsmith_api.apis.rates_api.RatesApi()
def tearDown(self):
pass
def test_rates_limits_list(self):
"""
Test case for rates_limits_list
Endpoint to check rate limits for current user.
"""
pass
if __name__ == '__main__':
unittest.main()
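# Hedged sketch (not part of the generated stub): a fleshed-out version of
# test_rates_limits_list would typically configure API credentials in setUp and
# assert on the response, e.g.
#     resp = self.api.rates_limits_list()
#     self.assertIsNotNone(resp)
# The exact response model depends on the generated cloudsmith_api bindings.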
| 18.555556 | 68 | 0.691018 | [
"Apache-2.0"
] | cloudsmith-io/cloudsmith-api | bindings/python/src/test/test_rates_api.py | 835 | Python |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.recovery` module.
"""
import numpy as np
import unittest
from colour.colorimetry import (
MSDS_CMFS,
SDS_ILLUMINANTS,
SpectralShape,
reshape_msds,
reshape_sd,
sd_to_XYZ_integration,
)
from colour.recovery import XYZ_to_sd
from colour.utilities import domain_range_scale
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'TestXYZ_to_sd',
]
class TestXYZ_to_sd(unittest.TestCase):
"""
Defines :func:`colour.recovery.XYZ_to_sd` definition unit tests
methods.
"""
def setUp(self):
"""
Initialises common tests attributes.
"""
# pylint: disable=E1102
self._cmfs = reshape_msds(
MSDS_CMFS['CIE 1931 2 Degree Standard Observer'],
SpectralShape(360, 780, 10))
self._sd_D65 = reshape_sd(SDS_ILLUMINANTS['D65'], self._cmfs.shape)
def test_domain_range_scale_XYZ_to_sd(self):
"""
Tests :func:`colour.recovery.XYZ_to_sd` definition domain
and range scale support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
m = ('Jakob 2019', 'Mallett 2019', 'Meng 2015', 'Otsu 2018',
'Smits 1999')
v = [
sd_to_XYZ_integration(
XYZ_to_sd(
XYZ, method, cmfs=self._cmfs, illuminant=self._sd_D65),
self._cmfs, self._sd_D65) for method in m
]
d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1))
for method, value in zip(m, v):
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
sd_to_XYZ_integration(
XYZ_to_sd(
XYZ * factor_a,
method,
cmfs=self._cmfs,
illuminant=self._sd_D65), self._cmfs,
self._sd_D65),
value * factor_b,
decimal=7)
if __name__ == '__main__':
unittest.main()
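# Hedged usage sketch (not part of the test module): recovering a spectral
# distribution directly with the public API, using the same illustrative XYZ
# values as the test above:
#     import colour
#     sd = colour.XYZ_to_sd(
#         np.array([0.20654008, 0.12197225, 0.05136952]), method='Meng 2015')
#     colour.sd_to_XYZ(sd)  # round-trips close to the input, scaled by 100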
| 28.976471 | 78 | 0.568006 | [
"BSD-3-Clause"
] | JGoldstone/colour | colour/recovery/tests/test__init__.py | 2,463 | Python |
#!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
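# Hedged examples (illustrative values, not entries from the seed lists):
#     name_to_ipv6('1.2.3.4')     -> 16 bytes, ::ffff:1.2.3.4 (IPv4-mapped)
#     name_to_ipv6('2001:db8::1') -> the 16 bytes of that IPv6 address
#     name_to_ipv6('0x0100007f')  -> 127.0.0.1 (little-endian pnSeeds encoding)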
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
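# Hedged examples (illustrative only):
#     parse_spec('1.2.3.4', 9999)            -> (IPv4-mapped bytes, 9999)
#     parse_spec('[2001:db8::1]:8333', 9999) -> (IPv6 bytes, 8333)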
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BINARIUM_CHAINPARAMSSEEDS_H\n')
g.write('#define BINARIUM_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the binarium network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9999)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19999)
g.write('#endif // BINARIUM_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
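# Hedged usage sketch (not part of the original script): the directory passed as
# the first argument must contain nodes_main.txt and nodes_test.txt, and the
# generated header is written to stdout, e.g.
#     python generate-seeds.py . > ../../src/chainparamsseeds.h
# (paths are illustrative; adjust them to your checkout.)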
| 31.553957 | 98 | 0.582991 | [
"MIT"
] | binariumpay/binarium | contrib/seeds/generate-seeds.py | 4,386 | Python |