Column schema (dtype and observed range for each column):

| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | 0 to 112 items |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | 1 item |
| author_id | string | length 1 to 132 |

blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
adf28e920deddf72529dcb0823b1473ab4f87eba | ae9ce341ffb6b6d0587b04af81d8a25d81adc987 | /src/core/migrations/0001_initial.py | 96305126d9e1be6c432a17f0620d4a7bf2e73231 | [] | no_license | MrTsepa/track_web | 7eda8e0cdcb2c384b57569b59f03a7d4ad0c4543 | 276860bdeb42a2b27002e1e19eca0383ffb27b0e | refs/heads/master | 2021-01-12T17:53:15.769984 | 2016-12-27T17:44:15 | 2016-12-27T17:44:15 | 71,288,968 | 0 | 0 | null | 2016-12-25T19:12:27 | 2016-10-18T20:34:17 | JavaScript | UTF-8 | Python | false | false | 3,021 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-18 16:09
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('avatar', models.ImageField(blank=True, null=True, upload_to=b'avatars')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"[email protected]"
] | |
4606275fa5d9e722d6644f7d7cf1c37e42c82127 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_230/ch168_2020_06_15_19_49_26_764111.py | 6d30c2c492a8b42526eb6299bf967924d030cb9f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | def login_disponivel(login, lista):
novo_login=0
num=0
if login not in lista:
lista.append(login)
return login
else:
for logins in lista:
if login!=logins:
continue
else:
num+=1
novo_login=login+str(num)
if novo_login not in lista:
return novo_login
else:
while novo_login in lista:
num+=1
outro_login=novo_login.replace(novo_login[-1], str(num))
if outro_login not in lista:
return outro_login | [
"[email protected]"
] | |
fb2a17ee074aee2dd601440a013c1d40a2e94c24 | 2b54b1fb1540ab73d6c83cae3acd5fdd58bdead5 | /Platinum_clusters_Project/Pt7O11_richness/Ptoxides_zorderimage_new.py | 5c29ede674b6b65b4573cae10e0835fa87be76a9 | [] | no_license | sivachiriki/GOFEE_Pt_V_supported | 5787d44294262870075f35f2d31c096021b7ce20 | 6bd700dac1f3e7c58394b758d75246ac6e07eade | refs/heads/master | 2022-04-08T11:38:13.038455 | 2020-03-09T10:48:31 | 2020-03-09T10:48:31 | 226,359,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,547 | py | from __future__ import division
import matplotlib
#matplotlib.use('Agg') # Can also use 'tkagg' or 'webagg'
#from plot_neb_tio2 import *
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox
import matplotlib.patches as patches
from math import ceil, floor
import matplotlib.pyplot as plt
from ase.io import read, write
from ase.visualize import view
import matplotlib.patches as mpatches
from ase.data.colors import jmol_colors
from decimal import Decimal
from pylab import *
from ase.data import covalent_radii as aradii
from matplotlib.patches import Circle
from math import atan2,pi
import matplotlib.gridspec as gridspec
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
def plot_atoms(ax, atoms, xyz, acols, alp, z):
ecols = [[0, 0, 0] for col in atoms]
indices = range(len(atoms))
for ia in indices:
acol = acols[ia]
ecol = ecols[ia]
arad = aradii[atoms[ia].number]
apos = atoms[ia].position
eps = arad
circ = Circle([apos[xyz[0]], apos[xyz[1]]],
fc = acol,
ec = ecol,
radius = arad,
lw = 0.5,
alpha = alp[ia],
zorder = 1 - apos[1]/1000
)
ax.add_patch(circ)
def plot_conf(ax, atoms, colorlenth,rot=False):
colors = np.array([jmol_colors[atom.number] for atom in atoms])
positions =atoms.get_positions()
for i, atom in enumerate(atoms):
if (atom.number ==78):
colors[i] =[0.1, 0.6, 0.6]
if (atom.number ==6):
colors[i] =[0.0, 0.0, 0.0]
if (atom.number ==8 and positions[i,2]>12.2):
colors[i] =[128/255, 0/255, 128/255]
alp = [None] * colors.shape[0]
for i,a in enumerate(atoms):
if a.symbol == 'Al' or a.symbol == 'O':
if a.position[2] < 9.7:
alp[i] = 0.3
if rot:
atoms.rotate('x',pi/2)
plot_atoms(ax, atoms, [0,2,1], colors, alp, z=-1)
def plot_conf1(ax, atoms, colorlenth,rot=False):
colors = np.array([jmol_colors[atom.number] for atom in atoms])
positions =atoms.get_positions()
for i, atom in enumerate(atoms):
if (atom.number ==78):
colors[i] =[0.1, 0.6, 0.6]
if (atom.number ==6):
colors[i] =[0.1, 0.2, 0.9]
if (atom.number ==8 and positions[i,2]>12.2):
colors[i] =[128/255, 0/255, 128/255]
if (positions[i,2]<12.7 ):
colors[i] =[255/255, 255/255, 255/255]
alp = [None] * colors.shape[0]
for i,a in enumerate(atoms):
if a.symbol == 'Al' or a.symbol == 'O':
if a.position[2] < 9.7:
alp[i] = 0.3
if rot:
atoms.rotate('x',pi/2)
plot_atoms(ax, atoms, [0,2,1], colors, alp, z=-1)
#-----------------------------------------------------------#
fig = plt.figure(figsize=(13.0,10.5))
outer = gridspec.GridSpec(4, 9, wspace=0.04, hspace=0.2)
color_lib = ['#00FF00','#377eb8','#4daf4a','#00FFFF','#a65628','#FF0000','#0000FF', '#FF00FF','#FFFF00','#000000']
#---------------------- Pt7 clusters -------------------------------------#
data=read(sys.argv[1]+'@:')
energydif =np.zeros(len(data))
for j in range(len(data)):
GM_energy = data[0].get_potential_energy()
energydif[j] = (data[j].get_potential_energy() - GM_energy)
for j in range(0,len(data)):
inner = gridspec.GridSpecFromSubplotSpec(2, 1,subplot_spec=outer[j], wspace=0.00, hspace=0.0, height_ratios=[6.86,9.9])
atoms = data[j]
colorlenth = len(atoms)
atoms =atoms*(3,3,1)
print(colorlenth)
# write('newimage.traj',atoms)
a=atoms
del atoms[[atom.index for atom in atoms if atom.index <=colorlenth*5-19 or atom.index >=colorlenth*5]]
#view(atoms)
centreofmass = a.get_center_of_mass()
atoms = data[j]*(3,3,1)
a=atoms
del atoms[atoms.positions[:,0] >=centreofmass[0]+8.10]
del atoms[atoms.positions[:,0] <= centreofmass[0]-8.10]
del atoms[atoms.positions[:,1] >= centreofmass[1]+7.8]
del atoms[atoms.positions[:,1] <= centreofmass[1]-7.10]
colorlenth = len(atoms)
#view(atoms)
cell = atoms.get_cell()
# 0 0
ax = plt.Subplot(fig, inner[0])
img = atoms.copy()
if (j!=4):
plot_conf(ax, img,colorlenth)
if (j==4):
plot_conf1(ax, img,colorlenth)
ax.set_xlim([centreofmass[0]-7.50, centreofmass[0]+7.50])
ax.set_ylim([10.7, 20.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
fig.add_subplot(ax)
#----------------- drawing box -------------------------------#
xlim = ax.get_xlim()
ylim = ax.get_ylim()
#print(xlim)
#print(ylim)
box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]
box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]
ax.add_patch(
patches.Rectangle(
(box_x[0],box_y[0]),
xlim[1]-xlim[0],
ylim[1]-ylim[0],
fill=True,facecolor='white', clip_on=False,zorder =0.8) )
ax.plot(box_x, box_y, color='blue',linewidth=5.0)
# 0 1
ax = plt.Subplot(fig, inner[1])
cell = atoms.get_cell()
img = atoms.copy()
if (j!=4):
plot_conf(ax, img,colorlenth, rot=True)
if (j==4):
plot_conf1(ax, img,colorlenth, rot=True)
ax.set_xlim([centreofmass[0]-7.5, centreofmass[0]+7.50])
ax.set_ylim([centreofmass[1]-6.5, centreofmass[1]+7.0])
name ='$\Delta E = {:3.3f}$ eV'.format(energydif[j])
ax.text(0.05, -0.14, name, transform=ax.transAxes,fontsize=10)
name1 = "S$_{"+ str(j+1) + "}$"
ax.text(0.05, 1.6, name1, transform=ax.transAxes,fontsize=10)
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
#----------------- drawing box -------------------------------#
xlim = ax.get_xlim()
ylim = ax.get_ylim()
#print(xlim)
#print(ylim)
box_x = [xlim[0], xlim[1], xlim[1], xlim[0], xlim[0]]
box_y =[ylim[0], ylim[0], ylim[1], ylim[1], ylim[0]]
ax.add_patch(
patches.Rectangle(
(box_x[0],box_y[0]),
xlim[1]-xlim[0],
ylim[1]-ylim[0],
fill=True,facecolor='white', clip_on=False,zorder =0.8) )
ax.plot(box_x, box_y, color='blue',linewidth=5.0)
fig.add_subplot(ax)
fig.text(0.4, 0.89, 'Lowest Isomers of Pt$_7$O$_{11}$', ha='center',fontsize=14)
name = sys.argv[2]
name =name
savefig(name,bbox_inches='tight')
show()
exit()
| [
"[email protected]"
] | |
19fe4733092470c04d9b22d2264b885c70a14290 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /QcswPnY2cAbrfwuWE_24.py | 87292310bdc04e8e32529844946ccbcd1e95cb45 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | """
Create a function that filters out factorials from a list. A factorial is a
number that can be represented in the following manner:
n! = n * (n-1) * (n-2) * ... * 3 * 2 * 1
Recursively, this can be represented as:
n! = n * (n-1)!
### Examples
filter_factorials([1, 2, 3, 4, 5, 6, 7]) ➞ [1, 2, 6]
filter_factorials([1, 4, 120]) ➞ [1, 120]
filter_factorials([8, 9, 10]) ➞ []
### Notes
N/A
"""
factorial = lambda x: 1 if not x else x * factorial(x-1)
def filter_factorials(n):
	fs = [ factorial(x) for x in range(1,max(n)+1) ]
return [ e for e in n if e in fs ]
| [
"[email protected]"
] | |
40282fc5a8d13a3550a7977c79d53dc897d2564a | 3a17b31ed9250b38de3b9fd9db8d3d3a8719222c | /setup.py | 1b13c01094f9aa2e0ecd2d15e8c084c887a0422e | [
"MIT"
] | permissive | a627414850/Macropodus | 4cc9bb48408b832cdc890a098a7ea8dc64328ba1 | 1d7b8f9938cb8b6d7744e9caabc3eb41c8891283 | refs/heads/master | 2023-02-15T09:04:35.889058 | 2020-12-25T14:29:04 | 2020-12-25T14:29:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,566 | py | # -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/12/30 22:17
# @author :Mo
# @function :setup of Macropodus
# @codes :fix it and copy reference from https://github.com/TianWenQAQ/Kashgari/blob/master/setup.py
from macropodus.version import __version__
from setuptools import find_packages, setup
import codecs
# Package meta-data.
NAME = 'Macropodus'
DESCRIPTION = 'Macropodus: Toolkit of Chinese Natural Language Processing'
URL = 'https://github.com/yongzhuo/Macropodus'
EMAIL = '[email protected]'
AUTHOR = 'yongzhuo'
LICENSE = 'MIT'
with codecs.open('README.md', 'r', 'utf8') as reader:
long_description = "\n".join(reader.readlines())
with codecs.open('requirements.txt', 'r', 'utf8') as reader:
install_requires = list(map(lambda x: x.strip(), reader.readlines()))
setup(name=NAME,
version=__version__,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=find_packages(), # (exclude=('test')),
package_data={'macropodus': ['*.*', 'data/*', 'data/dict/*',
'data/embedding/*', 'data/embedding/word2vec/*',
'data/model/*']
},
install_requires=install_requires,
license=LICENSE,
classifiers=['License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'],
)
if __name__ == "__main__":
print("setup ok!")
# Note: requires tensorflow>=1.13.0 or tensorflow-gpu>=1.13.0
# The project root here is Macropodus; there is one more macropodus package level below it,
# i.e. the macropodus package sits at the same level as setup.py
# The data package must contain an __init__.py, otherwise its files are not included (only .py files get copied)
# Create the anaconda3 environment:
# conda remove -n py35 --all
# conda create -n py351 python=3.5
# Two ways to build:
# Option 1
# open cmd
# cd to the installation directory
# python setup.py build
# python setup.py install
# Option 2
# python setup.py bdist_wheel --universal
# twine upload dist/*
| [
"[email protected]"
] | |
2ba20a83f2e3080ecf33539555d67783b0a914b3 | 253b65bc1317abd276649020a0474533ee65c350 | /preprocess_coco.py | 54ca521272eaecdeebe953414a0d52336b71842b | [
"MIT"
] | permissive | AMDS123/Faster-RCNN-Densecap-torch | 53a19ce1e44e2ec5e27c9ec8601799e66059138f | e41c3f585a15e4438348f5402ab3c6a945ea66f1 | refs/heads/master | 2021-06-02T09:38:43.607179 | 2016-09-12T00:43:10 | 2016-09-12T00:43:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,131 | py | # coding=utf8
import argparse, os, json, string
from collections import Counter
from Queue import Queue
from threading import Thread, Lock
from math import floor
import h5py
import numpy as np
from scipy.misc import imread, imresize
"""
This file expects a JSON file containing ground-truth regions and captions
in the same format as the region descriptions file from the Visual Genome
website. Concretely, this is a single large JSON file containing a list;
each element of the list describes a single image and has the following
format:
annotation{
"id" : [int] Unique identifier for this region,
"image_id" : [int] ID of the image to which this region belongs,
"category_id" : int,
"bbox" : [x,y,width,height], 0-index
"iscrowd" : 0 or 1,
}
We assume that all images are on disk in a single folder, and that
the filename for each image is the same as its id with a .jpg extension.
This file will be preprocessed into an HDF5 file and a JSON file with
some auxiliary information. The captions will be tokenized with some
basic preprocessing (split by words, remove special characters).
Note, in general any indices anywhere in input/output of this file are 1-indexed.
The output JSON file is an object with the following elements:
- cls_to_idx: Dictionary mapping strings to integers for encoding tokens,
in 1-indexed format.
- filename_to_idx: Dictionary mapping string filenames to indices.
- idx_to_cls: Inverse of the above.
- idx_to_filename: Inverse of the above.
The output HDF5 file has the following format to describe N images with
M total regions:
- images: uint8 array of shape (N, 3, image_size, image_size) of pixel data,
in BDHW format. Images will be resized so their longest edge is image_size
pixels long, aligned to the upper left corner, and padded with zeros.
The actual size of each image is stored in the image_heights and image_widths
fields.
- image_heights: int32 array of shape (N,) giving the height of each image.
- image_widths: int32 array of shape (N,) giving the width of each image.
- original_heights: int32 array of shape (N,) giving the original height of
each image.
- original_widths: int32 array of shape (N,) giving the original width of
each image.
- boxes: int32 array of shape (M, 4) giving the coordinates of each bounding box.
Each row is (xc, yc, w, h) where yc and xc are center coordinates of the box,
and are one-indexed.
- iscrowd: int32 array of shape (M,) giving whether the region is crowded or not
- labels: int32 array of shape (M,) giving the class label index for each region.
To recover a class label from an integer in this matrix,
use idx_to_cls from the JSON output file.
- img_to_first_box: int32 array of shape (N,). If img_to_first_box[i] = j then
captions[j] and boxes[j] give the first annotation for image i
(using one-indexing).
- img_to_last_box: int32 array of shape (N,). If img_to_last_box[i] = j then
captions[j] and boxes[j] give the last annotation for image i
(using one-indexing).
- box_to_img: int32 array of shape (M,). If box_to_img[i] = j then then
regions[i] and captions[i] refer to images[j] (using one-indexing).
"""
def build_class_dict(data):
cls_to_idx, idx_to_cls = {}, {}
cidx_to_idx = {}
idx_to_cls[1] = '__background__'
cls_to_idx['__background__'] = 1
next_idx = 2
for cat in data['categories']:
cls_to_idx[cat['name']] = next_idx
idx_to_cls[next_idx] = cat['name']
cidx_to_idx[cat['id']] = next_idx
next_idx = next_idx + 1
for img in data['images']:
for region in img['regions']:
region['category_id'] = cidx_to_idx[region['category_id']]
return cls_to_idx, idx_to_cls
def encode_labels(data, cls_to_idx):
encoded_list = []
iscrowd = []
for img in data:
for region in img['regions']:
encoded_list.append(region['category_id'])
iscrowd.append(region['iscrowd'])
return np.asarray(encoded_list, dtype=np.int32), np.asarray(iscrowd, dtype=np.int32)
def encode_boxes(data, original_heights, original_widths, image_size):
all_boxes = []
xwasbad = 0
ywasbad = 0
wwasbad = 0
hwasbad = 0
for i, img in enumerate(data):
H, W = original_heights[i], original_widths[i]
scale = float(image_size) / max(H, W)
for region in img['regions']:
if region['category_id'] is None: continue
# recall: x,y are 0-indexed
x, y = round(scale*(region['bbox'][0])+1), round(scale*(region['bbox'][1])+1)
w, h = round(scale*region['bbox'][2]), round(scale*region['bbox'][3])
# clamp to image
if x < 1: x = 1
if y < 1: y = 1
if x > image_size - 1:
x = image_size - 1
xwasbad += 1
if y > image_size - 1:
y = image_size - 1
ywasbad += 1
if x + w > image_size:
w = image_size - x
wwasbad += 1
if y + h > image_size:
h = image_size - y
hwasbad += 1
box = np.asarray([x+floor(w/2), y+floor(h/2), w, h], dtype=np.int32) # also convert to center-coord oriented
assert box[2]>=0 # width height should be positive numbers
assert box[3]>=0
all_boxes.append(box)
print 'number of bad x,y,w,h: ', xwasbad, ywasbad, wwasbad, hwasbad
return np.vstack(all_boxes)
def build_img_idx_to_box_idxs(data):
img_idx = 1
box_idx = 1
num_images = len(data)
img_to_first_box = np.zeros(num_images, dtype=np.int32)
img_to_last_box = np.zeros(num_images, dtype=np.int32)
for img in data:
img_to_first_box[img_idx - 1] = box_idx
for region in img['regions']:
if region['category_id'] is None: continue
box_idx += 1
img_to_last_box[img_idx - 1] = box_idx - 1 # -1 to make these inclusive limits
img_idx += 1
return img_to_first_box, img_to_last_box
def build_filename_dict(data):
# First make sure all filenames
filenames_list = [img['file_name'] for img in data]
assert len(filenames_list) == len(set(filenames_list))
next_idx = 1
filename_to_idx, idx_to_filename = {}, {}
for img in data:
filename = img['file_name']
filename_to_idx[filename] = next_idx
idx_to_filename[next_idx] = filename
next_idx += 1
return filename_to_idx, idx_to_filename
def encode_filenames(data, filename_to_idx):
filename_idxs = []
for img in data:
filename = img['file_name']
idx = filename_to_idx[filename]
for region in img['regions']:
if region['category_id'] is None: continue
filename_idxs.append(idx)
return np.asarray(filename_idxs, dtype=np.int32)
def get_filepath(s):
if 'train' in s:
return os.path.join(s[s.find('train'):s.find('train') + 9], s)
if 'val' in s:
return os.path.join(s[s.find('val'):s.find('val') + 7], s)
def add_images(data, h5_file, args):
num_images = len(data['images'])
shape = (num_images, 3, args.image_size, args.image_size)
image_dset = h5_file.create_dataset('images', shape, dtype=np.uint8)
original_heights = np.zeros(num_images, dtype=np.int32)
original_widths = np.zeros(num_images, dtype=np.int32)
image_heights = np.zeros(num_images, dtype=np.int32)
image_widths = np.zeros(num_images, dtype=np.int32)
lock = Lock()
q = Queue()
for i, img in enumerate(data['images']):
filename = os.path.join(args.image_dir, img['file_name'])
q.put((i, filename))
def worker():
while True:
i, filename = q.get()
img = imread(filename)
# handle grayscale
if img.ndim == 2:
img = img[:, :, None][:, :, [0, 0, 0]]
H0, W0 = img.shape[0], img.shape[1]
img = imresize(img, float(args.image_size) / max(H0, W0))
H, W = img.shape[0], img.shape[1]
# swap rgb to bgr. Is this the best way?
r = img[:,:,0].copy()
img[:,:,0] = img[:,:,2]
img[:,:,2] = r
lock.acquire()
if i % 1000 == 0:
print 'Writing image %d / %d' % (i, len(data['images']))
original_heights[i] = H0
original_widths[i] = W0
image_heights[i] = H
image_widths[i] = W
image_dset[i, :, :H, :W] = img.transpose(2, 0, 1)
lock.release()
q.task_done()
print('adding images to hdf5.... (this might take a while)')
for i in xrange(args.num_workers):
t = Thread(target=worker)
t.daemon = True
t.start()
q.join()
h5_file.create_dataset('image_heights', data=image_heights)
h5_file.create_dataset('image_widths', data=image_widths)
h5_file.create_dataset('original_heights', data=original_heights)
h5_file.create_dataset('original_widths', data=original_widths)
def encode_splits(data, split_data):
""" Encode splits as intetgers and return the array. """
lookup = {'train': 0, 'val': 1, 'test': 2}
id_to_split = {}
split_array = np.zeros(len(data['images']))
for split, idxs in split_data.iteritems():
for idx in idxs:
id_to_split[idx] = split
for i, img in enumerate(data['images']):
if id_to_split[img['id']] in lookup:
split_array[i] = lookup[id_to_split[img['id']]]
return split_array
def filter_images(data, split_data):
""" Keep only images that are in some split and have some captions """
all_split_ids = set()
for split_name, ids in split_data.iteritems():
all_split_ids.update(ids)
tmp_data = []
for img in data['images']:
keep = img['id'] in all_split_ids and len(img['regions']) > 0
if keep:
tmp_data.append(img)
new_data = {}
new_data['images'] = tmp_data
new_data['categories'] = data['categories']
return new_data
def make_data(filename):
data = {}
train_data = json.load(open(filename %('train')))
val_data = json.load(open(filename %('val')))
data['images'] = train_data['images'] + val_data['images']
data['annotations'] = train_data['annotations'] + val_data['annotations']
# Merge all the regions in the key 'images'.
tmp_data = {}
for anno in data['annotations']:
tmp_data[anno['image_id']] = tmp_data.get(anno['image_id'], []) + [anno]
for img in data['images']:
img['regions'] = tmp_data.get(img['id'], [])
img['file_name'] = get_filepath(img['file_name'])
del data['annotations']
data['categories'] = train_data['categories']
return data
def main(args):
# read in the data
data = make_data(args.region_data)
with open(args.split_json, 'r') as f:
split_data = json.load(f)
# Only keep images that are in a split
print 'There are %d images total' % len(data['images'])
data = filter_images(data, split_data)
print 'After filtering for splits there are %d images' % len(data['images'])
# create the output hdf5 file handle
f = h5py.File(args.h5_output, 'w')
# add several fields to the file: images, and the original/resized widths/heights
add_images(data, f, args)
# add split information
split = encode_splits(data, split_data)
f.create_dataset('split', data=split)
# build class label mapping
cls_to_idx, idx_to_cls = build_class_dict(data) # both mappings are dicts
# Remove the redundant category information
data = data['images']
# encode labels
labels_matrix, iscrowd_vector = encode_labels(data, cls_to_idx)
f.create_dataset('labels', data=labels_matrix)
f.create_dataset('iscrowd', data=iscrowd_vector)
# encode boxes
original_heights = np.asarray(f['original_heights'])
original_widths = np.asarray(f['original_widths'])
boxes_matrix = encode_boxes(data, original_heights, original_widths, args.image_size)
f.create_dataset('boxes', data=boxes_matrix)
# integer mapping between image ids and box ids
img_to_first_box, img_to_last_box = build_img_idx_to_box_idxs(data)
f.create_dataset('img_to_first_box', data=img_to_first_box)
f.create_dataset('img_to_last_box', data=img_to_last_box)
filename_to_idx, idx_to_filename = build_filename_dict(data)
box_to_img = encode_filenames(data, filename_to_idx)
f.create_dataset('box_to_img', data=box_to_img)
f.close()
# and write the additional json file
json_struct = {
'cls_to_idx': cls_to_idx,
'idx_to_cls': idx_to_cls,
'filename_to_idx': filename_to_idx,
'idx_to_filename': idx_to_filename,
}
with open(args.json_output, 'w') as f:
json.dump(json_struct, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# INPUT settings
parser.add_argument('--region_data',
default='/home/ruotian/code/pycoco/annotations/instances_%s2014.json',
help='Input JSON file with regions and captions')
parser.add_argument('--image_dir',
default='/home/ruotian/data/MSCOCO/',
help='Directory containing all images')
parser.add_argument('--split_json',
default='info/coco_splits.json',
help='JSON file of splits')
# OUTPUT settings
parser.add_argument('--json_output',
default='data/COCO-regions-dicts.json',
help='Path to output JSON file')
parser.add_argument('--h5_output',
default='data/COCO-regions.h5',
help='Path to output HDF5 file')
# OPTIONS
parser.add_argument('--image_size',
default=720, type=int,
help='Size of longest edge of preprocessed images')
parser.add_argument('--num_workers', default=5, type=int)
args = parser.parse_args()
main(args)
| [
"[email protected]"
] | |
ab7557f54c78b00a84b9184bb4bae7e516208f59 | c0156da1c81a3a76e397974399c7345d082eca9b | /venv/lib/python3.7/site-packages/webdav/common.py | 5ba8b2c9e55df9fcb895045c8a1ca7c86de54bb2 | [
"Apache-2.0"
] | permissive | leanhvu86/matrix-server | 1823c60fc6ba5ed489bb5720474c6b56a9aec688 | 6e16fc53dfebaeaf222ff5a371ccffcc65de3818 | refs/heads/master | 2023-05-09T01:21:37.774510 | 2021-05-21T15:10:48 | 2021-05-21T15:10:48 | 369,569,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,095 | py | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Commonly used functions for WebDAV support modules."""
import re
from six.moves.urllib.parse import urlparse
from six.moves.urllib.parse import urlunparse
from Acquisition import aq_base
from Acquisition import aq_parent
from zExceptions import HTTPConflict
from zExceptions import HTTPLocked
from zExceptions import HTTPPreconditionFailed
from zExceptions import HTTPUnsupportedMediaType
class WebDAVException(Exception):
pass
class Locked(WebDAVException, HTTPLocked):
pass
class PreconditionFailed(WebDAVException, HTTPPreconditionFailed):
pass
class Conflict(WebDAVException, HTTPConflict):
pass
class UnsupportedMediaType(WebDAVException, HTTPUnsupportedMediaType):
pass
def absattr(attr):
if callable(attr):
return attr()
return attr
def urljoin(url, s):
url = url.rstrip('/')
s = s.lstrip('/')
return '/'.join((url, s))
def urlfix(url, s):
n = len(s)
if url[-n:] == s:
url = url[:-n]
if len(url) > 1 and url[-1] == '/':
url = url[:-1]
return url
def is_acquired(ob):
# Return true if this object is not a direct
# subobject of its __parent__ object.
if not hasattr(ob, '__parent__'):
return 0
if hasattr(aq_base(aq_parent(ob)), absattr(ob.id)):
return 0
if hasattr(aq_base(ob), 'isTopLevelPrincipiaApplicationObject') and \
ob.isTopLevelPrincipiaApplicationObject:
return 0
return 1
def urlbase(url, ftype=None, fhost=None):
# Return a '/' based url such as '/foo/bar', removing
# type, host and port information if necessary.
parsed = urlparse(url)
return urlunparse(('', '') + tuple(parsed)[2:]) or '/'
def isDavCollection(object):
"""Return true if object is a DAV collection."""
return getattr(object, '__dav_collection__', 0)
def tokenFinder(token):
# takes a string like '<opaquelocktoken:afsdfadfadf> and returns the token
# part.
if not token:
return None # An empty string was passed in
if token[0] == '[':
return None # An Etag was passed in
if token[0] == '<':
token = token[1:-1]
return token[token.find(':') + 1:]
# If: header handling support. IfParser returns a sequence of
# TagList objects in the order they were parsed which can then
# be used in WebDAV methods to decide whether an operation can
# proceed or to raise HTTP Error 412 (Precondition failed)
IfHdr = re.compile(
r"(?P<resource><.+?>)?\s*\((?P<listitem>[^)]+)\)"
)
ListItem = re.compile(
r"(?P<not>not)?\s*(?P<listitem><[a-zA-Z]+:[^>]*>|\[.*?\])",
re.I)
class TagList(object):
def __init__(self):
self.resource = None
self.list = []
self.NOTTED = 0
def IfParser(hdr):
out = []
i = 0
while 1:
m = IfHdr.search(hdr[i:])
if not m:
break
i = i + m.end()
tag = TagList()
tag.resource = m.group('resource')
if tag.resource: # We need to delete < >
tag.resource = tag.resource[1:-1]
listitem = m.group('listitem')
tag.NOTTED, tag.list = ListParser(listitem)
out.append(tag)
return out
def ListParser(listitem):
out = []
NOTTED = 0
i = 0
while 1:
m = ListItem.search(listitem[i:])
if not m:
break
i = i + m.end()
out.append(m.group('listitem'))
if m.group('not'):
NOTTED = 1
return NOTTED, out
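# --- Illustrative sketch (not part of the original module): running IfParser on a
# typical WebDAV If header yields the TagList sequence described in the comment above.
# The header value below is an invented example.
def _ifparser_example():
    hdr = '</resource1> (<opaquelocktoken:1234> ["etag1"]) (Not <DAV:no-lock>)'
    for tag in IfParser(hdr):
        # tag.resource is the target (angle brackets stripped), tag.list holds the
        # state tokens / etags, and tag.NOTTED is 1 for a "Not" list.
        print(tag.resource, tag.list, tag.NOTTED)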
| [
"[email protected]"
] | |
cccc8870f7ed30c693be4991c997bd40760e5ee8 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_180/ch62_2019_10_02_15_14_58_527777.py | b4ad3aad05eeb567a0a0710d004c18a93d56a9fd | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | def filtra_positivos(lista):
lista_positivos = []
for i in lista:
if i > 0:
lista_positivos.append(i)
    return lista_positivos | [
"[email protected]"
] | |
d8a55ec8bdd74b0f3ae4fc16b7c292a0b5ab4452 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow/python/estimator/canned/linear.py | a870fe7a1fe83f1323f5d1b7383d5c93f2edf5e8 | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e4a29da17634359c96032259089138e261570186b23d8d3ede31721e341ba111
size 1310
| [
"github@cuba12345"
] | github@cuba12345 |
8881a9b4109aac6cccaa8aad8b8db98a4aecf08a | c0f4104194a7989e44d7f0161b2425c5a5bc3a98 | /tacker/agent/linux/daemon.py | e60c6d9210e2a6984c2d5f8ba4f96c7331599496 | [] | no_license | bopopescu/Openstack-2 | f65470bdd0ee4736c45b6f869f0453cb8eb446c8 | 6f06133562e3dfd490695a92c9ddf1a322675104 | refs/heads/master | 2022-11-28T09:19:21.633850 | 2016-06-23T07:55:32 | 2016-06-23T07:55:32 | 282,095,817 | 0 | 0 | null | 2020-07-24T01:44:49 | 2020-07-24T01:44:48 | null | UTF-8 | Python | false | false | 4,324 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import fcntl
import os
import signal
import sys
from tacker.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Pidfile(object):
def __init__(self, pidfile, procname, uuid=None):
self.pidfile = pidfile
self.procname = procname
self.uuid = uuid
try:
self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
LOG.exception(_("Error while handling pidfile: %s"), pidfile)
sys.exit(1)
def __str__(self):
return self.pidfile
def unlock(self):
if not not fcntl.flock(self.fd, fcntl.LOCK_UN):
raise IOError(_('Unable to unlock pid file'))
def write(self, pid):
os.ftruncate(self.fd, 0)
os.write(self.fd, "%d" % pid)
os.fsync(self.fd)
def read(self):
try:
pid = int(os.read(self.fd, 128))
os.lseek(self.fd, 0, os.SEEK_SET)
return pid
except ValueError:
return
def is_running(self):
pid = self.read()
if not pid:
return False
cmdline = '/proc/%s/cmdline' % pid
try:
with open(cmdline, "r") as f:
exec_out = f.readline()
return self.procname in exec_out and (not self.uuid or
self.uuid in exec_out)
except IOError:
return False
class Daemon(object):
"""A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null', procname='python', uuid=None):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.procname = procname
self.pidfile = Pidfile(pidfile, procname, uuid)
def _fork(self):
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError:
LOG.exception(_('Fork failed'))
sys.exit(1)
def daemonize(self):
"""Daemonize process by doing Stevens double fork."""
# fork first time
self._fork()
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# fork second time
self._fork()
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
stdin = open(self.stdin, 'r')
stdout = open(self.stdout, 'a+')
stderr = open(self.stderr, 'a+', 0)
os.dup2(stdin.fileno(), sys.stdin.fileno())
os.dup2(stdout.fileno(), sys.stdout.fileno())
os.dup2(stderr.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delete_pid)
signal.signal(signal.SIGTERM, self.handle_sigterm)
self.pidfile.write(os.getpid())
def delete_pid(self):
os.remove(str(self.pidfile))
def handle_sigterm(self, signum, frame):
sys.exit(0)
def start(self):
"""Start the daemon."""
if self.pidfile.is_running():
self.pidfile.unlock()
message = _('Pidfile %s already exist. Daemon already running?')
LOG.error(message, self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def run(self):
"""Override this method when subclassing Daemon.
start() will call this method after the process has daemonized.
"""
pass
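# --- Illustrative usage sketch (not part of the original module), following the class
# docstring above: subclass Daemon and override run(). The pid file path, process name
# and loop body are invented examples; nothing below is executed on import.
class _ExampleDaemon(Daemon):
    def run(self):
        # Periodic work of the daemonized service goes here.
        import time
        while True:
            time.sleep(60)
# A caller would then start it with:
#     _ExampleDaemon('/var/run/example.pid', procname='example').start()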
| [
"egonmin@CN00119199"
] | egonmin@CN00119199 |
8ee2f1b168cb673bb9e1196e8e8507088a55e75b | 7300fc72162568f886e04509431359a62a09da79 | /lino_xl/lib/phones/mixins.py | cfc73974e2bfd9b6033a7d3015cfbcb1ca35f494 | [
"BSD-2-Clause"
] | permissive | forexblog/xl | ad27aa1e9f5669f8a78ec55f4b7d0bd952da6327 | 130303647d01c0d8271f770f3054907c183dc1e8 | refs/heads/master | 2023-03-04T01:44:39.485452 | 2021-02-13T08:18:16 | 2021-02-13T08:18:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,590 | py | # Copyright 2017-2019 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from etgen.html import E, join_elems
from lino.api import rt, dd, _
from lino.core.diff import ChangeWatcher
from lino.mixins import Contactable, Phonable
from .choicelists import ContactDetailTypes
class ContactDetailsOwner(Contactable, Phonable):
class Meta:
abstract = True
if dd.is_installed('phones'):
def after_ui_save(self, ar, cw):
if cw is None: # it's a new instance
for cdt in ContactDetailTypes.get_list_items():
self.propagate_contact_detail(cdt)
pass
else:
for k, old, new in cw.get_updates():
cdt = ContactDetailTypes.find(field_name=k)
# cdt = getattr(ContactDetailTypes, k, False)
if cdt:
self.propagate_contact_detail(cdt)
super(ContactDetailsOwner, self).after_ui_save(ar, cw)
def propagate_contact_detail(self, cdt):
k = cdt.field_name
if k:
value = getattr(self, k)
ContactDetail = rt.models.phones.ContactDetail
kw = dict(partner=self, primary=True, detail_type=cdt)
try:
cd = ContactDetail.objects.get(**kw)
if value:
cd.value = value
# don't full_clean() because no need to check
# primary of other items
cd.save()
else:
cd.delete()
except ContactDetail.DoesNotExist:
if value:
kw.update(value=value)
cd = ContactDetail(**kw)
# self.phones_by_partner.add(cd, bulk=False)
cd.save()
def propagate_contact_details(self, ar=None):
watcher = ChangeWatcher(self)
for cdt in ContactDetailTypes.get_list_items():
self.propagate_contact_detail(cdt)
if ar is not None:
watcher.send_update(ar)
def get_overview_elems(self, ar):
# elems = super(ContactDetailsOwner, self).get_overview_elems(ar)
yield rt.models.phones.ContactDetailsByPartner.get_table_summary(
self, ar)
@dd.displayfield(_("Contact details"))
def contact_details(self, ar):
if ar is None:
return ''
sar = rt.models.phones.ContactDetailsByPartner.request(parent=ar, master_instance=self)
items = [o.detail_type.as_html(o, sar)
for o in sar if not o.end_date]
return E.p(*join_elems(items, sep=', '))
else:
def get_overview_elems(self, ar):
return []
@dd.displayfield(_("Contact details"))
def contact_details(self, ar):
# if ar is None:
# return ''
items = []
for cdt in ContactDetailTypes.get_list_items():
if cdt.field_name:
value = getattr(self, cdt.field_name)
if value:
items.append(cdt.format(value))
# items.append(ContactDetailTypes.email.format(self.email))
# # items.append(E.a(self.email, href="mailto:" + self.email))
# items.append(self.phone)
# items.append(E.a(self.url, href=self.url))
return E.p(*join_elems(items, sep=', '))
| [
"[email protected]"
] | |
2e7b9dada3d2c6d1f5775277b7fedd5aaa57321b | c29b838371729ac04744b40d486f0b55212990b6 | /Spider-Learn/Spider/chapter4_analyse_library_pyquery.py | 5ec6b154f29be6291fe4c1e9b4b48b87708a9f36 | [] | no_license | Sugarsugarzz/PyPractice | 93c3155a94d162c9eabf0d1a641d28bc6d639c22 | d91b7d6ca996792fe409c08862fa9da5b1dc319b | refs/heads/master | 2023-02-13T01:51:24.909947 | 2021-01-20T02:57:22 | 2021-01-20T02:57:22 | 163,177,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,717 | py |
#****** 4.3 使用pyquery ******
# 适合于用CSS选择器较多的情况
# 1、安装
# pip3 install pyquery
import pyquery
# 引入PyQuery,别名py
from pyquery import PyQuery as pq
# 2、初始化
# 传入一个参数来初始化Pyquery
# *字符串初始化
html = '''
<div>
<ul>
<li class="item-0">first item</li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
<li class="item-1 active"><a href ="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul>
</div
'''
doc = pyquery.PyQuery(html)
# 简化后
doc = pq(html)
print(doc('li'))
# *URL初始化
# doc = pq(url='https://cuiqingcai.com')
print(doc('title'))
# *文件初始化
doc = pq(filename='test.html')
print(doc('li'))
# 3、基本CSS选择器
# 返回的是PyQuery类型
# 实例
html = '''
<div id="container">
<ul class="list">
<li class="item-0">first item</li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
<li class="item-1 active"><a href ="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul>
</div>
'''
doc = pq(html)
# #container选择的是id为container的
# .list选择的是class为list的
# li直接选择li节点
print(doc('#container .list li'))
print(type(doc('#container .list li')))
# 4、查找节点
# *子节点
# 查找子节点,用到find()方法,传入的参数是CSS选择器
# find()的范围是所有子孙节点,如果只查找子节点,用children()方法
doc = pq(html)
items = doc('.list')
print(type(items))
print(items)
lis = items.find('li')
print(type(lis))
print(lis)
lis = items.children('.active')
print(type(lis))
print(lis)
# *父节点
# 用parent()方法,返回直接父节点
html = '''
<div class="wrap">
<div id="container">
<ul class="list">
<li class="item-0">first item</li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
<li class="item-1 active"><a href ="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul>
</div>
</div>
'''
doc = pq(html)
items = doc('.list')
container = items.parent()
print(type(container))
print(container)
# 用parents()方法,会返回所有的祖先节点
container = items.parents()
print(type(container))
print(container)
# 筛选某个祖先节点,可以传入CSS选择器
container = items.parents('.wrap')
print(type(container))
print(container)
# *兄弟节点
# 用siblings()方法,返回所有兄弟节点,可传入CSS选择器
doc = pq(html)
li = doc('.list .item-0.active')
print(li.siblings())
# *遍历
# 单个节点,可以直接打印输出,也可以转成字符串
doc = pq(html)
li = doc('.item-0.active')
print(li)
print(str(li))
# 多个节点,遍历,用items()方法,返回生成器类型
doc = pq(html)
lis = doc('li').items() # lis是generator类型
print(type(lis))
for li in lis:
print(li, type(li))
# 6、获取信息
# *获取属性
# 用attr()方法获取属性
doc = pq(html)
a = doc('.item-0.active a')
print(a)
print(a.attr('href'))
# 用attr属性获取属性
print(a.attr.href)
# 但是attr()只能得到第一个节点的属性,要获取所有a节点的属性,就要遍历
doc = pq(html)
a = doc('a')
for item in a.items():
print(item.attr('href'))
# *获取文本
# 总结:html()方法返回的是第一个节点的内部HTML文本,多个节点的结果,需要遍历
# text()方法返回的是所有节点取文本后合并成一个字符串,不需要遍历
# 获取其内部的文本,调用text()方法实现
# 此时会忽略掉节点内部包含的所有HTML,只返回纯文字内容
html = '''
<div class="wrap">
<div id="container">
<ul class="list">
<li class="item-0">first item</li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
<li class="item-1 active"><a href ="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul>
</div>
</div>
'''
doc = pq(html)
a = doc('.item-0.active')
print(li)
print(li.text())
# 获取这个节点内部的HTML文本,调用html()方法实现
li = doc('.item-0.active')
print(li)
print(li.html())
# 7、节点操作
# 对节点进行动态修改,如给节点添加一个class、移除某个节点等
# * addClass 和 removeClass
html = '''
<div class="wrap">
<div id="container">
<ul class="list">
<li class="item-0">first item</li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
<li class="item-1 active"><a href ="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul>
</div>
</div>
'''
doc = pq(html)
li = doc('.item-0.active')
print(li)
li.remove_class('active')
print(li)
li.add_class('active')
print(li)
# * attr、text和html
html = '''
<ul class="list">
<li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
</ul>
'''
doc = pq(html)
li = doc('.item-0.active')
print(li)
li.attr('name', 'link')
print(li)
li.text('changed item')
print(li)
li.html('<span>changed item</span>')
print(li)
# *remove()
html = '''
<div class="wrap">
Hello, World
<p>This is a paragraph.</p>
</div>
'''
doc = pq(html)
wrap = doc('.wrap')
print(wrap.text())
# 只要Hello World
wrap.find('p').remove()
print(wrap.text())
# 8、伪类选择器
# CSS选择器之所以强大,是因为支持多种多样的伪类选择器
html = '''
<div class="wrap">
<div id="container">
<ul class="list">
<li class="item-0">first item</li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
<li class="item-1 active"><a href ="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a></li>
</ul>
</div>
</div>
'''
doc = pq(html)
# 选择第一个li节点
li = doc('li:first-child')
print(li)
# 选择最后一个li节点
li = doc('li:last-child')
print(li)
# 选择第二个li节点
li = doc('li:nth-child(2)')
print(li)
# 选择第三个li之后的li节点
li = doc('li:gt(2)')
print(li)
# 选择偶数位置的li节点
li = doc('li:nth-child(2n)')
print(li)
# 选择包含second文本的li节点
li = doc('li:contains(second)')
print(li)
| [
"[email protected]"
] | |
c46eea6ac70388e3126470a5470b481d84d8b08e | a7b66311c2ce113789933ec3162f1128b2862f13 | /app/waterQual/EPA/ntnModel/wrapCl.py | 1059b5ef803b609b8ac5c26f307b198b30e4359e | [
"MIT"
] | permissive | ChanJeunlam/geolearn | 214b2c42359ea1164b39117fad2d7470adeb6d35 | 791caa54eb70920823ea7d46714dc8a3e7fa7445 | refs/heads/master | 2023-07-16T04:13:15.526364 | 2021-08-16T05:24:18 | 2021-08-16T05:24:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,772 | py | import os
import time
import pandas as pd
import numpy as np
import json
from hydroDL import kPath
from hydroDL.data import usgs, gageII, gridMET, transform
# varC = usgs.varC
varC = ['00940']
siteNoLst = ['0422026250', '04232050', '0423205010']
nFill = 3
varG = gageII.lstWaterQuality
caseName = 'chloride'
# add a start/end date to improve efficiency.
t = pd.date_range(start='1979-01-01', end='2019-12-30', freq='W-TUE')
sd = t[0]
ed = t[-1]
td = pd.date_range(sd, ed)
rho = 50
# temp: read NTN
dirNTN = os.path.join(kPath.dirData, 'EPA', 'NTN')
fileData = os.path.join(dirNTN, 'NTN-All-w.csv')
fileSite = os.path.join(dirNTN, 'NTNsites.csv')
tabData = pd.read_csv(fileData)
tabSite = pd.read_csv(fileSite)
tabData['siteID'] = tabData['siteID'].apply(lambda x: x.upper())
tabData = tabData.replace(-9, np.nan)
tab = tabData[tabData['siteID'] == 'NY43']
tab.index = pd.to_datetime(tab['dateon'])
weekday = tab.index.normalize().weekday
tab2 = pd.DataFrame(index=t)
tol = pd.Timedelta(3, 'D')
tab2 = pd.merge_asof(left=tab2, right=tab, right_index=True,
left_index=True, direction='nearest', tolerance=tol)
varPLst = ['ph', 'Conduc', 'Ca', 'Mg', 'K', 'Na', 'NH4', 'NO3', 'Cl', 'SO4']
dfP = tab2[varPLst]
# gageII
tabG = gageII.readData(varLst=varG, siteNoLst=siteNoLst)
tabG = gageII.updateCode(tabG)
# read data and merge to: f/q=[nT,nP,nX], g/c=[nP,nY]
fLst = list() # forcing ts
pLst = list() # concentrations in rainfall
gLst = list() # geo-const
qLst = list() # streamflow
cLst = list() # water quality
# cfLst = list() # water quality flags
infoLst = list()
t0 = time.time()
for i, siteNo in enumerate(siteNoLst):
t1 = time.time()
dfC = usgs.readSample(siteNo, codeLst=varC, startDate=sd)
dfQ = usgs.readStreamflow(siteNo, startDate=sd)
dfF = gridMET.readBasin(siteNo)
# merge to one table
df = pd.DataFrame({'date': td}).set_index('date')
df = df.join(dfC)
df = df.join(dfQ)
df = df.join(dfF)
df = df.rename(columns={'00060_00003': '00060'})
# convert to weekly
offset = pd.offsets.timedelta(days=-6)
dfW = df.resample('W-MON', loffset=offset).mean()
dfW = dfW.join(dfP)
dfC = dfW[varC].dropna(how='all')
for k in range(len(dfC)):
ct = dfC.index[k]
ctR = pd.date_range(
start=ct-pd.Timedelta(days=rho*7-1), end=ct, freq='W-TUE')
if (ctR[0] < sd) or (ctR[-1] > ed):
continue
tempQ = pd.DataFrame({'date': ctR}).set_index('date').join(
dfW['00060']).interpolate(limit=nFill, limit_direction='both')
tempF = pd.DataFrame({'date': ctR}).set_index('date').join(
dfW[gridMET.varLst+varPLst]).interpolate(limit=nFill, limit_direction='both')
qLst.append(tempQ.values)
fLst.append(tempF.values)
cLst.append(dfC.iloc[k].values)
gLst.append(tabG.loc[siteNo].values)
infoLst.append(dict(siteNo=siteNo, date=ct))
t2 = time.time()
print('{} on site {} reading {:.3f} total {:.3f}'.format(
i, siteNo, t2-t1, t2-t0))
q = np.stack(qLst, axis=-1).swapaxes(1, 2).astype(np.float32)
f = np.stack(fLst, axis=-1).swapaxes(1, 2).astype(np.float32)
g = np.stack(gLst, axis=-1).swapaxes(0, 1).astype(np.float32)
c = np.stack(cLst, axis=-1).swapaxes(0, 1).astype(np.float32)
infoDf = pd.DataFrame(infoLst)
saveFolder = os.path.join(kPath.dirWQ, 'trainData')
saveName = os.path.join(saveFolder, caseName)
np.savez(saveName, q=q, f=f, c=c, g=g)
infoDf.to_csv(saveName+'.csv')
dictData = dict(name=caseName, rho=rho, nFill=nFill,
varG=varG, varC=varC, varQ=['00060'],
varF=gridMET.varLst+varPLst, siteNoLst=siteNoLst)
with open(saveName+'.json', 'w') as fp:
json.dump(dictData, fp, indent=4)
| [
"[email protected]"
] | |
c4a71d58b51d50f238a0fcfefb454888e76cbac3 | c3cf442e56969e98fbd392ee89bd85b3e22d5cd2 | /python/Spider/github_login2.py | ddd07e038a2e21d86bbac1715e399e0fe3d6860d | [] | no_license | Eacaen/diff_Code_Learn | bd4bd409f0027ab3d606ef029de9ae4a3af09775 | e55619c5736181fd50666b61d06e6ed7cafc4394 | refs/heads/master | 2021-01-12T07:55:54.127018 | 2019-11-07T10:42:05 | 2019-11-07T10:42:05 | 77,052,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,942 | py | # -*- coding:utf-8 -*-
import requests
import re
session = requests.Session()
# header = {
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
# "Accept-Encoding": "gzip, deflate, sdch, br",
# "Accept-Language": "zh-CN,zh;q=0.8",
# "Cache-Control": "max-age=0",
# "Connection": "keep-alive",
# "Cookie": "_octo=GH1.1.1664649958.1449761838; _gat=1; logged_in=no; _gh_sess=eyJsYXN0X3dyaXRlIjoxNDcyODA4MTE1NzQ5LCJzZXNzaW9uX2lkIjoiZGU3OTQ1MWE0YjQyZmI0NmNhYjM2MzU2MWQ4NzM0N2YiLCJjb250ZXh0IjoiLyIsInNweV9yZXBvIjoiY25vZGVqcy9ub2RlY2x1YiIsInNweV9yZXBvX2F0IjoxNDcyODA3ODg0LCJyZWZlcnJhbF9jb2RlIjoiaHR0cHM6Ly9naXRodWIuY29tLyIsIl9jc3JmX3Rva2VuIjoiTllUd3lDdXNPZmtyYmRtUDdCQWtpQzZrNm1DVDhmY3FPbHJEL0U3UExGaz0iLCJmbGFzaCI6eyJkaXNjYXJkIjpbXSwiZmxhc2hlcyI6eyJhbmFseXRpY3NfbG9jYXRpb25fcXVlcnlfc3RyaXAiOiJ0cnVlIn19fQ%3D%3D--91c34b792ded05823f11c6fe8415de24aaa12482; _ga=GA1.2.1827381736.1472542826; tz=Asia%2FShanghai",
# "Host": "github.com",
# "Upgrade-Insecure-Requests": "1",
# "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
# }
header = {
"Accept" : "text/html,application/xhtml+x…lication/xml;q=0.9,*/*;q=0.8" ,
"Accept-Encoding" : "gzip, deflate, br",
"Accept-Language" : "en-US,en;q=0.5",
"Connection" : "keep-alive",
"Cookie" : "logged_in=no; _octo=GH1.1.1970970484.1500426888; _ga=GA1.2.1727967677.1500426888; _gh_sess=eyJsYXN0X3dyaXRlIjoxNTAxMjMyMzg5MDEyLCJzZXNzaW9uX2lkIjoiZThiNTIxZmFhYjdiNWMzZTVjNTY2YWY4MmU5MWJjNWQiLCJjb250ZXh0IjoiLyIsImxhc3RfcmVhZF9mcm9tX3JlcGxpY2FzIjoxNTAxMjMyMzkyMTEzLCJyZWZlcnJhbF9jb2RlIjoiaHR0cHM6Ly9naXRodWIuY29tLyIsIl9jc3JmX3Rva2VuIjoiQ2JkYjAxSGREZTVtcnJZU29GQ29aYzNabHZjWitCQmN6WFdKcDEwV2thaz0iLCJmbGFzaCI6eyJkaXNjYXJkIjpbXSwiZmxhc2hlcyI6eyJhbmFseXRpY3NfbG9jYXRpb25fcXVlcnlfc3RyaXAiOiJ0cnVlIn19fQ%3D%3D--59c4346f810a2bd6b496962bda680907c92ba032; tz=Asia%2FShanghai; _gat=1",
"Host" : "github.com" ,
"Upgrade-Insecure-Requests" : "1",
"User-Agent" :"Mozilla/5.0 (X11; Ubuntu; Lin… Gecko/20100101 Firefox/54.0" ,
"Content-Type" : "application/x-www-form-urlencoded",
# "Content-Length" : "182",
"Referer" : "https://github.com",
}
def getToken():
html = session.get('https://github.com/login', headers=header)
pattern = re.compile(r'<input name="authenticity_token" type="hidden" value="(.*)" />')
authenticity_token = pattern.findall(html.content)[0]
print authenticity_token
return authenticity_token
def userpwdLogin():
payload = {
"login" : "Eacaen",
"password" : "HTy119110315",
'commit': 'Sign+in',
'authenticity_token': getToken(),
'utf8': '%E2%9C%93'}
r = session.post('https://github.com/session', data=payload, headers=header)
print r.status_code
print r.content #login success
userpwdLogin() | [
"[email protected]"
] | |
dab468facc509b0bc4a17bf71d78d2f64e565972 | 0689ad04900b45e6ffb85756e65e96f30781558b | /py44/数据/day06/demo03_vectorize.py | 53433c685f1e8058eb2bb0adb205b8acc6cb2766 | [] | no_license | lizhihui16/aaa | a5452b5d0de4c2ad6342fce1b8aef278d2d2943e | e8c38e012f6aa0bc05ac6481d6c3e2b4e9013b56 | refs/heads/master | 2020-04-24T01:05:19.266060 | 2019-02-20T01:43:51 | 2019-02-20T01:43:51 | 171,586,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
'''
Vectorization example using numpy.vectorize
'''
import numpy as np
import math as m
def foo(x, y):
return m.sqrt(x**2 + y**2)
x, y = 3, 4
print(foo(x, y))
x = np.array([3, 4, 5, 6])
y = np.array([4, 5, 6, 7])
# z = foo(x, y)  # wrong: foo works on scalars, not on arrays
# Vectorize the foo function
foo_v = np.vectorize(foo)
print(foo_v(x, y))
# Vectorize the function with the frompyfunc method
# foo takes 2 arguments and will produce 1 return value
foo_f = np.frompyfunc(foo, 2, 1)
print(foo_f(x, y))
| [
"[email protected]"
] | |
9012a4c3c7502633f1df59574ab7602af3edaaeb | 533c298a21e865d190e69b0c95a0f9ecd9dd8d8b | /reviewboard/__init__.py | 016a72ad23705cdef9d791d57c91bf2bda9806f0 | [
"MIT"
] | permissive | djs/reviewboard | cb78573890b821cbc228fb43a1bdb8e337d5e9d5 | 813158fbb31d7889e224f3fc1350fd4a791874ec | refs/heads/master | 2021-01-15T22:41:24.101928 | 2009-09-23T09:54:41 | 2009-09-23T09:54:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | # The version of Review Board.
#
# This is in the format of:
#
# (Major, Minor, Micro, alpha/beta/rc/final, Release Number, Released)
#
VERSION = (1, 1, 0, 'alpha', 2, False)
def get_version_string():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3] != 'final':
if VERSION[3] == 'rc':
version += ' RC%s' % VERSION[4]
else:
version += ' %s %s' % (VERSION[3], VERSION[4])
if not is_release():
version += " (dev)"
return version
def get_package_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3] != 'final':
version += '%s%s' % (VERSION[3], VERSION[4])
return version
def is_release():
return VERSION[5]
def initialize():
"""Begins initialization of Review Board.
This sets up the logging, generates cache serial numbers, and then
fires an initializing signal that other parts of the codebase can
connect to. This must be called for such features as e-mail notification
to work.
"""
import logging
import os
from djblets.util.misc import generate_cache_serials
from djblets import log
from reviewboard import signals
# Set up logging.
log.init_logging()
logging.info("Log file for Review Board v%s (PID %s)" %
(get_version_string(), os.getpid()))
# Generate cache serials
generate_cache_serials()
signals.initializing.send(sender=None)
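# --- Illustrative sketch (not part of the original module): per the docstring of
# initialize() above, other code can connect receivers to the initializing signal fired
# here. This assumes the Django-style Signal API behind reviewboard.signals.
def _example_connect_initializing_receiver():
    from reviewboard import signals
    def on_initializing(sender, **kwargs):
        # Set up anything that must wait until Review Board has initialized,
        # e.g. hooks needed for e-mail notification.
        pass
    signals.initializing.connect(on_initializing)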
| [
"[email protected]"
] | |
f5a8c989e65546942db6eb04c553e6eccd86bdf4 | 387d39b6be636d7a0c119882e055ee527f727e1a | /python/ccxt/bybit.py | 928a39f843ac70f3ebdf5da08cf566799862c092 | [
"MIT"
] | permissive | lobatt/ccxt | 30b30fa08d55538fe7dad4380aa1842a28ce06a3 | 9faec08330ca2565f63c7c2b11c99161883e2bdd | refs/heads/master | 2023-07-19T19:57:29.110899 | 2021-09-04T00:26:51 | 2021-09-04T00:26:51 | 228,749,714 | 4 | 4 | MIT | 2021-08-01T01:15:51 | 2019-12-18T03:25:20 | JavaScript | UTF-8 | Python | false | false | 88,075 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
class bybit(Exchange):
def describe(self):
return self.deep_extend(super(bybit, self).describe(), {
'id': 'bybit',
'name': 'Bybit',
'countries': ['VG'], # British Virgin Islands
'version': 'v2',
'userAgent': None,
'rateLimit': 100,
'has': {
'cancelOrder': True,
'CORS': True,
'cancelAllOrders': True,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchDeposits': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTransactions': False,
'fetchWithdrawals': True,
},
'timeframes': {
'1m': '1',
'3m': '3',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'4h': '240',
'6h': '360',
'12h': '720',
'1d': 'D',
'1w': 'W',
'1M': 'M',
'1y': 'Y',
},
'urls': {
'test': 'https://api-testnet.bybit.com',
'logo': 'https://user-images.githubusercontent.com/51840849/76547799-daff5b80-649e-11ea-87fb-3be9bac08954.jpg',
'api': 'https://api.bybit.com',
'www': 'https://www.bybit.com',
'doc': [
'https://bybit-exchange.github.io/docs/inverse/',
'https://bybit-exchange.github.io/docs/linear/',
'https://github.com/bybit-exchange',
],
'fees': 'https://help.bybit.com/hc/en-us/articles/360039261154',
'referral': 'https://www.bybit.com/app/register?ref=X7Prm',
},
'api': {
'public': {
'get': [
'orderBook/L2',
'kline/list',
'tickers',
'trading-records',
'symbols',
'time',
'announcement',
],
},
'private': {
'get': [
'order',
'stop-order',
'position/list',
'wallet/balance',
'execution/list',
],
'post': [
'order/create',
'order/cancel',
'order/cancelAll',
'stop-order/cancelAll',
],
},
'openapi': {
'get': [
'order/list',
'stop-order/list',
'wallet/risk-limit/list',
'wallet/risk-limit',
'funding/prev-funding-rate',
'funding/prev-funding',
'funding/predicted-funding',
'api-key',
'wallet/fund/records',
'wallet/withdraw/list',
],
'post': [
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/replace',
'position/trading-stop',
],
},
'publicLinear': {
'get': [
'kline',
'recent-trading-records',
'funding/prev-funding-rate',
'mark-price-kline',
],
},
'privateLinear': {
'get': [
'order/list',
'order/search',
'stop-order/list',
'stop-order/search',
'position/list',
'trade/execution/list',
'trade/closed-pnl/list',
'risk-limit',
'funding/prev-funding',
'funding/predicted-funding',
],
'post': [
'order/create',
'order/cancel',
'order/cancelAll',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancelAll',
'position/switch-isolated',
'position/set-auto-add-margin',
'position/set-leverage',
'position/trading-stop',
'position/add-margin',
],
},
'position': {
'post': [
'change-position-margin',
],
},
'user': {
'get': [
'leverage',
],
'post': [
'leverage/save',
],
},
},
'httpExceptions': {
'403': RateLimitExceeded, # Forbidden -- You request too many times
},
'exceptions': {
'exact': {
'10001': BadRequest, # parameter error
'10002': InvalidNonce, # request expired, check your timestamp and recv_window
'10003': AuthenticationError, # Invalid apikey
'10004': AuthenticationError, # invalid sign
'10005': PermissionDenied, # permission denied for current apikey
'10006': RateLimitExceeded, # too many requests
'10007': AuthenticationError, # api_key not found in your request parameters
'10010': PermissionDenied, # request ip mismatch
'10017': BadRequest, # request path not found or request method is invalid
'20001': OrderNotFound, # Order not exists
'20003': InvalidOrder, # missing parameter side
'20004': InvalidOrder, # invalid parameter side
'20005': InvalidOrder, # missing parameter symbol
'20006': InvalidOrder, # invalid parameter symbol
'20007': InvalidOrder, # missing parameter order_type
'20008': InvalidOrder, # invalid parameter order_type
'20009': InvalidOrder, # missing parameter qty
'20010': InvalidOrder, # qty must be greater than 0
'20011': InvalidOrder, # qty must be an integer
'20012': InvalidOrder, # qty must be greater than zero and less than 1 million
'20013': InvalidOrder, # missing parameter price
'20014': InvalidOrder, # price must be greater than 0
'20015': InvalidOrder, # missing parameter time_in_force
'20016': InvalidOrder, # invalid value for parameter time_in_force
'20017': InvalidOrder, # missing parameter order_id
'20018': InvalidOrder, # invalid date format
'20019': InvalidOrder, # missing parameter stop_px
'20020': InvalidOrder, # missing parameter base_price
'20021': InvalidOrder, # missing parameter stop_order_id
'20022': BadRequest, # missing parameter leverage
'20023': BadRequest, # leverage must be a number
'20031': BadRequest, # leverage must be greater than zero
'20070': BadRequest, # missing parameter margin
'20071': BadRequest, # margin must be greater than zero
'20084': BadRequest, # order_id or order_link_id is required
'30001': BadRequest, # order_link_id is repeated
'30003': InvalidOrder, # qty must be more than the minimum allowed
'30004': InvalidOrder, # qty must be less than the maximum allowed
'30005': InvalidOrder, # price exceeds maximum allowed
'30007': InvalidOrder, # price exceeds minimum allowed
'30008': InvalidOrder, # invalid order_type
'30009': ExchangeError, # no position found
'30010': InsufficientFunds, # insufficient wallet balance
'30011': PermissionDenied, # operation not allowed as position is undergoing liquidation
'30012': PermissionDenied, # operation not allowed as position is undergoing ADL
'30013': PermissionDenied, # position is in liq or adl status
'30014': InvalidOrder, # invalid closing order, qty should not greater than size
'30015': InvalidOrder, # invalid closing order, side should be opposite
'30016': ExchangeError, # TS and SL must be cancelled first while closing position
'30017': InvalidOrder, # estimated fill price cannot be lower than current Buy liq_price
'30018': InvalidOrder, # estimated fill price cannot be higher than current Sell liq_price
'30019': InvalidOrder, # cannot attach TP/SL params for non-zero position when placing non-opening position order
'30020': InvalidOrder, # position already has TP/SL params
'30021': InvalidOrder, # cannot afford estimated position_margin
'30022': InvalidOrder, # estimated buy liq_price cannot be higher than current mark_price
'30023': InvalidOrder, # estimated sell liq_price cannot be lower than current mark_price
'30024': InvalidOrder, # cannot set TP/SL/TS for zero-position
'30025': InvalidOrder, # trigger price should bigger than 10% of last price
'30026': InvalidOrder, # price too high
'30027': InvalidOrder, # price set for Take profit should be higher than Last Traded Price
'30028': InvalidOrder, # price set for Stop loss should be between Liquidation price and Last Traded Price
'30029': InvalidOrder, # price set for Stop loss should be between Last Traded Price and Liquidation price
'30030': InvalidOrder, # price set for Take profit should be lower than Last Traded Price
'30031': InsufficientFunds, # insufficient available balance for order cost
'30032': InvalidOrder, # order has been filled or cancelled
'30033': RateLimitExceeded, # The number of stop orders exceeds maximum limit allowed
'30034': OrderNotFound, # no order found
'30035': RateLimitExceeded, # too fast to cancel
'30036': ExchangeError, # the expected position value after order execution exceeds the current risk limit
'30037': InvalidOrder, # order already cancelled
'30041': ExchangeError, # no position found
'30042': InsufficientFunds, # insufficient wallet balance
'30043': PermissionDenied, # operation not allowed as position is undergoing liquidation
'30044': PermissionDenied, # operation not allowed as position is undergoing AD
'30045': PermissionDenied, # operation not allowed as position is not normal status
'30049': InsufficientFunds, # insufficient available balance
'30050': ExchangeError, # any adjustments made will trigger immediate liquidation
'30051': ExchangeError, # due to risk limit, cannot adjust leverage
'30052': ExchangeError, # leverage can not less than 1
'30054': ExchangeError, # position margin is invalid
'30057': ExchangeError, # requested quantity of contracts exceeds risk limit
'30063': ExchangeError, # reduce-only rule not satisfied
'30067': InsufficientFunds, # insufficient available balance
'30068': ExchangeError, # exit value must be positive
'34026': ExchangeError, # the limit is no change
},
'broad': {
'unknown orderInfo': OrderNotFound, # {"ret_code":-1,"ret_msg":"unknown orderInfo","ext_code":"","ext_info":"","result":null,"time_now":"1584030414.005545","rate_limit_status":99,"rate_limit_reset_ms":1584030414003,"rate_limit":100}
},
},
'precisionMode': TICK_SIZE,
'options': {
'marketTypes': {
'BTC/USDT': 'linear',
},
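                # symbols mapped to 'linear' here are routed to the publicLinear/privateLinear
                # endpoints; all other symbols use the inverse v2 endpoints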
'code': 'BTC',
'fetchBalance': {
'code': 'BTC',
},
'cancelAllOrders': {
'method': 'privatePostOrderCancelAll', # privatePostStopOrderCancelAll
},
'recvWindow': 5 * 1000, # 5 sec default
                'timeDifference': 0, # the difference between system clock and Bybit server clock
'adjustForTimeDifference': False, # controls the adjustment logic upon instantiation
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.00075,
'maker': -0.00025,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
})
def nonce(self):
return self.milliseconds() - self.options['timeDifference']
def load_time_difference(self, params={}):
serverTime = self.fetch_time(params)
after = self.milliseconds()
self.options['timeDifference'] = after - serverTime
return self.options['timeDifference']
def fetch_time(self, params={}):
response = self.publicGetTime(params)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: {},
# time_now: '1583933682.448826'
# }
#
return self.safe_timestamp(response, 'time_now')
def fetch_markets(self, params={}):
if self.options['adjustForTimeDifference']:
self.load_time_difference()
response = self.publicGetSymbols(params)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# name: 'BTCUSD',
# base_currency: 'BTC',
# quote_currency: 'USD',
# price_scale: 2,
# taker_fee: '0.00075',
# maker_fee: '-0.00025',
# leverage_filter: {min_leverage: 1, max_leverage: 100, leverage_step: '0.01'},
# price_filter: {min_price: '0.5', max_price: '999999.5', tick_size: '0.5'},
# lot_size_filter: {max_trading_qty: 1000000, min_trading_qty: 1, qty_step: 1}
# },
# ],
# time_now: '1583930495.454196'
# }
#
markets = self.safe_value(response, 'result', [])
options = self.safe_value(self.options, 'fetchMarkets', {})
linearQuoteCurrencies = self.safe_value(options, 'linear', {'USDT': True})
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'name')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
linear = (quote in linearQuoteCurrencies)
inverse = not linear
symbol = base + '/' + quote
baseQuote = base + quote
if baseQuote != id:
symbol = id
lotSizeFilter = self.safe_value(market, 'lot_size_filter', {})
priceFilter = self.safe_value(market, 'price_filter', {})
precision = {
'amount': self.safe_float(lotSizeFilter, 'qty_step'),
'price': self.safe_float(priceFilter, 'tick_size'),
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'active': None,
'precision': precision,
'taker': self.safe_float(market, 'taker_fee'),
'maker': self.safe_float(market, 'maker_fee'),
'type': 'future',
'spot': False,
'future': True,
'option': False,
'linear': linear,
'inverse': inverse,
'limits': {
'amount': {
'min': self.safe_float(lotSizeFilter, 'min_trading_qty'),
'max': self.safe_float(lotSizeFilter, 'max_trading_qty'),
},
'price': {
'min': self.safe_float(priceFilter, 'min_price'),
'max': self.safe_float(priceFilter, 'max_price'),
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
defaultCode = self.safe_value(self.options, 'code', 'BTC')
options = self.safe_value(self.options, 'fetchBalance', {})
code = self.safe_value(options, 'code', defaultCode)
currency = self.currency(code)
request = {
'coin': currency['id'],
}
response = self.privateGetWalletBalance(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: {
# BTC: {
# equity: 0,
# available_balance: 0,
# used_margin: 0,
# order_margin: 0,
# position_margin: 0,
# occ_closing_fee: 0,
# occ_funding_fee: 0,
# wallet_balance: 0,
# realised_pnl: 0,
# unrealised_pnl: 0,
# cum_realised_pnl: 0,
# given_cash: 0,
# service_cash: 0
# }
# },
# time_now: '1583937810.370020',
# rate_limit_status: 119,
# rate_limit_reset_ms: 1583937810367,
# rate_limit: 120
# }
#
result = {
'info': response,
}
balances = self.safe_value(response, 'result', {})
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
balance = balances[currencyId]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'available_balance')
account['used'] = self.safe_float(balance, 'used_margin')
account['total'] = self.safe_float(balance, 'equity')
result[code] = account
return self.parse_balance(result)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
#
timestamp = None
marketId = self.safe_string(ticker, 'symbol')
symbol = marketId
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if (symbol is None) and (market is not None):
symbol = market['symbol']
last = self.safe_float(ticker, 'last_price')
open = self.safe_float(ticker, 'prev_price_24h')
percentage = self.safe_float(ticker, 'price_24h_pcnt')
if percentage is not None:
percentage *= 100
change = None
average = None
if (last is not None) and (open is not None):
change = last - open
average = self.sum(open, last) / 2
baseVolume = self.safe_float(ticker, 'turnover_24h')
quoteVolume = self.safe_float(ticker, 'volume_24h')
vwap = None
if quoteVolume is not None and baseVolume is not None:
vwap = quoteVolume / baseVolume
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high_price_24h'),
'low': self.safe_float(ticker, 'low_price_24h'),
'bid': self.safe_float(ticker, 'bid_price'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask_price'),
'askVolume': None,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetTickers(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
# ],
# time_now: '1583948195.818255'
# }
#
result = self.safe_value(response, 'result', [])
first = self.safe_value(result, 0)
timestamp = self.safe_timestamp(response, 'time_now')
ticker = self.parse_ticker(first, market)
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTickers(params)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
# ],
# time_now: '1583948195.818255'
# }
#
result = self.safe_value(response, 'result', [])
tickers = {}
for i in range(0, len(result)):
ticker = self.parse_ticker(result[i])
symbol = ticker['symbol']
tickers[symbol] = ticker
return self.filter_by_array(tickers, 'symbol', symbols)
def parse_ohlcv(self, ohlcv, market=None):
#
# inverse perpetual BTC/USD
#
# {
# symbol: 'BTCUSD',
# interval: '1',
# open_time: 1583952540,
# open: '7760.5',
# high: '7764',
# low: '7757',
# close: '7763.5',
# volume: '1259766',
# turnover: '162.32773718999994'
# }
#
# linear perpetual BTC/USDT
#
# {
# "id":143536,
# "symbol":"BTCUSDT",
# "period":"15",
# "start_at":1587883500,
# "volume":1.035,
# "open":7540.5,
# "high":7541,
# "low":7540.5,
# "close":7541
# }
#
return [
self.safe_timestamp_2(ohlcv, 'open_time', 'start_at'),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float_2(ohlcv, 'turnover', 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'interval': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
now = self.seconds()
if since is None:
if limit is None:
raise ArgumentsRequired(self.id + ' fetchOHLCV requires a since argument or a limit argument')
else:
request['from'] = now - limit * duration
else:
request['from'] = int(since / 1000)
if limit is not None:
request['limit'] = limit # max 200, default 200
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol)
method = 'publicLinearGetKline' if (marketType == 'linear') else 'publicGetKlineList'
response = getattr(self, method)(self.extend(request, params))
#
# inverse perpetual BTC/USD
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# interval: '1',
# open_time: 1583952540,
# open: '7760.5',
# high: '7764',
# low: '7757',
# close: '7763.5',
# volume: '1259766',
# turnover: '162.32773718999994'
# },
# ],
# time_now: '1583953082.397330'
# }
#
# linear perpetual BTC/USDT
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":[
# {
# "id":143536,
# "symbol":"BTCUSDT",
# "period":"15",
# "start_at":1587883500,
# "volume":1.035,
# "open":7540.5,
# "high":7541,
# "low":7540.5,
# "close":7541
# }
# ],
# "time_now":"1587884120.168077"
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_ohlcvs(result, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# id: 43785688,
# symbol: 'BTCUSD',
# price: 7786,
# qty: 67,
# side: 'Sell',
# time: '2020-03-11T19:18:30.123Z'
# }
#
# fetchMyTrades, fetchOrderTrades(private)
#
# {
# "closed_size": 0,
# "cross_seq": 277136382,
# "exec_fee": "0.0000001",
# "exec_id": "256e5ef8-abfe-5772-971b-f944e15e0d68",
# "exec_price": "8178.5",
# "exec_qty": 1,
# # the docs say the exec_time field is "abandoned" now
# # the user should use "trade_time_ms"
# "exec_time": "1571676941.70682",
# "exec_type": "Trade", #Exec Type Enum
# "exec_value": "0.00012227",
# "fee_rate": "0.00075",
# "last_liquidity_ind": "RemovedLiquidity", #Liquidity Enum
# "leaves_qty": 0,
# "nth_fill": 2,
# "order_id": "7ad50cb1-9ad0-4f74-804b-d82a516e1029",
# "order_link_id": "",
# "order_price": "8178",
# "order_qty": 1,
# "order_type": "Market", #Order Type Enum
# "side": "Buy", #Side Enum
# "symbol": "BTCUSD", #Symbol Enum
# "user_id": 1,
# "trade_time_ms": 1577480599000
# }
#
id = self.safe_string_2(trade, 'id', 'exec_id')
symbol = None
base = None
marketId = self.safe_string(trade, 'symbol')
amount = self.safe_float_2(trade, 'qty', 'exec_qty')
cost = self.safe_float(trade, 'exec_value')
price = self.safe_float_2(trade, 'price', 'exec_price')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
base = market['base']
if market is not None:
if symbol is None:
symbol = market['symbol']
base = market['base']
if cost is None:
if amount is not None:
if price is not None:
cost = amount * price
timestamp = self.parse8601(self.safe_string(trade, 'time'))
if timestamp is None:
timestamp = self.safe_integer(trade, 'trade_time_ms')
side = self.safe_string_lower(trade, 'side')
lastLiquidityInd = self.safe_string(trade, 'last_liquidity_ind')
takerOrMaker = 'maker' if (lastLiquidityInd == 'AddedLiquidity') else 'taker'
feeCost = self.safe_float(trade, 'exec_fee')
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': base,
'rate': self.safe_float(trade, 'fee_rate'),
}
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': self.safe_string(trade, 'order_id'),
'type': self.safe_string_lower(trade, 'order_type'),
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'from': 123, # from id
}
if limit is not None:
request['count'] = limit # default 500, max 1000
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol)
method = 'publicLinearGetRecentTradingRecords' if (marketType == 'linear') else 'publicGetTradingRecords'
response = getattr(self, method)(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# id: 43785688,
# symbol: 'BTCUSD',
# price: 7786,
# qty: 67,
# side: 'Sell',
# time: '2020-03-11T19:18:30.123Z'
# },
# ],
# time_now: '1583954313.393362'
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_trades(result, market, since, limit)
def parse_order_book(self, orderbook, timestamp=None, bidsKey='Buy', asksKey='Sell', priceKey='price', amountKey='size'):
bids = []
asks = []
for i in range(0, len(orderbook)):
bidask = orderbook[i]
side = self.safe_string(bidask, 'side')
if side == 'Buy':
bids.append(self.parse_bid_ask(bidask, priceKey, amountKey))
elif side == 'Sell':
asks.append(self.parse_bid_ask(bidask, priceKey, amountKey))
else:
raise ExchangeError(self.id + ' parseOrderBook encountered an unrecognized bidask format: ' + self.json(bidask))
return {
'bids': self.sort_by(bids, 0, True),
'asks': self.sort_by(asks, 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetOrderBookL2(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {symbol: 'BTCUSD', price: '7767.5', size: 677956, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7767', size: 580690, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7766.5', size: 475252, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7768', size: 330847, side: 'Sell'},
# {symbol: 'BTCUSD', price: '7768.5', size: 97159, side: 'Sell'},
# {symbol: 'BTCUSD', price: '7769', size: 6508, side: 'Sell'},
# ],
# time_now: '1583954829.874823'
# }
#
result = self.safe_value(response, 'result', [])
timestamp = self.safe_timestamp(response, 'time_now')
return self.parse_order_book(result, timestamp, 'Buy', 'Sell', 'price', 'size')
def parse_order_status(self, status):
statuses = {
# basic orders
'Created': 'open',
'Rejected': 'rejected', # order is triggered but failed upon being placed
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'Cancelled': 'canceled',
'PendingCancel': 'canceling', # the engine has received the cancellation but there is no guarantee that it will be successful
# conditional orders
'Active': 'open', # order is triggered and placed successfully
'Untriggered': 'open', # order waits to be triggered
'Triggered': 'closed', # order is triggered
# 'Cancelled': 'canceled', # order is cancelled
# 'Rejected': 'rejected', # order is triggered but fail to be placed
'Deactivated': 'canceled', # conditional order was cancelled before triggering
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "user_id": 1,
# "order_id": "335fd977-e5a5-4781-b6d0-c772d5bfb95b",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8800,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Created",
# "last_exec_time": 0,
# "last_exec_price": 0,
# "leaves_qty": 1,
# "cum_exec_qty": 0, # in contracts, where 1 contract = 1 quote currency unit(USD for inverse contracts)
# "cum_exec_value": 0, # in contract's underlying currency(BTC for inverse contracts)
# "cum_exec_fee": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-11-30T11:03:43.452Z",
# "updated_at": "2019-11-30T11:03:43.455Z"
# }
#
# fetchOrder
#
# {
# "user_id" : 599946,
# "symbol" : "BTCUSD",
# "side" : "Buy",
# "order_type" : "Limit",
# "price" : "7948",
# "qty" : 10,
# "time_in_force" : "GoodTillCancel",
# "order_status" : "Filled",
# "ext_fields" : {
# "o_req_num" : -1600687220498,
# "xreq_type" : "x_create"
# },
# "last_exec_time" : "1588150113.968422",
# "last_exec_price" : "7948",
# "leaves_qty" : 0,
# "leaves_value" : "0",
# "cum_exec_qty" : 10,
# "cum_exec_value" : "0.00125817",
# "cum_exec_fee" : "-0.00000031",
# "reject_reason" : "",
# "cancel_type" : "",
# "order_link_id" : "",
# "created_at" : "2020-04-29T08:45:24.399146Z",
# "updated_at" : "2020-04-29T08:48:33.968422Z",
# "order_id" : "dd2504b9-0157-406a-99e1-efa522373944"
# }
#
marketId = self.safe_string(order, 'symbol')
symbol = None
base = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
id = self.safe_string(order, 'order_id')
price = self.safe_float(order, 'price')
average = self.safe_float(order, 'average_price')
amount = self.safe_float(order, 'qty')
cost = self.safe_float(order, 'cum_exec_value')
filled = self.safe_float(order, 'cum_exec_qty')
remaining = self.safe_float(order, 'leaves_qty')
if market is not None:
symbol = market['symbol']
base = market['base']
lastTradeTimestamp = self.safe_timestamp(order, 'last_exec_time')
if lastTradeTimestamp == 0:
lastTradeTimestamp = None
if (filled is None) and (amount is not None) and (remaining is not None):
filled = amount - remaining
if filled is not None:
if (remaining is None) and (amount is not None):
remaining = amount - filled
if cost is None:
if price is not None:
cost = price * filled
status = self.parse_order_status(self.safe_string(order, 'order_status'))
side = self.safe_string_lower(order, 'side')
feeCost = self.safe_float(order, 'cum_exec_fee')
fee = None
if feeCost is not None:
feeCost = abs(feeCost)
fee = {
'cost': feeCost,
'currency': base,
}
type = self.safe_string_lower(order, 'order_type')
clientOrderId = self.safe_string(order, 'order_link_id')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'order_link_id': 'string', # one of order_id, stop_order_id or order_link_id is required
# regular orders ---------------------------------------------
# 'order_id': id, # one of order_id or order_link_id is required for regular orders
# conditional orders ---------------------------------------------
# 'stop_order_id': id, # one of stop_order_id or order_link_id is required for conditional orders
}
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol)
method = 'privateLinearGetOrderSearch' if (marketType == 'linear') else 'privateGetOrder'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is None:
orderLinkId = self.safe_string(params, 'order_link_id')
if orderLinkId is None:
request['order_id'] = id
else:
method = 'privateLinearGetStopOrderSearch' if (marketType == 'linear') else 'privateGetStopOrder'
response = getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Sell",
# "order_type": "Limit",
# "price": "8083",
# "qty": 10,
# "time_in_force": "GoodTillCancel",
# "order_status": "New",
# "ext_fields": {"o_req_num": -308787, "xreq_type": "x_create", "xreq_offset": 4154640},
# "leaves_qty": 10,
# "leaves_value": "0.00123716",
# "cum_exec_qty": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-10-21T07:28:19.396246Z",
# "updated_at": "2019-10-21T07:28:19.396246Z",
# "order_id": "efa44157-c355-4a98-b6d6-1d846a936b93"
# },
# "time_now": "1571651135.291930",
# "rate_limit_status": 99, # The remaining number of accesses in one minute
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": "8000",
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Untriggered",
# "ext_fields": {},
# "leaves_qty": 1,
# "leaves_value": "0.00013333",
# "cum_exec_qty": 0,
# "cum_exec_value": null,
# "cum_exec_fee": null,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-12-27T19:56:24.052194Z",
# "updated_at": "2019-12-27T19:56:24.052194Z",
# "order_id": "378a1bbc-a93a-4e75-87f4-502ea754ba36"
# },
# "time_now": "1577476584.386958",
# "rate_limit_status": 99,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
qty = self.amount_to_precision(symbol, amount)
if market['inverse']:
qty = int(qty)
else:
qty = float(qty)
request = {
# orders ---------------------------------------------------------
'side': self.capitalize(side),
'symbol': market['id'],
'order_type': self.capitalize(type),
'qty': qty, # order quantity in USD, integer only
# 'price': float(self.price_to_precision(symbol, price)), # required for limit orders
'time_in_force': 'GoodTillCancel', # ImmediateOrCancel, FillOrKill, PostOnly
# 'take_profit': 123.45, # take profit price, only take effect upon opening the position
# 'stop_loss': 123.45, # stop loss price, only take effect upon opening the position
# 'reduce_only': False, # reduce only
# when creating a closing order, bybit recommends a True value for
# close_on_trigger to avoid failing due to insufficient available margin
# 'close_on_trigger': False,
# 'order_link_id': 'string', # unique client order id, max 36 characters
# conditional orders ---------------------------------------------
# base_price is used to compare with the value of stop_px, to decide
# whether your conditional order will be triggered by crossing trigger
# price from upper side or lower side, mainly used to identify the
# expected direction of the current conditional order
# 'base_price': 123.45, # required for conditional orders
# 'stop_px': 123.45, # trigger price, required for conditional orders
# 'trigger_by': 'LastPrice', # IndexPrice, MarkPrice
}
priceIsRequired = False
if type == 'limit':
priceIsRequired = True
if priceIsRequired:
if price is not None:
request['price'] = float(self.price_to_precision(symbol, price))
else:
raise ArgumentsRequired(self.id + ' createOrder requires a price argument for a ' + type + ' order')
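        # usage sketch with hypothetical values: supplying both stop_px and base_price in
        # params routes the request to the conditional (stop) order endpoint handled below, e.g.
        # exchange.create_order('BTC/USD', 'limit', 'buy', 1, 8000, {'stop_px': 7500, 'base_price': 7000})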
stopPx = self.safe_value(params, 'stop_px')
basePrice = self.safe_value(params, 'base_price')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol)
method = 'privateLinearPostOrderCreate' if (marketType == 'linear') else 'privatePostOrderCreate'
if stopPx is not None:
if basePrice is None:
raise ArgumentsRequired(self.id + ' createOrder requires both the stop_px and base_price params for a conditional ' + type + ' order')
else:
method = 'privateLinearPostStopOrderCreate' if (marketType == 'linear') else 'openapiPostStopOrderCreate'
request['stop_px'] = float(self.price_to_precision(symbol, stopPx))
request['base_price'] = float(self.price_to_precision(symbol, basePrice))
params = self.omit(params, ['stop_px', 'base_price'])
elif basePrice is not None:
raise ArgumentsRequired(self.id + ' createOrder requires both the stop_px and base_price params for a conditional ' + type + ' order')
response = getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "order_id": "335fd977-e5a5-4781-b6d0-c772d5bfb95b",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8800,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Created",
# "last_exec_time": 0,
# "last_exec_price": 0,
# "leaves_qty": 1,
# "cum_exec_qty": 0,
# "cum_exec_value": 0,
# "cum_exec_fee": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-11-30T11:03:43.452Z",
# "updated_at": "2019-11-30T11:03:43.455Z"
# },
# "time_now": "1575111823.458705",
# "rate_limit_status": 98,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8000,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "order_status": "Untriggered",
# "ext_fields": {
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "expected_direction": "Rising",
# "trigger_price": 7500,
# "op_from": "api",
# "remark": "127.0.01",
# "o_req_num": 0
# },
# "leaves_qty": 1,
# "leaves_value": 0.00013333,
# "reject_reason": null,
# "cross_seq": -1,
# "created_at": "2019-12-27T12:48:24.000Z",
# "updated_at": "2019-12-27T12:48:24.000Z",
# "stop_px": 7500,
# "stop_order_id": "a85cd1c0-a9a4-49d3-a1bd-bab5ebe946d5"
# },
# "ext_info": null,
# "time_now": "1577450904.327654",
# "rate_limit_status": 99,
# "rate_limit_reset_ms": 1577450904335,
# "rate_limit": "100"
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' editOrder requires an symbol argument')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol)
if marketType == 'linear':
raise NotSupported(self.id + ' does not support editOrder for ' + marketType + ' ' + symbol + ' market type')
self.load_markets()
market = self.market(symbol)
request = {
# 'order_id': id, # only for non-conditional orders
'symbol': market['id'],
# 'p_r_qty': self.amount_to_precision(symbol, amount), # new order quantity, optional
# 'p_r_price' self.priceToprecision(symbol, price), # new order price, optional
# ----------------------------------------------------------------
# conditional orders
# 'stop_order_id': id, # only for conditional orders
# 'p_r_trigger_price': 123.45, # new trigger price also known as stop_px
}
stopOrderId = self.safe_string(params, 'stop_order_id')
method = 'openapiPostOrderReplace'
if stopOrderId is not None:
method = 'openapiPostStopOrderReplace'
request['stop_order_id'] = stopOrderId
params = self.omit(params, ['stop_order_id'])
else:
request['order_id'] = id
if amount is not None:
request['p_r_qty'] = int(self.amount_to_precision(symbol, amount))
if price is not None:
request['p_r_price'] = float(self.price_to_precision(symbol, price))
response = getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {"order_id": "efa44157-c355-4a98-b6d6-1d846a936b93"},
# "time_now": "1539778407.210858",
# "rate_limit_status": 99, # remaining number of accesses in one minute
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {"stop_order_id": "378a1bbc-a93a-4e75-87f4-502ea754ba36"},
# "ext_info": null,
# "time_now": "1577475760.604942",
# "rate_limit_status": 96,
# "rate_limit_reset_ms": 1577475760612,
# "rate_limit": "100"
# }
#
result = self.safe_value(response, 'result', {})
return {
'info': response,
'id': self.safe_string_2(result, 'order_id', 'stop_order_id'),
'order_id': self.safe_string(result, 'order_id'),
'stop_order_id': self.safe_string(result, 'stop_order_id'),
}
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'order_link_id': 'string', # one of order_id, stop_order_id or order_link_id is required
# regular orders ---------------------------------------------
# 'order_id': id, # one of order_id or order_link_id is required for regular orders
# conditional orders ---------------------------------------------
# 'stop_order_id': id, # one of stop_order_id or order_link_id is required for conditional orders
}
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_value(marketTypes, symbol)
method = 'privateLinearPostOrderCancel' if (marketType == 'linear') else 'privatePostOrderCancel'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is None:
orderLinkId = self.safe_string(params, 'order_link_id')
if orderLinkId is None:
request['order_id'] = id
else:
method = 'privateLinearPostStopOrderCancel' if (marketType == 'linear') else 'openapiPostStopOrderCancel'
response = getattr(self, method)(self.extend(request, params))
result = self.safe_value(response, 'result', {})
return self.parse_order(result, market)
def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
options = self.safe_value(self.options, 'cancelAllOrders')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol)
defaultMethod = 'privateLinearPostOrderCancelAll' if (marketType == 'linear') else 'privatePostOrderCancelAll'
method = self.safe_string(options, 'method', defaultMethod)
response = getattr(self, method)(self.extend(request, params))
result = self.safe_value(response, 'result', [])
return self.parse_orders(result, market)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'order_id': 'string'
# 'order_link_id': 'string', # unique client order id, max 36 characters
# 'symbol': market['id'], # default BTCUSD
# 'order': 'desc', # asc
# 'page': 1,
# 'limit': 20, # max 50
# 'order_status': 'Created,New'
# conditional orders ---------------------------------------------
# 'stop_order_id': 'string',
# 'stop_order_status': 'Untriggered',
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit
options = self.safe_value(self.options, 'fetchOrders', {})
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol)
defaultMethod = 'privateLinearGetOrderList' if (marketType == 'linear') else 'openapiGetOrderList'
query = params
if ('stop_order_id' in params) or ('stop_order_status' in params):
stopOrderStatus = self.safe_value(params, 'stopOrderStatus')
if stopOrderStatus is not None:
if isinstance(stopOrderStatus, list):
stopOrderStatus = ','.join(stopOrderStatus)
request['stop_order_status'] = stopOrderStatus
query = self.omit(params, 'stop_order_status')
defaultMethod = 'privateLinearGetStopOrderList' if (marketType == 'linear') else 'openapiGetStopOrderList'
method = self.safe_string(options, 'method', defaultMethod)
response = getattr(self, method)(self.extend(request, query))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "current_page": 1,
# "last_page": 6,
# "data": [
# {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Sell",
# "order_type": "Market",
# "price": 7074,
# "qty": 2,
# "time_in_force": "ImmediateOrCancel",
# "order_status": "Filled",
# "ext_fields": {
# "close_on_trigger": True,
# "orig_order_type": "BLimit",
# "prior_x_req_price": 5898.5,
# "op_from": "pc",
# "remark": "127.0.0.1",
# "o_req_num": -34799032763,
# "xreq_type": "x_create"
# },
# "last_exec_time": "1577448481.696421",
# "last_exec_price": 7070.5,
# "leaves_qty": 0,
# "leaves_value": 0,
# "cum_exec_qty": 2,
# "cum_exec_value": 0.00028283,
# "cum_exec_fee": 0.00002,
# "reject_reason": "NoError",
# "order_link_id": "",
# "created_at": "2019-12-27T12:08:01.000Z",
# "updated_at": "2019-12-27T12:08:01.000Z",
# "order_id": "f185806b-b801-40ff-adec-52289370ed62"
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577448922.437871",
# "rate_limit_status": 98,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "current_page": 1,
# "last_page": 1,
# "data": [
# {
# "user_id": 1,
# "stop_order_status": "Untriggered",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8000,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "order_link_id": "",
# "created_at": "2019-12-27T12:48:24.000Z",
# "updated_at": "2019-12-27T12:48:24.000Z",
# "stop_px": 7500,
# "stop_order_id": "a85cd1c0-a9a4-49d3-a1bd-bab5ebe946d5"
# },
# ]
# },
# "ext_info": null,
# "time_now": "1577451658.755468",
# "rate_limit_status": 599,
# "rate_limit_reset_ms": 1577451658762,
# "rate_limit": 600
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_orders(data, market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
defaultStatuses = [
'Rejected',
'Filled',
'Cancelled',
# conditional orders
# 'Active',
# 'Triggered',
# 'Cancelled',
# 'Rejected',
# 'Deactivated',
]
options = self.safe_value(self.options, 'fetchClosedOrders', {})
status = self.safe_value(options, 'order_status', defaultStatuses)
if isinstance(status, list):
status = ','.join(status)
request = {}
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is None:
request['order_status'] = status
else:
request['stop_order_status'] = stopOrderStatus
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
defaultStatuses = [
'Created',
'New',
'PartiallyFilled',
'PendingCancel',
# conditional orders
# 'Untriggered',
]
options = self.safe_value(self.options, 'fetchOpenOrders', {})
status = self.safe_value(options, 'order_status', defaultStatuses)
if isinstance(status, list):
status = ','.join(status)
request = {}
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is None:
request['order_status'] = status
else:
request['stop_order_status'] = stopOrderStatus
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
request = {
'order_id': id,
}
return self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'order_id': 'f185806b-b801-40ff-adec-52289370ed62', # if not provided will return user's trading records
# 'symbol': market['id'],
# 'start_time': int(since / 1000),
# 'page': 1,
# 'limit' 20, # max 50
}
market = None
if symbol is None:
orderId = self.safe_string(params, 'order_id')
if orderId is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument or an order_id param')
else:
request['order_id'] = orderId
params = self.omit(params, 'order_id')
else:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['start_time'] = since
if limit is not None:
request['limit'] = limit # default 20, max 50
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol)
method = 'privateLinearGetTradeExecutionList' if (marketType == 'linear') else 'privateGetExecutionList'
response = getattr(self, method)(self.extend(request, params))
#
# inverse
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "order_id": "Abandonednot !", # Abandonednot !
# "trade_list": [
# {
# "closed_size": 0,
# "cross_seq": 277136382,
# "exec_fee": "0.0000001",
# "exec_id": "256e5ef8-abfe-5772-971b-f944e15e0d68",
# "exec_price": "8178.5",
# "exec_qty": 1,
# "exec_time": "1571676941.70682",
# "exec_type": "Trade", #Exec Type Enum
# "exec_value": "0.00012227",
# "fee_rate": "0.00075",
# "last_liquidity_ind": "RemovedLiquidity", #Liquidity Enum
# "leaves_qty": 0,
# "nth_fill": 2,
# "order_id": "7ad50cb1-9ad0-4f74-804b-d82a516e1029",
# "order_link_id": "",
# "order_price": "8178",
# "order_qty": 1,
# "order_type": "Market", #Order Type Enum
# "side": "Buy", #Side Enum
# "symbol": "BTCUSD", #Symbol Enum
# "user_id": 1
# }
# ]
# },
# "time_now": "1577483699.281488",
# "rate_limit_status": 118,
# "rate_limit_reset_ms": 1577483699244737,
# "rate_limit": 120
# }
#
# linear
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":{
# "current_page":1,
# "data":[
# {
# "order_id":"b59418ec-14d4-4ef9-b9f4-721d5d576974",
# "order_link_id":"",
# "side":"Sell",
# "symbol":"BTCUSDT",
# "exec_id":"0327284d-faec-5191-bd89-acc5b4fafda9",
# "price":0.5,
# "order_price":0.5,
# "order_qty":0.01,
# "order_type":"Market",
# "fee_rate":0.00075,
# "exec_price":9709.5,
# "exec_type":"Trade",
# "exec_qty":0.01,
# "exec_fee":0.07282125,
# "exec_value":97.095,
# "leaves_qty":0,
# "closed_size":0.01,
# "last_liquidity_ind":"RemovedLiquidity",
# "trade_time":1591648052,
# "trade_time_ms":1591648052861
# }
# ]
# },
# "time_now":"1591736501.979264",
# "rate_limit_status":119,
# "rate_limit_reset_ms":1591736501974,
# "rate_limit":120
# }
#
result = self.safe_value(response, 'result', {})
trades = self.safe_value_2(result, 'trade_list', 'data', [])
return self.parse_trades(trades, market, since, limit)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
if code is None:
            raise ArgumentsRequired(self.id + ' fetchDeposits() requires a currency code argument')
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
if limit is not None:
request['count'] = limit
response = self.privateGetGetDeposits(self.extend(request, params))
#
# {
# "jsonrpc": "2.0",
# "id": 5611,
# "result": {
# "count": 1,
# "data": [
# {
# "address": "2N35qDKDY22zmJq9eSyiAerMD4enJ1xx6ax",
# "amount": 5,
# "currency": "BTC",
# "received_timestamp": 1549295017670,
# "state": "completed",
# "transaction_id": "230669110fdaf0a0dbcdc079b6b8b43d5af29cc73683835b9bc6b3406c065fda",
# "updated_timestamp": 1549295130159
# }
# ]
# }
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_transactions(data, currency, since, limit, params)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'coin': currency['id'],
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
# 'status': 'Pending', # ToBeConfirmed, UnderReview, Pending, Success, CancelByUser, Reject, Expire
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit
response = self.openapiGetWalletWithdrawList(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 137,
# "user_id": 1,
# "coin": "XRP", # Coin Enum
# "status": "Pending", # Withdraw Status Enum
# "amount": "20.00000000",
# "fee": "0.25000000",
# "address": "rH7H595XYEVTEHU2FySYsWnmfACBnZS9zM",
# "tx_id": "",
# "submited_at": "2019-06-11T02:20:24.000Z",
# "updated_at": "2019-06-11T02:20:24.000Z"
# },
# ],
# "current_page": 1,
# "last_page": 1
# },
# "ext_info": null,
# "time_now": "1577482295.125488",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577482295132,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_transactions(data, currency, since, limit, params)
def parse_transaction_status(self, status):
statuses = {
'ToBeConfirmed': 'pending',
'UnderReview': 'pending',
'Pending': 'pending',
'Success': 'ok',
'CancelByUser': 'canceled',
'Reject': 'rejected',
'Expire': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchWithdrawals
#
# {
# "id": 137,
# "user_id": 1,
# "coin": "XRP", # Coin Enum
# "status": "Pending", # Withdraw Status Enum
# "amount": "20.00000000",
# "fee": "0.25000000",
# "address": "rH7H595XYEVTEHU2FySYsWnmfACBnZS9zM",
# "tx_id": "",
# "submited_at": "2019-06-11T02:20:24.000Z",
# "updated_at": "2019-06-11T02:20:24.000Z"
# }
#
currencyId = self.safe_string(transaction, 'coin')
code = self.safe_currency_code(currencyId, currency)
timestamp = self.parse8601(self.safe_string(transaction, 'submited_at'))
updated = self.parse8601(self.safe_string(transaction, 'updated_at'))
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
address = self.safe_string(transaction, 'address')
feeCost = self.safe_float(transaction, 'fee')
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
return {
'info': transaction,
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'tx_id'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': None,
'tagTo': None,
'tagFrom': None,
'type': 'withdrawal',
'amount': self.safe_float(transaction, 'amount'),
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'coin': currency['id'],
# 'currency': currency['id'], # alias
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
# 'wallet_fund_type': 'Deposit', # Withdraw, RealisedPNL, Commission, Refund, Prize, ExchangeOrderWithdraw, ExchangeOrderDeposit
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit
response = self.openapiGetWalletFundRecords(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577481867.115552",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577481867122,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_ledger(data, currency, since, limit)
def parse_ledger_entry(self, item, currency=None):
#
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
#
currencyId = self.safe_string(item, 'coin')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_float(item, 'amount')
after = self.safe_float(item, 'wallet_balance')
direction = 'out' if (amount < 0) else 'in'
before = None
if after is not None and amount is not None:
difference = amount if (direction == 'out') else -amount
before = self.sum(after, difference)
timestamp = self.parse8601(self.safe_string(item, 'exec_time'))
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
id = self.safe_string(item, 'id')
referenceId = self.safe_string(item, 'tx_id')
return {
'id': id,
'currency': code,
'account': self.safe_string(item, 'wallet_id'),
'referenceAccount': None,
'referenceId': referenceId,
'status': None,
'amount': amount,
'before': before,
'after': after,
'fee': None,
'direction': direction,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'type': type,
'info': item,
}
def parse_ledger_entry_type(self, type):
types = {
'Deposit': 'transaction',
'Withdraw': 'transaction',
'RealisedPNL': 'trade',
'Commission': 'fee',
'Refund': 'cashback',
'Prize': 'prize', # ?
'ExchangeOrderWithdraw': 'transaction',
'ExchangeOrderDeposit': 'transaction',
}
return self.safe_string(types, type, type)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api']
request = path
# public v2
if api == 'public':
request = '/' + self.version + '/' + api + '/' + request
if params:
request += '?' + self.rawencode(params)
elif api == 'publicLinear':
request = '/public/linear/' + request
if params:
request += '?' + self.rawencode(params)
else:
self.check_required_credentials()
if api == 'openapi':
request = '/open-api/' + request
elif api == 'private':
# private v2
request = '/' + self.version + '/' + api + '/' + request
elif api == 'privateLinear':
request = '/private/linear/' + request
else:
# position, user
request = '/' + api + '/' + request
timestamp = self.nonce()
query = self.extend(params, {
'api_key': self.apiKey,
'recv_window': self.options['recvWindow'],
'timestamp': timestamp,
})
auth = self.rawencode(self.keysort(query))
# fix https://github.com/ccxt/ccxt/issues/7377
# bybit encodes whole floats as integers without .0
auth = auth.replace('.0&', '&')
signature = self.hmac(self.encode(auth), self.encode(self.secret))
if method == 'POST':
body = self.json(self.extend(query, {
'sign': signature,
}))
headers = {
'Content-Type': 'application/json',
}
else:
request += '?' + auth + '&sign=' + signature
url += request
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return # fallback to default error handler
#
# {
# ret_code: 10001,
# ret_msg: 'ReadMapCB: expect {or n, but found \u0000, error ' +
# 'found in #0 byte of ...||..., bigger context ' +
# '...||...',
# ext_code: '',
# ext_info: '',
# result: null,
# time_now: '1583934106.590436'
# }
#
errorCode = self.safe_value(response, 'ret_code')
if errorCode != 0:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback) # unknown message
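# --- illustrative usage sketch added by the editor; not part of the original file ---
# The methods above assume a ccxt-style Exchange subclass (the bybit adapter).  Everything
# below is a hedged example: the credentials are placeholders and a real call needs network
# access.  fetch_ledger() builds the request, sign() authenticates it, and
# parse_ledger_entry() maps each wallet fund record into a unified ledger entry.
if __name__ == '__main__':
    import ccxt  # assumes the ccxt package that ships this exchange class
    exchange = ccxt.bybit({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
    ledger = exchange.fetch_ledger(code='BTC', limit=20)
    for entry in ledger:
        print(entry['datetime'], entry['type'], entry['direction'], entry['amount'])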
| [
"[email protected]"
] | |
0c6a313f175fae0b84ab49d728b75441dcf2ea5f | 13b14c9c75143bf2eda87cb4a41006a52dd6f02b | /AOJ/ITP1_1_D/ITP1_1_D_p.py | a6acf7b75b82545eb6dc179dabcbd87b1719406f | [] | no_license | yutaka-watanobe/problem-solving | 2c311ac856c79c20aef631938140118eb3bc3835 | f0b92125494fbd3c8d203989ec9fef53f52ad4b4 | refs/heads/master | 2021-06-03T12:58:39.881107 | 2020-12-16T14:34:16 | 2020-12-16T14:34:16 | 94,963,754 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | s = int(input())
h = s // 3600
m = s % 3600 // 60
s = s % 60
print(str(h) +":" + str(m) +":" + str(s))
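# Reference helper added for illustration only (it is not called above, so the judged
# output is unchanged): the same h:m:s conversion expressed with divmod.
def to_hms(total_seconds):
    h, rem = divmod(total_seconds, 3600)
    m, sec = divmod(rem, 60)
    return "{}:{}:{}".format(h, m, sec)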
| [
"[email protected]"
] | |
4e0e82716584b6f00ebb5773c0e041000aa55a11 | 1d9138d777744fa2d9d6e3b629a43041f2358d06 | /real_time/abc/118/B.py | 0cbaafe88c142600626cc3f11fe6341f0d44f97f | [] | no_license | Yuyats/AtCoderAnswers | f1956b790ee64a4d0b3b48b98791a91679a30244 | fac7e3eb74a888e77ba7a6b6a15d836c589baa3e | refs/heads/master | 2021-06-24T16:19:45.848524 | 2021-06-13T03:51:07 | 2021-06-13T03:51:07 | 198,857,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | import math, string, itertools, fractions, heapq, collections, re, array, bisect, sys, random, time, copy, functools
sys.setrecursionlimit(10**7)
inf = 10 ** 20
eps = 1.0 / 10**10
mod = 10**9+7
dd = [(-1, 0), (0, 1), (1, 0), (0, -1)]
ddn = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]
def LI(): return [int(x) for x in sys.stdin.readline().split()]
def LI_(): return [int(x)-1 for x in sys.stdin.readline().split()]
def LF(): return [float(x) for x in sys.stdin.readline().split()]
def LS(): return sys.stdin.readline().split()
def I(): return int(sys.stdin.readline())
def F(): return float(sys.stdin.readline())
def S(): return input()
def pf(s): return print(s, flush=True)
def main():
N, M = LI()
    KA = [LI() for _ in range(N)]  # read the N survey lines
result = 0
for i in KA[0][1:]:
if all([i in j[1:] for j in KA]):
result += 1
print(result)
main()
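# Alternative formulation, added for illustration and never invoked: the answer is
# simply the size of the intersection of the food sets reported by all N people.
def count_common_foods(ka_rows):
    common = set(ka_rows[0][1:])
    for row in ka_rows[1:]:
        common &= set(row[1:])
    return len(common)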
| [
"[email protected]"
] | |
8b2fb070f5bd9c5bfb41af82e806c1cdd09c1850 | e764c69d09cb69653817df8fa410ce7a31dd5d1d | /residuosWebapp/residuos/models.py | 95607856e983b103db5e578608aeadf61d6b7687 | [] | no_license | fafaschiavo/residuosWebbapp | 5620d60a933e3894864c89de232ebebf11df6a5f | f1915bc1f136801e96c5bf01bd7d5127eddb8551 | refs/heads/master | 2021-01-20T17:12:27.770482 | 2016-08-14T22:26:03 | 2016-08-14T22:26:03 | 65,656,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class members(models.Model):
first_name = models.CharField(max_length=200)
last_name = models.CharField(max_length=200)
email = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
newsletter = models.IntegerField(default=1)
is_adm = models.IntegerField(default=0)
company_id = models.IntegerField(default=0)
def __first_name__(self):
return self.first_name
def __last_name__(self):
return self.last_name
def __email__(self):
return self.email
def __phone__(self):
return self.phone
def __created_at__(self):
return self.created_at
def __newsletter__(self):
return self.newsletter
def __is_adm__(self):
return self.is_adm
class company(models.Model):
company_name = models.CharField(max_length=400)
email = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
cnpj = models.CharField(max_length=200)
created_at = models.DateTimeField(auto_now=False, auto_now_add=True)
zip_code = models.CharField(max_length=200)
address = models.CharField(max_length=200)
def __company_name__(self):
return self.company_name
def __email__(self):
return self.email
def __phone__(self):
return self.phone
def __cnpj__(self):
return self.cnpj
def __created_at__(self):
return self.created_at
def __zip_code__(self):
return self.zip_code
def __address__(self):
return self.address | [
"[email protected]"
] | |
33c85c8bf476a439d5dacd0afbbd365c0df5f844 | fb65b7c000642dca68c93ee85a87795b3f30fe21 | /Advance_Python/Quantifiers/Rule4.py | 2c441191c03a5512a02d8b5f8398a98bb93e4222 | [] | no_license | toncysara17/luminarpythonprograms | f41b446251feba641e117d87ce235dc556086f8f | 17bc37c3f83c0e9792aaa8bccd901371a6413f14 | refs/heads/master | 2023-04-17T18:51:31.493118 | 2021-04-20T05:25:02 | 2021-04-20T05:25:02 | 358,550,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | #Quantifiers
import re
x = "a{3}"  # quantifier: matches exactly three consecutive 'a' characters
r="aaa abc aaaa cga"
matcher=re.finditer(x,r)
for match in matcher:
print(match.start())
print(match.group()) | [
"[email protected]"
] | |
02ca81936a9bbc323cdc7593087daf093dfe7a6a | dc0d7e49eafe40f1c41f631621a6ccdefdcbbf7c | /press/log.py | fd7a77624aba7893cf089a1fce44ac9a436ccd5f | [] | no_license | jr0d/press | b2314b319da5b44d23110036064775796246c5c1 | 477b78700b644b2d333f4d9289f319a52fc54100 | refs/heads/master | 2021-06-15T20:44:18.061919 | 2019-04-24T17:01:37 | 2019-04-24T17:01:37 | 80,559,927 | 7 | 3 | null | 2021-03-25T21:49:09 | 2017-01-31T20:38:44 | Python | UTF-8 | Python | false | false | 830 | py | import logging
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def setup_logging(log_level=logging.ERROR,
console_logging=True,
log_file=None,
cli_debug=False):
press_logger = logging.getLogger('press')
press_cli_logger = logging.getLogger('press.helpers.cli')
    if console_logging:  # defaults to True; pass False to disable console output
stream_handler = logging.StreamHandler()
press_logger.addHandler(stream_handler)
press_logger.setLevel(log_level)
if log_file:
fh = logging.FileHandler(log_file)
fh.setFormatter(logging.Formatter(fmt=FORMAT))
press_logger.info('Setting log file: {}'.format(log_file))
press_logger.addHandler(fh)
if not cli_debug:
press_cli_logger.setLevel(logging.ERROR)
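# --- illustrative usage added by the editor (the log file path below is an assumption) ---
if __name__ == '__main__':
    setup_logging(log_level=logging.DEBUG,
                  console_logging=True,
                  log_file='/tmp/press.log',
                  cli_debug=False)
    logging.getLogger('press').debug('logging configured')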
| [
"[email protected]"
] | |
6ab505a1ac637cbf578adba0cb1b1eb19c59b563 | 4ad94b71e30883d6df07a3277265bd6fb7457ba7 | /python/examples/doc_examples/plot/axis_title_3d.py | e81c6c337eadf7fd0d7458a698deea9e1388cc48 | [
"MIT"
] | permissive | Tecplot/handyscripts | 7cb1d4c80f323c785d06b0c8d37aeb0acb67f58c | 84a89bfecff5479a0319f08eb8aa9df465283830 | refs/heads/master | 2023-08-22T15:29:22.629644 | 2023-08-12T01:19:59 | 2023-08-12T01:19:59 | 149,826,165 | 89 | 64 | MIT | 2022-01-13T01:11:02 | 2018-09-21T22:47:23 | Jupyter Notebook | UTF-8 | Python | false | false | 1,345 | py | from os import path
import tecplot as tp
from tecplot.constant import PlotType, SurfacesToPlot, Color, AxisTitleMode
examples_dir = tp.session.tecplot_examples_directory()
infile = path.join(examples_dir, 'SimpleData', 'F18.plt')
dataset = tp.data.load_tecplot(infile)
plot = tp.active_frame().plot(PlotType.Cartesian3D)
plot.activate()
plot.show_contour = True
plot.contour(0).variable = dataset.variable('S')
plot.contour(0).colormap_name = 'Sequential - Yellow/Green/Blue'
plot.contour(0).legend.show = False
plot.fieldmap(0).surfaces.surfaces_to_plot = SurfacesToPlot.BoundaryFaces
xaxis = plot.axes.x_axis
xaxis.show = True
#{DOC:highlight}[
xaxis.title.title_mode = AxisTitleMode.UseText
xaxis.title.text = 'Longitudinal (m)'
xaxis.title.color = Color.BluePurple
xaxis.title.position = 10
#]
yaxis = plot.axes.y_axis
yaxis.show = True
#{DOC:highlight}[
yaxis.title.title_mode = AxisTitleMode.UseText
yaxis.title.text = 'Transverse (m)'
yaxis.title.color = Color.BluePurple
yaxis.title.position = 90
#]
zaxis = plot.axes.z_axis
zaxis.show = True
#{DOC:highlight}[
zaxis.title.title_mode = AxisTitleMode.UseText
zaxis.title.text = 'Height (m)'
zaxis.title.color = Color.BluePurple
zaxis.title.offset = 13
#]
plot.view.fit()
tp.export.save_png('axis_title_3d.png', 600, supersample=3)
| [
"[email protected]"
] | |
5a9c07053f256cb8360b535a35fb9b97ed2bcae8 | c652797f5303bb7102967fc6603e5704025afb36 | /gamelayer/uitools/textline.py | 179db7b1680cae5dd59791572dfc2579cff6863c | [
"MIT"
] | permissive | Windspar/Gamelayer | fc1ce499cccb6530a4dcd446f9d86fd44026e564 | 65e1cf11548bc02bc49348eb265c209172c14844 | refs/heads/master | 2022-06-13T08:06:37.828771 | 2020-05-07T17:17:59 | 2020-05-07T17:17:59 | 258,047,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,569 | py | import string
from pygame import KMOD_CTRL, Rect, KEYDOWN, MOUSEMOTION, MOUSEBUTTONDOWN
from .label import Label
from .ui_base import UI_Base
from .textline_core import *
class TextLine(UI_Base):
def __init__(self, font, color, callback, rect, allowed_keys=None, *groups):
UI_Base.__init__(self, rect, (0, 0), "topleft", *groups)
self.callback = callback
self._left = 0
self._right = 0
self._offset = 2
if allowed_keys:
self.allowed_keys = allowed_keys
else:
self.allowed_keys = string.digits + string.ascii_letters + string.punctuation + " "
self.recall = Recall()
position = self.rect.x + self._offset, self.rect.centery
self.carrot = Carrot("|", font, color, position)
self.carrot.set_callback(self.build_image)
self.buffer = Buffer(self.carrot, self.recall, callback)
self.label = Label("", font, color, position, "midleft")
self.label.set_apply_image(self.build_image)
def bind(self, events):
events.bind(KEYDOWN, self.on_keydown)
events.bind(MOUSEMOTION, self.on_mousemotion)
events.bind(MOUSEBUTTONDOWN, self.on_mousebuttondown)
def build_image(self, tool=None):
self.image = self.build_surface()
self.label.draw_to(self.image, self.rect)
self.carrot.draw_to(self.image, self.rect)
self.apply_image()
def draw(self, surface):
self.label.draw(surface)
self.carrot.draw(surface)
def on_keydown(self, event):
if self._toggle:
self.carrot.show(True)
ctrl = event.mod & KMOD_CTRL
if ctrl == 0 and event.unicode in self.allowed_keys and event.unicode != "":
self.buffer.insert(self.carrot.position, event.unicode)
self.carrot.position += 1
self.update_text()
elif ctrl == 0:
if event.key in self.buffer.key_events.keys():
self.buffer.key_events[event.key]()
self.update_text()
def on_mousebuttondown(self, event):
self._toggle = False
if event.button == 1:
if self._hover:
self._toggle = True
if not self.carrot._enable:
self.carrot.enable(True)
if not self._toggle:
self.carrot.enable(False)
self.apply_image()
def update(self, delta):
self.carrot.update(delta)
def update_text(self):
if not self.buffer.empty():
text = self.buffer.text
font = self.label._font
width = self.rect.width - self._offset * 3
self.label.set_text(text)
if self.carrot.position > self._right:
self._right = self.carrot.position
elif self.carrot.position < self._left:
self._left = self.carrot.position
# Looking for clipping text best fit. Base on carrot position
# Move left position to the left.
while font.size(text[self._left:self._right])[0] < width and self._left > 0:
self._left -= 1
# Move left position to the right.
while font.size(text[self._left:self._right])[0] > width and self._left < self.carrot.position:
self._left += 1
# Move right position to right.
while font.size(text[self._left:self._right])[0] < width and self._right < len(self.buffer):
self._right += 1
# Move right position to left.
while font.size(text[self._left:self._right])[0] > width:
self._right -= 1
label_x = self.label.rect.x - 1
x = font.size(text[0: self._left])[0]
w = min(width, self.label.rect.width - x)
# Smooth scrolling effect
if w < width < self.label.rect.width:
offset = width - (self.label.rect.width - x)
x -= offset
w += offset
label_x += offset
# Clip rect
clip_rect = Rect(x, 0, w, self.label.rect.height)
# Carrot position
slice = text[self._left:self.carrot.position]
self.carrot.rect.x = font.size(slice)[0] + label_x
# Must set label clip rect. After setting carrot x position.
# For image is update correctly.
self.label.clip(clip_rect)
else:
self.carrot.rect.x = self.label.rect.x
self.label.set_text("")
| [
"[email protected]"
] | |
20aed6156cab0fb01197eb7232f5d902cc34d1ae | 5023f3f6f493a6cf3a6e4acf7ee742fdecc2a558 | /ScopeFoundryHW/newport_esp300/esp300_xyz_stage_hw.py | ea5170c66505e93c87a73e26db3c8a6b14c200da | [
"BSD-3-Clause"
] | permissive | erictang000/stackbot | 1a0de1a30c0b17a67808cbb7f084149f0c744070 | e9a20930d790c995163192b29394a266af54a3d0 | refs/heads/master | 2022-04-10T06:48:25.785204 | 2020-03-18T23:08:57 | 2020-03-18T23:08:57 | 248,362,086 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,673 | py | from ScopeFoundry.hardware import HardwareComponent
class ESP300XYZStageHW(HardwareComponent):
name = 'esp300_xyz_stage'
def __init__(self, app, debug=False, name=None, ax_names='xyz'):
"""
ax_names defines the names of the three axes connected to the stage.
if an "_" underscore is found, that axis will be skipped.
May be any iterable. examples include 'xyz' or ['r', 'theta', 'phi']
"""
self.ax_names = ax_names
HardwareComponent.__init__(self, app, debug=debug, name=name)
def setup(self):
self.settings.New('port', str, initial='COM5')
for axis in self.ax_names:
if axis == '_' or axis == None:
continue
self.settings.New(axis + "_position",
dtype=float,
ro=True,
unit='mm',
spinbox_decimals=6,
si=False
)
#self.settings.New(axis + "_ref_position", dtype=float, ro=True, unit='nm')
self.settings.New(axis + "_target_position",
dtype=float,
ro=False,
vmin=-20,
vmax=20,
unit='mm',
spinbox_decimals=6,
spinbox_step=0.01,
si=False)
self.settings.New(axis + '_enabled', bool, initial=True)
self.settings.New(axis + '_is_moving', bool, ro=True)
self.settings.New(axis + "_step_delta", dtype=float, unit='m', si=True, initial=100e-6, vmin=0 )
def connect(self):
S = self.settings
from .esp300_dev import ESP300
E = self.esp300 = ESP300(port=S['port'], debug=S['debug_mode'])
for axis_index, axis_name in enumerate(self.ax_names):
axis_num = axis_index + 1
# skip axes that are excluded from ax_names
if axis_name == '_' or axis_name == None:
continue
unit = E.read_unit(axis_num)
self.settings.get_lq(axis_name + "_position").change_unit(unit)
self.settings.get_lq(axis_name + "_target_position").change_unit(unit)
self.settings.get_lq(axis_name + "_position").connect_to_hardware(
lambda a=axis_num: E.read_pos(a))
self.settings.get_lq(axis_name + "_target_position").connect_to_hardware(
write_func = lambda new_pos, a=axis_num: E.write_target_pos_abs(a, new_pos))
self.settings.get_lq(axis_name + "_enabled").connect_to_hardware(
read_func = lambda a=axis_num: E.read_enabled(a),
write_func = lambda enabled, a=axis_num: E.write_enabled(a, enabled))
self.settings.get_lq(axis_name + "_is_moving").connect_to_hardware(
read_func = lambda a=axis_num: E.read_is_moving(a))
def disconnect(self):
self.settings.disconnect_all_from_hardware()
if hasattr(self, 'esp300'):
self.esp300.close()
del self.esp300
def move_step_delta(self, axname, dir=+1):
"dir should be +/- 1"
dir = dir * 1.0/ abs(dir)
self.settings[axname + "_target_position"] += dir * self.settings[axname + '_step_delta']
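# --- illustrative sketch added by the editor: how ax_names is typically used ---
# Passing 'x_z' exposes only the first and third ESP300 axes as 'x' and 'z'; the underscore
# slot is skipped, as described in the __init__ docstring.  The `app` object and the actual
# hardware connection are assumptions; this helper is not called anywhere in the module.
def _example_stage_usage(app):
    stage = ESP300XYZStageHW(app, ax_names='x_z')
    stage.settings['port'] = 'COM5'
    stage.connect()                      # requires a real ESP300 controller on that port
    stage.move_step_delta('x', dir=+1)   # jog +x by one x_step_delta
    stage.move_step_delta('z', dir=-1)   # jog -z by one z_step_delta
    return stage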
| [
"[email protected]"
] | |
761694d396861a5c422b785015b5680bb34787ac | 81f6fd135813f3727576bd5d74acaf0469b53615 | /test/test_variables_api.py | 92de66d295e33ba672eaac9f15c6edcb303a029b | [] | no_license | rlisowski/phrase-python | cb65ded1e80d1985aa95a4403c7aa3f012bd33b4 | cbd6bf580a74140928b7536bb9b466d43276cc29 | refs/heads/master | 2023-06-18T09:24:43.916142 | 2021-07-15T14:21:58 | 2021-07-15T14:21:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,350 | py | # coding: utf-8
"""
Phrase API Reference
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import phrase_api
from phrase_api.api.variables_api import VariablesApi # noqa: E501
from phrase_api.rest import ApiException
class TestVariablesApi(unittest.TestCase):
"""VariablesApi unit test stubs"""
def setUp(self):
self.api = phrase_api.api.variables_api.VariablesApi() # noqa: E501
def tearDown(self):
pass
def test_variable_create(self):
"""Test case for variable_create
Create a variable # noqa: E501
"""
pass
def test_variable_delete(self):
"""Test case for variable_delete
Delete a variable # noqa: E501
"""
pass
def test_variable_show(self):
"""Test case for variable_show
Get a single variable # noqa: E501
"""
pass
def test_variable_update(self):
"""Test case for variable_update
Update a variable # noqa: E501
"""
pass
def test_variables_list(self):
"""Test case for variables_list
List variables # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
673e7667066dc50650cfcf844997ca18b98537de | 4ba18540bfd8c523fe39bbe7d6c8fa29d4ec0947 | /atlas/testing/auth_acceptance/config.py | 8cd7cc13ffa195f811cd91010896c159bec16db3 | [
"BSD-3-Clause",
"MIT",
"CC0-1.0",
"Apache-2.0",
"BSD-2-Clause",
"MPL-2.0"
] | permissive | yottabytt/atlas | c9d8ef45a0921c9f46d3ed94d42342f11488a85e | b040e574fbc64c833039b003f8a90345dd98e0eb | refs/heads/master | 2022-10-14T11:12:12.311137 | 2020-06-13T13:19:35 | 2020-06-13T13:19:35 | 272,008,756 | 0 | 0 | Apache-2.0 | 2020-06-13T12:55:29 | 2020-06-13T12:55:28 | null | UTF-8 | Python | false | false | 1,381 | py |
# separates test runs
from uuid import uuid4
TEST_UUID = uuid4()
def set_foundations_home():
import os
os.environ["FOUNDATIONS_HOME"] = os.getcwd() + "/auth_acceptance/foundations_home"
os.environ["FOUNDATIONS_COMMAND_LINE"] = "True"
def _flattened_config_walk():
import os
import os.path as path
for dir_name, _, files in os.walk("auth_acceptance/foundations_home"):
for file_name in files:
if file_name.endswith(".envsubst.yaml"):
yield path.join(dir_name, file_name)
def _load_execution_config():
from foundations_core_cli.typed_config_listing import TypedConfigListing
from foundations_internal.config.execution import translate
TypedConfigListing("execution").update_config_manager_with_config(
"default", translate
)
def _config():
import os
import subprocess
for env_var in ["FOUNDATIONS_HOME"]:
if not os.environ.get(env_var, None):
print(f"{env_var} was not set")
exit(1)
for template_file_name in _flattened_config_walk():
output_file_name = template_file_name[: -len(".envsubst.yaml")] + ".yaml"
subprocess.run(
f"envsubst < {template_file_name} > {output_file_name}", shell=True
)
# _load_execution_config()
def setup_auth_home_config():
set_foundations_home()
_config()
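# --- illustrative invocation added by the editor ---
# Rendering the *.envsubst.yaml templates assumes FOUNDATIONS_HOME is set (done by
# set_foundations_home above) and that the external `envsubst` binary is on the PATH.
if __name__ == "__main__":
    setup_auth_home_config()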
| [
"[email protected]"
] | |
aa94f2a5beb0b786f90536824232dccead006413 | 53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61 | /.history/EMR/zhzd_3_20190605095859.py | fff25287104a8fd3c22d7bf52709955151761a05 | [] | no_license | cyc19950621/python | 4add54894dc81187211aa8d45e5115903b69a182 | d184b83e73334a37d413306d3694e14a19580cb0 | refs/heads/master | 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
import pandas as pd
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHRzhzd')  # collect the .txt files under this directory
dis = open(r'C:\Users\Administrator\Desktop\ICD-10.txt',errors='ignore')
ds=dis.readlines()
ds_cs = []
for line in ds:
line = re.sub('\n','',line)
ds_cs.append(line)
ryzd=[]
for emrtxt in emrtxts:
    f = open(emrtxt,'r',errors="ignore")  # errors="ignore" so Chinese text with unexpected encodings does not raise
emrpath = os.path.basename(emrtxt)
    emrpath = os.path.splitext(emrpath)[0]  # drop the extension to keep only the record name
pattern =r'\s*\d+、+\s?(.*)'
c=re.compile(pattern)
output=[]
for line in f.readlines():
line1=line.strip('\n')
line2 = ''.join(line1)
        line2 = line2.strip()
line3=c.findall(line2)
line3=''.join(line3)
line4 = str(line3)
out = line4
out= re.sub(r'右侧|两侧|双侧|左侧|右|左|双','',out)
out = re.sub(r'肺肺','肺',out)
        out = re.sub(r'\(.*?\)', '', out)  # drop parenthesized notes (assumed intent; the original pattern '(.*?)' matched only empty strings and was a no-op)
for ds in ds_cs:
if EMRdef.SBS(out,ds) > 0.8:
output.append(out)
output=EMRdef.delre(output)
output1='\n'.join(output)
EMRdef.text_create(r'D:\DeepLearning ER\EHRzhzd2','.txt',emrpath,output1)
| [
"[email protected]"
] | |
36e76dcb3732230740ae113deefc19b4e2a6d793 | fac4c2fa64e6a22d0a80eec7b65c93d7a6236b7f | /original-modules/text-to-text-transfer-transformer-master/t5/data/utils.py | 99bc9b12ed3388a6db2db6e2ca7b57fef107517b | [
"Apache-2.0"
] | permissive | zouning68/nlp-transfer-learning | ec2b9e91f4b3bb9d77bf88dd78282f6ff5aaa4fd | e9b1544b55905ceb2235471f036abc1d7c4160db | refs/heads/master | 2023-04-13T18:25:22.206475 | 2020-01-15T02:36:19 | 2020-01-15T02:36:19 | 228,514,893 | 2 | 0 | Apache-2.0 | 2023-03-24T23:36:35 | 2019-12-17T02:21:15 | Python | UTF-8 | Python | false | false | 38,608 | py | # Copyright 2019 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for data loading and processing.
Defines Tasks, TaskRegistry, Mixture, and MixtureRegistry
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import inspect
import json
import os
import re
from absl import logging
import gin
import numpy as np
from t5.data import sentencepiece_vocabulary
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
_DEFAULT_FEATURE_KEYS = ["inputs", "targets"]
_VALID_TASK_NAME_REGEX = re.compile(r"^[\w\d\._]+$")
_INFO_FILENAME = "info.{split}.json"
_STATS_FILENAME = "stats.{split}.json"
_TFRECORD_PREFIX = "{split}.tfrecord"
_MAX_EXAMPLES_TO_MEM_CACHE = 10000
_SHUFFLE_BUFFER_SIZE = 1000
_TFDS_DATA_DIR_OVERRIDE = None
_GLOBAL_CACHE_DIRECTORIES = []
DEFAULT_SPM_PATH = "gs://t5-data/vocabs/cc_all.32000/sentencepiece.model" # GCS
def set_tfds_data_dir_override(tfds_data_dir):
global _TFDS_DATA_DIR_OVERRIDE
_TFDS_DATA_DIR_OVERRIDE = tfds_data_dir
def set_global_cache_dirs(global_cache_dirs):
global _GLOBAL_CACHE_DIRECTORIES
_GLOBAL_CACHE_DIRECTORIES = global_cache_dirs
def add_global_cache_dirs(global_cache_dirs):
global _GLOBAL_CACHE_DIRECTORIES
_GLOBAL_CACHE_DIRECTORIES += global_cache_dirs
class DatasetProviderBase(object):
"""Abstract base for classes that provide a tf.data.Dataset."""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def sentencepiece_model_path(self):
raise NotImplementedError
@abc.abstractproperty
def output_features(self):
raise NotImplementedError
@abc.abstractmethod
def get_vocabulary(self):
raise NotImplementedError
@abc.abstractmethod
def get_dataset(
self, sequence_length, split, use_cached=False, shuffle=True):
raise NotImplementedError
@abc.abstractmethod
def num_input_examples(self, split):
raise NotImplementedError
class DatasetProviderRegistry(object):
"""Base for registry of data providers.
Child classes must implement a _REGISTRY dict.
"""
_PROVIDER_TYPE = DatasetProviderBase
@classmethod
def add(cls, name, provider_cls, *provider_args, **provider_kwargs):
"""Adds provider to the registry."""
if name in cls._REGISTRY:
raise ValueError("Attempting to register duplicate provider: %s" % name)
provider = provider_cls(*provider_args, **provider_kwargs)
if not isinstance(provider, cls._PROVIDER_TYPE):
      raise ValueError(
          "Attempting to register a provider of an invalid type. "
          "Expecting instance of %s, got %s" %
          (cls._PROVIDER_TYPE, provider_cls))
cls._REGISTRY[name] = provider
@classmethod
def remove(cls, name):
"""Remove provider from the registry, if it exists."""
if name in cls._REGISTRY:
del cls._REGISTRY[name]
@classmethod
def get(cls, name):
"""Returns provider from the registry."""
if name not in cls._REGISTRY:
raise ValueError("Provider name not registered: %s" % name)
return cls._REGISTRY[name]
@classmethod
def names(cls):
"""Returns all provider names in registry."""
return cls._REGISTRY.keys()
@classmethod
def get_dataset(
cls, name, sequence_length, split, use_cached=False, shuffle=True):
return cls.get(name).get_dataset(
sequence_length=sequence_length, split=split, use_cached=use_cached,
shuffle=shuffle)
class LazyTfdsLoader(object):
"""Wrapper for TFDS datasets with memoization and additional functionality.
Lazily loads info from TFDS and provides memoization to avoid expensive hidden
file operations. Also provides additional utility methods.
"""
_MEMOIZED_INSTANCES = {}
def __new__(cls, name, data_dir=None):
"""Either creates a new dataset or returns it if it already exists."""
key = (name, data_dir)
if key not in cls._MEMOIZED_INSTANCES:
cls._MEMOIZED_INSTANCES[key] = object.__new__(cls)
return cls._MEMOIZED_INSTANCES[key]
def __init__(self, name, data_dir=None):
"""LazyTfdsLoader constructor.
Args:
name: str, the name of the TFDS dataset.
data_dir: str (optional), directory to read/write TFDS data.
"""
self._name = name
self._data_dir = data_dir
self._builder = None
def __getstate__(self):
"""Remove un-pickle-able attributes and return the state."""
state = self.__dict__.copy()
del state["_builder"]
return state
def __getnewargs__(self):
return self._name, self._data_dir
@property
def name(self):
return self._name
@property
def data_dir(self):
if _TFDS_DATA_DIR_OVERRIDE:
if self._data_dir:
logging.warning(
"Overriding TFDS data directory '%s' with '%s' for dataset '%s'.",
self._data_dir, _TFDS_DATA_DIR_OVERRIDE, self.name)
return _TFDS_DATA_DIR_OVERRIDE
return self._data_dir
@property
def builder(self):
if not self._builder:
self._builder = tfds.builder(self.name, data_dir=self.data_dir)
return self._builder
@property
def info(self):
return self.builder.info
def files(self, split):
"""Returns set containing paths to TFDS TFRecord files for the dataset."""
self.verify_split(split)
files = set()
def _get_builder_files(builder):
split_info = builder.info.splits[split]
if builder.version.implements(tfds.core.Experiment.S3):
num_shards = len(split_info.shard_lengths)
else:
num_shards = split_info.num_shards
return tfds.core.naming.filepaths_for_dataset_split(
dataset_name=builder.name,
split=split_info.name,
num_shards=num_shards,
data_dir=builder._data_dir, # pylint:disable=protected-access
filetype_suffix="tfrecord",
)
if self.builder.BUILDER_CONFIGS and "/" not in self.name:
# If builder has multiple configs, and no particular config was
# requested, then compute all.
for config in self.builder.BUILDER_CONFIGS:
builder_for_config = tfds.builder(self.builder.name, config=config)
files.update(_get_builder_files(builder_for_config))
else:
files.update(_get_builder_files(self.builder))
if not files:
logging.fatal("No TFRecord files found for dataset: %s", self.name)
return files
def load(self, split, shuffle_files):
"""Returns a tf.data.Dataset for the given split."""
self.verify_split(split)
return tfds.load(
self._name,
split=split,
data_dir=self.data_dir,
shuffle_files=shuffle_files,
download=True,
try_gcs=True)
def load_shard(self, shard_path):
"""Returns a dataset for a single shard of the TFDS TFRecord files."""
ds = tfds.core.file_format_adapter.TFRecordExampleAdapter(
self.info.features.get_serialized_info()).dataset_from_filename(
shard_path)
ds = ds.map(self.info.features.decode_example)
return ds
def verify_split(self, split):
"""Verify that `split` is a valid split."""
if split not in self.info.splits.keys():
raise ValueError("{} has no '{}' split".format(self.name, split))
def size(self, split):
"""Returns the number of examples in the split."""
self.verify_split(split)
ds_splits = self.info.splits
dataset_size = ds_splits[split].num_examples
# Very large datasets have num_examples = 0; default instead to np.inf
dataset_size = dataset_size if dataset_size > 0 else np.inf
return dataset_size
def encode_string_features(
dataset, vocabulary, keys, copy_plaintext=False):
"""Encode specified string features.
Passes through non-string features unchanged. Optionally passes through copy
of original string features with "_plaintext" suffix added to the key.
Args:
dataset: a tf.data.Dataset
vocabulary: a vocabulary.Vocabulary
keys: list of strings, keys of features to encode.
copy_plaintext: bool, whether to pass through copies of plaintext strings
with a "_plaintext" suffix added to the key.
Returns:
a tf.data.Dataset
"""
keys = set(keys)
def my_fn(features):
"""Encode all specified feature that are strings and return a dictionary.
Args:
features: a dictionary
Returns:
a dictionary
"""
ret = {}
for k, v in features.items():
if v.dtype == tf.string and k in keys:
if copy_plaintext:
ret["%s_plaintext" % k] = v
v = tf.cast(vocabulary.encode_tf(v), tf.int64)
ret[k] = v
return ret
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
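# --- illustrative sketch added by the editor; not part of the original module ---
# Intended call pattern for encode_string_features: the string features named in `keys`
# are tokenized with a SentencePiece vocabulary while everything else passes through,
# and plaintext copies are kept under "*_plaintext".  The vocabulary path and feature
# values are assumptions; this helper is never called here.
def _example_encode_string_features():
  vocab = sentencepiece_vocabulary.SentencePieceVocabulary(DEFAULT_SPM_PATH)
  ds = tf.data.Dataset.from_tensor_slices(
      {"inputs": ["translate English to German: hello"], "targets": ["hallo"]})
  return encode_string_features(ds, vocab, keys=["inputs", "targets"],
                                copy_plaintext=True)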
def dict_to_tfexample(ex):
"""Convert example dictionary to tf.train.Example proto."""
feature_dict = {}
for k, v in ex.items():
t = tf.constant(v)
if len(t.shape) == 0: # pylint:disable=g-explicit-length-test
v = [v]
elif len(t.shape) == 1:
v = list(v)
else:
raise ValueError(
"Unsupported shape (%s) for '%s' value: %s" %
          (t.shape, k, v))
if t.dtype == tf.string and len(t.shape) <= 1:
feature_dict[k] = tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tf.compat.as_bytes(t) for t in v]))
elif t.dtype in (tf.int32, tf.int64) and len(t.shape) <= 1:
feature_dict[k] = tf.train.Feature(
int64_list=tf.train.Int64List(value=v))
else:
raise ValueError(
"Unsupported type (%s) and shape (%s) for '%s' value: %s" %
          (t.dtype, t.shape, k, v))
return tf.train.Example(features=tf.train.Features(feature=feature_dict))
def inverse_dataset(dataset, label):
"""Invert examples and prepend the given label to the new inputs.
Args:
dataset: tf.data.Dataset, contains "inputs" and "targets" keys
label: str, the label to prepend to the inputs.
Returns:
a tf.data.Dataset
"""
def map_fn(x):
return {
"inputs": tf.strings.join([label, x["targets"]]),
"targets": x["inputs"],
}
return dataset.map(
map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# ================================ Tasks =======================================
def get_info_path(data_dir, split):
return os.path.join(data_dir, _INFO_FILENAME.format(split=split))
def get_tfrecord_prefix(data_dir, split):
return os.path.join(data_dir, _TFRECORD_PREFIX.format(split=split))
def get_stats_path(data_dir, split):
return os.path.join(data_dir, _STATS_FILENAME.format(split=split))
class Task(DatasetProviderBase):
"""A wrapper for a `tf.data.Dataset` along with preprocessing information.
Tasks handle preprocessing (via arbitrary TF function) and tokenization
(via SentencePiece). Non-train splits also pass through the original
plaintext strings with a "_plaintext" suffix added to the key.
"""
def __init__(self,
name,
dataset_fn,
splits,
text_preprocessor,
sentencepiece_model_path,
metric_fns,
postprocess_fn=None,
token_preprocessor=None,
output_features=None,
num_input_examples=None):
"""Task constructor.
Args:
name: string, a unique name for the Task. A ValueError will be raised if
another task with this name is already registered.
dataset_fn: callable, a function with the signature
`dataset_fn(split, shuffle_files)' that returns a `tf.data.Dataset`.
splits: list(string), a list of allowable splits to request from the
`dataset_fn`.
text_preprocessor: a function (or list of functions) that (each) takes in
a tf.data.Dataset of string features and returns a tf.data.Dataset of
string features. Can be set to None as a no-op. If a list is given,
they will be executed sequentially.
sentencepiece_model_path: string, path to a SentencePiece model file to
use for tokenization.
metric_fns: list(callable), list of metric functions with the signature
`metric_fn(targets, predictions)` to use during evaluation.
postprocess_fn: function, a function that takes in decoded model outputs
(strings) and returns a string which is ready for evaluation using the
metric functions in `metric_fns`. Can be set to None as a no-op.
token_preprocessor: an optional function (or list of functions) that
(each) takes in a tf.data.Dataset of token features and returns a
tf.data.Dataset of token features.
Can be set to None as a no-op. If a list is given, they will be
executed sequentially.
The functions are also passed `sequence_length` and `vocabulary`
keyword arguments.
output_features: list(string), a list of the primary output features of
the dataset that will be prepared for the model. Defaults to 'inputs'
and 'targets'.
num_input_examples: dict(string: int) or None, a dictionary mapping split
to its size in number of input examples (before preprocessing). The
`num_input_examples` method will return None if not provided.
"""
if not _VALID_TASK_NAME_REGEX.match(name):
raise ValueError(
"Task name '%s' contains invalid characters. Must match regex: %s" % (
name, _VALID_TASK_NAME_REGEX.pattern))
_validate_args(dataset_fn, ["split", "shuffle_files"])
for metric_fn in metric_fns:
_validate_args(metric_fn, ["targets", "predictions"])
self._name = name
self._dataset_fn = dataset_fn
self._text_preprocessor = (
[] if text_preprocessor is None else text_preprocessor)
self._token_preprocessor = (
[] if token_preprocessor is None else token_preprocessor)
self._sentencepiece_model_path = sentencepiece_model_path
self._metric_fns = metric_fns
# Use a pass-through if postprocess_fn is not provided
self._postprocess_fn = postprocess_fn or (lambda x, **unused_kwargs: x)
self._cache_dir = None
self._stats = {}
self._output_features = sorted(
set(output_features or _DEFAULT_FEATURE_KEYS))
self._splits = splits
self._num_input_examples = num_input_examples
@property
def name(self):
return self._name
@property
def postprocess_fn(self):
return self._postprocess_fn
@property
def metric_fns(self):
return self._metric_fns
@property
def sentencepiece_model_path(self):
return self._sentencepiece_model_path
@property
def output_features(self):
return self._output_features
@property
def token_preprocessor(self):
return self._token_preprocessor
@property
def splits(self):
return self._splits
def num_input_examples(self, split):
if self._num_input_examples is None:
return None
return self._num_input_examples[split]
def _preprocess_dataset(self, dataset, preprocessors, **preprocess_kwargs):
if not hasattr(preprocessors, "__iter__"):
preprocessors = [preprocessors]
for prep_fn in preprocessors:
dataset = prep_fn(dataset, **preprocess_kwargs)
return dataset
def _validate_dataset(
self,
dataset,
expected_output_type,
expected_output_rank,
error_label,
ensure_no_eos=False):
"""Validates properties of a tf.data.Dataset, raising Exceptions if needed.
Args:
dataset: a tf.data.Dataset to validate.
expected_output_type: a tf.dtype, the expected type of the model features.
expected_output_rank: an int, the expected rank of the model features.
error_label: a string, an identifier for the previous processing step to
report in raised ValueErrors.
ensure_no_eos: a bool, whether or not to verify that the model features
contain no EOS tokens.
Returns:
a validated tf.data.Dataset.
"""
types = tf.data.get_output_types(dataset)
shapes = tf.data.get_output_shapes(dataset)
for feat in self.output_features:
if feat not in types:
raise ValueError(
"Task dataset is missing expected output feature after {label}: "
"{feat}".format(label=error_label, feat=feat))
if expected_output_type != types[feat]:
raise ValueError(
"Task dataset has incorrect type for feature '{feat}' after "
"{label}: Got {actual}, expected {expected}".format(
feat=feat, label=error_label, actual=types[feat].name,
expected=expected_output_type.name))
if expected_output_rank != len(shapes[feat]):
raise ValueError(
"Task dataset has incorrect rank for feature '{feat}' after "
"{label}: Got {actual}, expected {expected}".format(
feat=feat, label=error_label, actual=len(shapes[feat]),
expected=expected_output_rank))
def _ensure_no_eos(feat, v):
if feat not in self.output_features:
return v
with tf.control_dependencies([
tf.assert_none_equal(
v, tf.constant(1, tf.int64),
message="Feature '{feat}' unexpectedly contains EOS=1 token "
"after {label}.".format(feat=feat, label=error_label))
]):
return v
if ensure_no_eos:
dataset = dataset.map(
lambda ex: {k: _ensure_no_eos(k, v) for k, v in ex.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def preprocess_text(self, dataset):
"""Preprocessed text dataset."""
dataset = self._preprocess_dataset(dataset, self._text_preprocessor)
dataset = self._validate_dataset(
dataset, expected_output_type=tf.string, expected_output_rank=0,
error_label="text preprocessing")
return dataset
def preprocess_tokens(self, dataset, sequence_length):
"""Preprocesses tokenized dataset.
Args:
dataset: a tf.data.Dataset
sequence_length: dict mapping feature key to int length for that feature
Returns:
a tf.data.Dataset
"""
dataset = self._preprocess_dataset(
dataset, self._token_preprocessor,
sequence_length=sequence_length,
vocabulary=self.get_vocabulary())
dataset = self._validate_dataset(
dataset,
expected_output_type=tf.int64,
expected_output_rank=1,
error_label="token preprocessing",
ensure_no_eos=True)
# Trim and append EOS=1 token to model features.
def _trim_and_append_eos(feat, v):
if feat not in self.output_features:
return v
return tf.concat([v[:sequence_length[feat]-1], [1]], axis=0)
return dataset.map(
lambda ex: {k: _trim_and_append_eos(k, v) for k, v in ex.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
def initialize(self):
"""Attempts to load cached dataset and stats."""
if self._cache_dir:
return
# See if cached data exists in any of the cache directories.
potential_cache_dirs = [
os.path.join(d, self.name) for d in _GLOBAL_CACHE_DIRECTORIES]
for cache_dir in potential_cache_dirs:
if tf.io.gfile.exists(os.path.join(cache_dir, "COMPLETED")):
self._cache_dir = cache_dir
logging.info("'%s' is cached at %s.", self.name, self.cache_dir)
return
logging.info(
"'%s' does not exist in any task cache directories (searched %s).",
self.name,
potential_cache_dirs,
)
@property
def cached(self):
"""Returns whether or not cached dataset exists, initializing if needed."""
self.initialize()
return self._cache_dir is not None
@property
def cache_dir(self):
"""Returns the cache directory, initializing if needed."""
self.assert_cached()
return self._cache_dir
def assert_cached(self):
"""Raises an assertion error if cached dataset does not exist."""
assert self.cached, (
"'%s' does not exist in any of the task cache directories" % self.name)
def get_cached_stats(self, split=tfds.Split.TRAIN):
"""Returns basic statistics for cached dataset."""
self.assert_cached()
if split not in self._stats:
stats_path = get_stats_path(self.cache_dir, split)
if not tf.io.gfile.exists(stats_path):
raise ValueError(
"Stats do not exist for '%s' split: %s" % (self.name, split))
with tf.io.gfile.GFile(stats_path) as f:
self._stats[split] = json.load(f)
return self._stats[split]
def get_vocabulary(self):
"""Returns a SentencePieceVocabulary object using the Task's model."""
return sentencepiece_vocabulary.SentencePieceVocabulary(
self.sentencepiece_model_path)
def get_dataset(
self,
sequence_length,
split=tfds.Split.TRAIN,
use_cached=False,
shuffle=True,
shuffle_buffer_size=_SHUFFLE_BUFFER_SIZE,
):
"""Returns a tf.data.Dataset from cache or generated on the fly.
Args:
sequence_length: dict mapping feature key to int length for that feature
split: string, the split to return.
use_cached: bool, whether to use the cached dataset instead of processing
it on the fly. Defaults to True.
shuffle: bool, whether to shuffle the dataset. Only used when generating
on the fly (use_cached=False).
shuffle_buffer_size: an integer
Returns:
A mixed tf.data.Dataset.
"""
if use_cached:
ds = self._get_cached_dataset(split, shuffle)
else:
ds = self._dataset_fn(split=split, shuffle_files=shuffle)
ds = self.preprocess_text(ds)
# Tokenize
ds = encode_string_features(
ds, self.get_vocabulary(), keys=self.output_features,
copy_plaintext=True)
if (not use_cached and self.num_input_examples(split) and
self.num_input_examples(split) < _MAX_EXAMPLES_TO_MEM_CACHE):
ds = ds.cache()
# Post tokenization processing.
ds = self.preprocess_tokens(ds, sequence_length)
if shuffle:
# Shuffle before mixing since preprocessor can output multiple
# (correlated) examples per input.
ds = ds.shuffle(shuffle_buffer_size)
return ds
def _get_cached_dataset(self, split=tfds.Split.TRAIN, shuffle=True):
"""Returns a tf.data.Dataset read from cached files."""
self.assert_cached()
with tf.io.gfile.GFile(get_info_path(self.cache_dir, split)) as f:
split_info = json.load(f)
# Use `FixedLenSequenceFeature` for sequences with variable length.
def _feature_config(shape, dtype):
if shape and shape[0] is None:
return tf.io.FixedLenSequenceFeature(
shape[1:], dtype, allow_missing=True)
return tf.io.FixedLenFeature(shape, dtype)
feature_desc = {
feat: _feature_config(**desc)
for feat, desc in split_info["features"].items()}
ds = tf.data.Dataset.list_files(
"%s-*-of-*%d" % (
get_tfrecord_prefix(self.cache_dir, split),
split_info["num_shards"]),
shuffle=shuffle)
ds = ds.interleave(
tf.data.TFRecordDataset,
cycle_length=16, block_length=16,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = ds.map(lambda ex: tf.parse_single_example(ex, feature_desc),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if self.get_cached_stats(split)["examples"] <= _MAX_EXAMPLES_TO_MEM_CACHE:
ds = ds.cache()
return ds
class TfdsTask(Task):
"""A `Task` that uses TensorFlow Datasets to provide the input dataset."""
def __init__(
self,
name,
tfds_name,
text_preprocessor,
sentencepiece_model_path,
metric_fns,
tfds_data_dir=None,
splits=None,
**task_kwargs):
"""TfdsTask constructor.
Args:
name: string, a unique name for the Task. A ValueError will be raised if
another task with this name is already registered.
tfds_name: string, the name and version number of a TFDS dataset,
optionally with a config.
text_preprocessor: a function (or list of functions) that (each) takes in
a tf.data.Dataset of string features and returns a tf.data.Dataset of
string features. Can be set to None as a no-op. If a list is given,
they will be executed sequentially.
sentencepiece_model_path: string, path to a SentencePiece model file to
use for tokenization.
metric_fns: list(callable), list of metric functions with the signature
metric_fn(targets, predictions) to use during evaluation.
tfds_data_dir: string, an optional path to a specific TFDS data directory
to use.
splits: list(string) or None, a list of allowable splits to load. The
default, None, uses all available splits from the TFDS dataset info.
**task_kwargs: dict, additional keyword arguments for the parent `Task`
class.
"""
if ":" not in tfds_name:
raise ValueError(
"TFDS name must contain a version number, got: %s" % tfds_name)
self._tfds_dataset = LazyTfdsLoader(tfds_name, tfds_data_dir)
def dataset_fn(split, shuffle_files):
return self._tfds_dataset.load(split, shuffle_files)
super(TfdsTask, self).__init__(
name,
dataset_fn=dataset_fn,
splits=splits,
text_preprocessor=text_preprocessor,
sentencepiece_model_path=sentencepiece_model_path,
metric_fns=metric_fns,
**task_kwargs)
@property
def splits(self):
"""Override since we can't call `info.splits` until after init."""
return self._splits or self._tfds_dataset.info.splits
@property
def tfds_dataset(self):
return self._tfds_dataset
def num_input_examples(self, split):
return self.tfds_dataset.size(split)
class TextLineTask(Task):
"""A `Task` that reads text lines as input.
Requires a text_processor to be passed that takes a tf.data.Dataset of
strings and returns a tf.data.Dataset of feature dictionaries.
e.g. preprocessors.preprocess_tsv()
"""
def __init__(
self,
name,
split_to_filepattern,
text_preprocessor,
sentencepiece_model_path,
metric_fns,
skip_header_lines=0,
**task_kwargs):
"""TextLineTask constructor.
Args:
name: string, a unique name for the Task. A ValueError will be raised if
another task with this name is already registered.
split_to_filepattern: dict of string (split name) to string (filename or
filepattern).
text_preprocessor: a function (or list of functions) that (each) takes in
a tf.data.Dataset of string features and returns a tf.data.Dataset of
string features. Can be set to None as a no-op. If a list is given,
they will be executed sequentially.
sentencepiece_model_path: string, path to a SentencePiece model file to
use for tokenization.
metric_fns: list(callable), list of metric functions with the signature
metric_fn(targets, predictions) to use during evaluation.
skip_header_lines: int, number of header lines to skip in each source
file.
**task_kwargs: dict, additional keyword arguments for the parent `Task`
class.
"""
def dataset_fn(split, shuffle_files):
filepattern = split_to_filepattern[split]
def _read_file(fname):
return tf.data.TextLineDataset(fname).skip(skip_header_lines)
files = tf.data.Dataset.list_files(filepattern, shuffle=shuffle_files)
return files.interleave(
_read_file,
cycle_length=16, block_length=16,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
super(TextLineTask, self).__init__(
name,
dataset_fn=dataset_fn,
splits=split_to_filepattern.keys(),
text_preprocessor=text_preprocessor,
sentencepiece_model_path=sentencepiece_model_path,
metric_fns=metric_fns,
**task_kwargs)
class TaskRegistry(DatasetProviderRegistry):
_REGISTRY = {}
_PROVIDER_TYPE = Task
@classmethod
def add(cls, name, task_cls=Task, **kwargs):
super(TaskRegistry, cls).add(name, task_cls, name, **kwargs)
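# --- illustrative sketch added by the editor: registering a Task ---
# The TFDS name, task name and empty metric list are placeholders; real task modules
# normally call TaskRegistry.add at import time.  This helper is never called here.
def _example_register_task():
  TaskRegistry.add(
      "example_translation_task",
      TfdsTask,
      tfds_name="wmt14_translate/de-en:1.0.0",  # placeholder dataset/version
      text_preprocessor=None,
      sentencepiece_model_path=DEFAULT_SPM_PATH,
      metric_fns=[])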
# ================================ Mixtures ====================================
class Mixture(DatasetProviderBase):
"""Class for mixing multiple tasks."""
def __init__(self, tasks, default_rate=None):
"""Mixture constructor.
A mixture specifies a set of tasks with associated mixing rates.
Mixing happens on preprocessed tokenized examples.
The mixing rates represent relative numbers of examples to use from their
associated tasks. Setting the mixing rates to be equal to the numbers of
examples in the tasks will result in each task going through an epoch in
about the same amount of time - i.e. all examples are sampled equally across
all tasks.
Rates can be expressed either as absolute numbers or as functions that
receive the Task as an argument.
Args:
tasks: a list where each element is either a string (task name) or a
pair whose first element is the task name and whose second element
is either a float (rate) or a function from Task to float.
default_rate: a float or a function from Task to float. This specifies the
default rate if rates are not provided in the `tasks` argument.
"""
self._task_to_rate = {}
self._tasks = []
for t in tasks:
if isinstance(t, str):
task_name = t
rate = default_rate
if default_rate is None:
raise ValueError("need a rate for each task")
else:
task_name, rate = t
self._tasks.append(TaskRegistry.get(task_name))
self._task_to_rate[task_name] = rate
if len(set(tuple(t.output_features) for t in self._tasks)) != 1:
raise ValueError(
"All Tasks in a Mixture must have the same output features."
)
if len(set(t.sentencepiece_model_path for t in self._tasks)) != 1:
raise ValueError(
"All Tasks in a Mixture must have the same sentencepiece_model_path."
)
@property
def tasks(self):
return self._tasks
def get_rate(self, task):
rate = self._task_to_rate[task.name]
return float(rate(task) if callable(rate) else rate)
def num_input_examples(self, split):
return sum(t.num_input_examples(split) for t in self.tasks)
@property
def output_features(self):
# We require all tasks to have the same output_features in __init__
# so we can just get the output_features for the 0th task
return self._tasks[0].output_features
@property
def sentencepiece_model_path(self):
# We require all tasks to have the same sentencepiece_model_path in __init__
# so we can just get the sentencepiece_model_path for the first task
return self._tasks[0].sentencepiece_model_path
def get_vocabulary(self):
"""Returns a SentencePieceVocabulary object using the Tasks' model."""
return self._tasks[0].get_vocabulary()
def get_dataset(
self,
sequence_length,
split=tfds.Split.TRAIN,
use_cached=False,
shuffle=True,
compute_stats_empirically=False,
):
"""Returns the dataset of mixed tasks using the object-specified rates.
Args:
sequence_length: dict mapping feature key to int length for that feature
split: string, the split to return for all tasks.
use_cached: bool, whether to use the cached dataset instead of processing
it on the fly. Defaults to True.
shuffle: bool, whether to shuffle the dataset. Only used when generating
on the fly (use_cached=False).
compute_stats_empirically: a boolean - does not work on TPU
"""
tasks = []
for task in self.tasks:
if split not in task.splits:
logging.info(
"Task %s has no '%s' split, skipping.", task.name, split
)
continue
tasks.append(task)
if not tasks:
raise ValueError("No datasets have a '{}' split".format(split))
def filter_features(ex):
return {k: v for k, v in ex.items() if k in self.output_features}
datasets = [
task.get_dataset(sequence_length, split, use_cached, shuffle=shuffle) # pylint:disable=g-complex-comprehension
.repeat()
.map(filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)
for task in tasks]
rates = [self.get_rate(task) for task in tasks]
# Sample from the dataset with the rates rates
dataset = tf.data.experimental.sample_from_datasets(datasets, rates)
if split == "train" and use_cached:
_log_mixing_proportions(tasks, datasets, rates, dataset, sequence_length,
compute_stats_empirically)
return dataset
# Functions to be used as mixing rates:
@gin.configurable
def rate_num_examples(task, maximum=None, temperature=1.0, scale=1.0):
"""Mixing rate equal to the number of examples for the task."""
# TODO(adarob): Support case when there are no cached stats.
ret = task.get_cached_stats("train")["examples"]
ret *= scale
if maximum:
ret = min(ret, maximum)
if temperature != 1.0:
ret = ret ** (1.0 / temperature)
return ret
@gin.configurable
def rate_unsupervised(task, value=1e6):
"""Gin-configurable mixing rate for the unsupervised co-training task."""
del task
return value
def _log_padding_fractions(dataset, sequence_length, num_examples=100):
"""Empirically compute the fraction of padding - log the results.
Args:
dataset: a tf.data.Dataset
sequence_length: dict from string to int (packed lengths)
num_examples: an integer
"""
logging.info("computing padding fractions")
keys = sequence_length.keys()
padding_frac = {k: 0 for k in keys}
for ex in tfds.as_numpy(dataset.take(num_examples)):
for k in keys:
padding_frac[k] += 1 - (sequence_length[k] / len(ex[k]))
for k in keys:
logging.info("%s padding fraction = %g", k, padding_frac[k])
def _log_mixing_proportions(
tasks, datasets, rates, mixed_dataset,
sequence_length, compute_stats_empirically):
"""Log information about the mixing proportions.
Called from Mixture.get_dataset.
Args:
tasks: a list of Task
datasets: a list of tf.data.Dataset
rates: a list of floats
mixed_dataset: a tf.data.Dataset
sequence_length: dict from string to int (packed lengths)
compute_stats_empirically: a boolean - does not work on TPU
"""
def _normalize(l):
denom = sum(l)
return [x / denom for x in l]
# compute some stats about the mixture
examples_fraction = _normalize(rates)
if compute_stats_empirically:
stats_examples = 100
mean_inputs_length = []
mean_targets_length = []
for dataset in datasets:
inputs_sum = 0
targets_sum = 0
for ex in tfds.as_numpy(dataset.take(stats_examples)):
inputs_sum += ex["inputs"].size
targets_sum += ex["targets"].size
mean_inputs_length.append(inputs_sum / float(stats_examples))
mean_targets_length.append(targets_sum / float(stats_examples))
else:
def _estimated_mean_length(task, key):
if task.token_preprocessor:
return sequence_length[key]
else:
return min(sequence_length[key],
(task.get_cached_stats("train")[key + "_tokens"] /
task.get_cached_stats("train")["examples"]))
mean_inputs_length = [_estimated_mean_length(task, "inputs")
for task in tasks]
mean_targets_length = [_estimated_mean_length(task, "targets")
for task in tasks]
inputs_fraction = _normalize(
[l * r for l, r in zip(mean_inputs_length, rates)])
targets_fraction = _normalize(
[l * r for l, r in zip(mean_targets_length, rates)])
logging.info("%12s %12s %12s %12s %12s %12s %s",
"rate", "ex.frac.", "inp.frac.", "tgt.frac.",
"inp.len.", "tgt.len", "task")
for i in range(len(rates)):
logging.info("%12g %12g %12g %12g %12g %12g %s",
rates[i], examples_fraction[i],
inputs_fraction[i], targets_fraction[i],
mean_inputs_length[i], mean_targets_length[i],
tasks[i].name)
if compute_stats_empirically:
_log_padding_fractions(mixed_dataset, sequence_length)
class MixtureRegistry(DatasetProviderRegistry):
_REGISTRY = {}
_PROVIDER_TYPE = Mixture
@classmethod
def add(cls, name, tasks, default_rate=None):
super(MixtureRegistry, cls).add(name, Mixture, tasks, default_rate)
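# --- illustrative sketch added by the editor: registering a Mixture ---
# Rates may be plain floats or callables such as rate_num_examples; the task names below
# are placeholders and must already be present in TaskRegistry.  Never called here.
def _example_register_mixture():
  MixtureRegistry.add("example_mixture", [("task_a", 1.0), ("task_b", 2.0)])
  MixtureRegistry.add("example_mixture_by_size", ["task_a", "task_b"],
                      default_rate=rate_num_examples)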
def get_mixture_or_task(task_or_mixture_name):
"""Return the Task or Mixture from the appropriate registry."""
mixtures = MixtureRegistry.names()
tasks = TaskRegistry.names()
if task_or_mixture_name in mixtures:
if task_or_mixture_name in tasks:
logging.warning("%s is both a Task and a Mixture, returning Mixture",
task_or_mixture_name)
return MixtureRegistry.get(task_or_mixture_name)
if task_or_mixture_name in tasks:
return TaskRegistry.get(task_or_mixture_name)
else:
raise ValueError("No Task or Mixture found with name: %s" %
task_or_mixture_name)
def get_subtasks(task_or_mixture):
"""Returns all the Tasks in a Mixture as a list or the Task itself."""
if isinstance(task_or_mixture, Task):
return [task_or_mixture]
else:
return task_or_mixture.tasks
def _validate_args(fn, expected_pos_args):
"""Ensure function has exactly expected positional args."""
argspec = inspect.getargspec(fn)
expected_pos_args = tuple(expected_pos_args)
actual_args = tuple(argspec.args)
if actual_args[:len(expected_pos_args)] != expected_pos_args:
raise ValueError(
"'%s' must have positional args %s, got: %s" % (
fn.__name__, expected_pos_args, actual_args))
actual_pos_args = tuple(
argspec.args[:-len(argspec.defaults)]
if argspec.defaults else argspec.args)
if actual_pos_args != expected_pos_args[:len(actual_pos_args)]:
raise ValueError(
"'%s' may only have positional args %s, got: %s" % (
fn.__name__, expected_pos_args, actual_pos_args))
| [
"[email protected]"
] | |
f25a62b621331ffbb01cb7d174dcc64601a12e56 | 032a1ad3c94e1126729417a16e2a95743d121244 | /cell_fitting/optimization/evaluation/plot_sine_stimulus/when_doublet_start.py | d9a7d6b7ffa9710e6429e0347cd53e945c59af5e | [] | no_license | cafischer/cell_fitting | 0fd928f5ae59488e12c77648c2e6227c1911d0e9 | 75a81987e1b455f43b5abdc8a9baf6b8f863bee2 | refs/heads/master | 2021-01-23T19:27:30.635173 | 2019-09-14T08:46:57 | 2019-09-14T08:46:57 | 44,301,986 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,321 | py | from __future__ import division
import matplotlib.pyplot as pl
from matplotlib.patches import Rectangle
from matplotlib.colors import Normalize
import numpy as np
import os
from nrn_wrapper import Cell
from cell_fitting.optimization.evaluation.plot_sine_stimulus import simulate_sine_stimulus
from cell_characteristics.analyze_APs import get_AP_onset_idxs
from cell_fitting.util import init_nan
from cell_characteristics import to_idx
pl.style.use('paper')
if __name__ == '__main__':
# parameters
save_dir = '/home/cf/Phd/programming/projects/cell_fitting/cell_fitting/results/best_models/1'
model_dir = os.path.join(save_dir, 'cell.json')
mechanism_dir = '../../../model/channels/vavoulis'
# load model
cell = Cell.from_modeldir(model_dir, mechanism_dir)
# parameters
AP_threshold = -10
amp1 = 0.6
sine1_dur = 1000
onset_dur = 500
offset_dur = 500
dt = 0.01
d_amp = 0.1
amp2s = np.arange(0.1, 1.0+d_amp, d_amp)
d_freq = 2
freq2s = np.arange(3, 15+d_freq, d_freq)
ISI_first = init_nan((len(amp2s), len(freq2s)))
save_dir_img = os.path.join(save_dir, 'img', 'sine_stimulus', 'when_doublet', 'start',
'amp1_'+str(amp1) + '_dur1_'+str(sine1_dur))
if not os.path.exists(save_dir_img):
os.makedirs(save_dir_img)
for i, amp2 in enumerate(amp2s):
for j, freq2 in enumerate(freq2s):
v, t, _ = simulate_sine_stimulus(cell, amp1, amp2, sine1_dur, freq2, onset_dur, offset_dur, dt)
onsets = get_AP_onset_idxs(v, AP_threshold) # use only period in the middle
if len(onsets) >= 2:
if (onsets[1] - onsets[0]) * dt < 1/2 * 1/freq2 * 1000:
ISI_first[i, j] = (onsets[1] - onsets[0]) * dt
                print(ISI_first[i, j])
pl.figure(figsize=(18, 8))
pl.plot(t, v, 'k', linewidth=1.0)
pl.xlabel('Time (ms)')
pl.ylabel('Membrane Potential (mV)')
pl.ylim(-95, 55)
pl.tight_layout()
pl.savefig(os.path.join(save_dir_img, 'v_'+str(amp2)+'_'+str(freq2)+'.png'))
#pl.show()
# plot
cmap = pl.get_cmap('viridis')
ISI_max = 15
norm = Normalize(vmin=0, vmax=ISI_max)
fig, ax = pl.subplots()
for i, amp2 in enumerate(amp2s):
for j, freq2 in enumerate(freq2s):
if not np.isnan(ISI_first[i, j]):
if ISI_first[i, j] > ISI_max:
w = d_amp / 2
h = d_freq / 6
ax.add_patch(Rectangle((amp2 - w / 2, freq2 - h / 2), w, h, color='r'))
else:
c = cmap(norm(ISI_first[i, j]))
w = d_amp/2
h = d_freq/6
ax.add_patch(Rectangle((amp2-w/2, freq2-h/2), w, h, color=c))
pl.xlim(amp2s[0]-d_amp/2, amp2s[-1]+d_amp/2)
pl.ylim(freq2s[0]-d_freq/2, freq2s[-1]+d_freq/2)
pl.xlabel('Amplitude (nA)')
pl.ylabel('Frequency (Hz)')
sm = pl.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array(np.array([0, ISI_max]))
cb = pl.colorbar(sm)
cb.ax.get_yaxis().labelpad = 20
cb.ax.set_ylabel('$ ISI_{2nd-1st}$', rotation=270)
pl.tight_layout()
pl.savefig(os.path.join(save_dir_img, 'ISI.png'))
#pl.show() | [
"[email protected]"
] | |
df74c510b2fa1f4bec7ac08c8ae445e9eb2ce365 | f259ca399ab33b5c2e66ae07921711ea5917ac9e | /pytorch/sphere20a.py | d4ce73637194c4236b20b4eb2bb1a4d6717c6d89 | [] | no_license | jizhuoran/HyperTea_Maker | 9a7930e1d6af995c8fdb9a15354eea5fc29f0806 | 2c3f8dfcb699495093165cd986eebedfb17a2433 | refs/heads/master | 2020-04-22T19:32:39.385611 | 2019-04-14T15:12:06 | 2019-04-14T15:12:48 | 170,610,900 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,046 | py | import torch.nn as nn
import torch
class sphere20a(nn.Module):
def __init__(self,classnum=10574,feature=False):
super(sphere20a, self).__init__()
self.classnum = classnum
self.feature = feature
#input = B*3*112*96
self.conv1_1 = nn.Conv2d(3,64,3,2,1) #=>B*64*56*48
self.relu1_1 = nn.PReLU(64)
self.conv1_2 = nn.Conv2d(64,64,3,1,1)
self.relu1_2 = nn.PReLU(64)
self.conv1_3 = nn.Conv2d(64,64,3,1,1)
self.relu1_3 = nn.PReLU(64)
self.conv2_1 = nn.Conv2d(64,128,3,2,1) #=>B*128*28*24
self.relu2_1 = nn.PReLU(128)
self.conv2_2 = nn.Conv2d(128,128,3,1,1)
self.relu2_2 = nn.PReLU(128)
self.conv2_3 = nn.Conv2d(128,128,3,1,1)
self.relu2_3 = nn.PReLU(128)
self.conv2_4 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_4 = nn.PReLU(128)
self.conv2_5 = nn.Conv2d(128,128,3,1,1)
self.relu2_5 = nn.PReLU(128)
self.conv3_1 = nn.Conv2d(128,256,3,2,1) #=>B*256*14*12
self.relu3_1 = nn.PReLU(256)
self.conv3_2 = nn.Conv2d(256,256,3,1,1)
self.relu3_2 = nn.PReLU(256)
self.conv3_3 = nn.Conv2d(256,256,3,1,1)
self.relu3_3 = nn.PReLU(256)
self.conv3_4 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_4 = nn.PReLU(256)
self.conv3_5 = nn.Conv2d(256,256,3,1,1)
self.relu3_5 = nn.PReLU(256)
self.conv3_6 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_6 = nn.PReLU(256)
self.conv3_7 = nn.Conv2d(256,256,3,1,1)
self.relu3_7 = nn.PReLU(256)
self.conv3_8 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_8 = nn.PReLU(256)
self.conv3_9 = nn.Conv2d(256,256,3,1,1)
self.relu3_9 = nn.PReLU(256)
self.conv4_1 = nn.Conv2d(256,512,3,2,1) #=>B*512*7*6
self.relu4_1 = nn.PReLU(512)
self.conv4_2 = nn.Conv2d(512,512,3,1,1)
self.relu4_2 = nn.PReLU(512)
self.conv4_3 = nn.Conv2d(512,512,3,1,1)
self.relu4_3 = nn.PReLU(512)
self.fc5 = nn.Linear(512*7*6,512)
self.fc6 = nn.Linear(512,self.classnum)
def forward(self, x):
x = self.relu1_1(self.conv1_1(x))
x = x + self.relu1_3(self.conv1_3(self.relu1_2(self.conv1_2(x))))
x = self.relu2_1(self.conv2_1(x))
x = x + self.relu2_3(self.conv2_3(self.relu2_2(self.conv2_2(x))))
x = x + self.relu2_5(self.conv2_5(self.relu2_4(self.conv2_4(x))))
x = self.relu3_1(self.conv3_1(x))
x = x + self.relu3_3(self.conv3_3(self.relu3_2(self.conv3_2(x))))
x = x + self.relu3_5(self.conv3_5(self.relu3_4(self.conv3_4(x))))
x = x + self.relu3_7(self.conv3_7(self.relu3_6(self.conv3_6(x))))
x = x + self.relu3_9(self.conv3_9(self.relu3_8(self.conv3_8(x))))
x = self.relu4_1(self.conv4_1(x))
x = x + self.relu4_3(self.conv4_3(self.relu4_2(self.conv4_2(x))))
x = x.view(x.size(0),-1)
x = self.fc5(x)
x = self.fc6(x)
return x | [
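# Minimal shape check (editorial sketch, not part of the original file): the four stride-2
# convolutions turn a 112x96 input into a 7x6 feature map, which is why fc5 expects
# 512*7*6 flattened features per sample.
if __name__ == '__main__':
    net = sphere20a()
    out = net(torch.randn(2, 3, 112, 96))
    print(out.shape)  # expected: torch.Size([2, 10574])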
"[email protected]"
] | |
42978fcaa46628548561391c85f29c13b5e7dd6d | 44600adf1731a449ff2dd5c84ce92c7f8b567fa4 | /colour_down/examples/plotting/examples_volume_plots.py | 769af73894ba737a07e58c6c32c7848950048d7f | [] | no_license | ajun73/Work_Code | b6a3581c5be4ccde93bd4632d8aaaa9ecc782b43 | 017d12361f7f9419d4b45b23ed81f9856278e849 | refs/heads/master | 2020-04-11T23:16:43.994397 | 2019-12-28T07:48:44 | 2019-12-28T07:48:44 | 162,161,852 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py | # -*- coding: utf-8 -*-
"""
Showcases colour models volume and gamut plotting examples.
"""
import numpy as np
from colour.plotting import (RGB_colourspaces_gamuts_plot, RGB_scatter_plot,
colour_plotting_defaults)
from colour.utilities import message_box
message_box('Colour Models Volume and Gamut Plots')
colour_plotting_defaults()
message_box(('Plotting "ITU-R BT.709" RGB colourspace volume in "CIE xyY" '
'colourspace.'))
RGB_colourspaces_gamuts_plot(
('ITU-R BT.709', ), reference_colourspace='CIE xyY')
print('\n')
message_box(('Comparing "ITU-R BT.709" and "ACEScg" RGB colourspaces volume '
'in "CIE L*a*b*" colourspace.'))
RGB_colourspaces_gamuts_plot(
('ITU-R BT.709', 'ACEScg'),
reference_colourspace='CIE Lab',
style={
'face_colours': (None, (0.25, 0.25, 0.25)),
'edge_colours': (None, (0.25, 0.25, 0.25)),
'edge_alpha': (1.0, 0.1),
'face_alpha': (1.0, 0.0)
})
print('\n')
message_box(('Plotting "ACEScg" colourspaces values in "CIE L*a*b*" '
'colourspace.'))
RGB = np.random.random((32, 32, 3))
RGB_scatter_plot(
RGB,
'ACEScg',
reference_colourspace='CIE Lab',
colourspaces=('ACEScg', 'ITU-R BT.709'),
face_colours=((0.25, 0.25, 0.25), None),
edge_colours=((0.25, 0.25, 0.25), None),
edge_alpha=(0.1, 0.5),
face_alpha=(0.1, 0.5),
grid_face_colours=(0.1, 0.1, 0.1),
grid_edge_colours=(0.1, 0.1, 0.1),
grid_edge_alpha=0.5,
grid_face_alpha=0.1)
| [
"[email protected]"
] | |
ead60febeb04e387de8528926f63dddb77c1025d | d27b030ce654d523b266821080acb246d71a85af | /PDB/clrender.py | b5283cb280bcb2552552a4dac8d1945ddc356746 | [] | no_license | amiller/graphicsii | 9b6d638591a8df3267865a1be83cb1591586f662 | da6cc6347d2b1f344056b71358a4b5b8efabdb77 | refs/heads/master | 2016-09-03T06:23:42.297039 | 2011-05-02T02:39:15 | 2011-05-02T02:39:15 | 1,689,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,923 | py | import pyglet.gl
from OpenGL.GL import *
from OpenGL.GLU import *
from molecule import Molecule
import pyopencl as cl
import numpy as np
def print_info(obj, info_cls):
for info_name in sorted(dir(info_cls)):
if not info_name.startswith("_") and info_name != "to_string":
info = getattr(info_cls, info_name)
try:
info_value = obj.get_info(info)
except:
info_value = "<error>"
print "%s: %s" % (info_name, info_value)
platform = cl.get_platforms()[0]
device = platform.get_devices()[0]
context = cl.Context([device])
print_info(context.devices[0], cl.device_info)
queue = cl.CommandQueue(context,
properties = cl.command_queue_properties.PROFILING_ENABLE)
mf = cl.mem_flags
N = 512
class CLRender(object):
angles = [0,0,0]
scale = 1
mol = None
env_buf = None
def __init__(self):
self.dst = np.empty((N,N,4)).astype(np.uint8)
self.dst_buf = cl.Buffer(context, mf.WRITE_ONLY, self.dst.nbytes)
self.inv_matrix = cl.Buffer(context, mf.READ_ONLY, 16 * 4)
self.matrix = cl.Buffer(context, mf.READ_ONLY, 16 * 4)
with open('kernel.cl','r') as f:
self.program = cl.Program(context, f.read()).build("-cl-mad-enable")
        print(self.program.get_build_info(context.devices[0], cl.program_build_info.LOG))
self.dstTex = glGenTextures(1);
glBindTexture(GL_TEXTURE_2D, self.dstTex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, N, N, 0, GL_RGBA, GL_UNSIGNED_BYTE, None);
glBindTexture(GL_TEXTURE_2D, 0);
print_info(self.program, cl.program_info)
print_info(self.program.pdbTracer, cl.kernel_info)
grid = np.array(range(256),dtype=np.float32)/256
x1,x2 = np.meshgrid(grid, grid)
rad = np.sqrt(x1)
phi = 2*np.pi * x2
phimap = np.dstack((np.cos(phi)*rad, np.sin(phi)*rad, np.sqrt(1-rad*rad), 0*rad))
self.p = phimap
fmt = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT)
self.phimap = cl.Image(context, mf.READ_ONLY | mf.COPY_HOST_PTR, fmt,
shape=phimap.shape[:2], hostbuf=np.array(phimap, order='C'))
def applySceneTransforms(self):
gluLookAt(0, 0, 2*self.mol.radius, 0, 0, 0, 0, 1, 0); # Push molecule away from the origin along -Z direction.
glScalef(self.scale,self.scale,self.scale);
def mouse_rotate(xAngle, yAngle, zAngle):
glRotatef(xAngle, 1.0, 0.0, 0.0);
glRotatef(yAngle, 0.0, 1.0, 0.0);
glRotatef(zAngle, 0.0, 0.0, 1.0);
mouse_rotate(self.angles[0],self.angles[1],self.angles[2]);
        glTranslatef(-self.mol.x, -self.mol.y, -self.mol.z); # Bring molecule center to origin
def render(self):
glBindTexture(GL_TEXTURE_2D, self.dstTex)
glEnable(GL_TEXTURE_2D)
glBegin(GL_QUADS)
glTexCoord2f( 0.0, 0.0 ); glVertex3f( -1.0, -1.0, -1.0 )
glTexCoord2f( 0.0, 1.0 ); glVertex3f( -1.0, 1.0, -1.0 )
glTexCoord2f( 1.0, 1.0 ); glVertex3f( 1.0, 1.0, -1.0 )
glTexCoord2f( 1.0, 0.0 ); glVertex3f( 1.0, -1.0, -1.0 )
glEnd()
glDisable(GL_TEXTURE_2D)
def compute(self):
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
self.applySceneTransforms()
mat = np.array(glGetFloat(GL_MODELVIEW_MATRIX).transpose(), order='C')
glPopMatrix()
inv = np.array(np.linalg.inv(mat), order='C')
e1 = cl.enqueue_write_buffer(queue, self.matrix, mat)
e2 = cl.enqueue_write_buffer(queue, self.inv_matrix, inv)
e3 = self.program.pdbTracer(queue, self.dst.shape[:2], self.dst_buf,
self.matrix, self.inv_matrix,
np.array(len(self.mol.spheres)), self.spheredata,
self.envmap, self.phimap, self.sampler)
e4 = cl.enqueue_read_buffer(queue, self.dst_buf, self.dst)
queue.finish()
e4.wait()
for e in [e3]:
            print((e.profile.END - e.profile.START)*1e-9)
glBindTexture(GL_TEXTURE_2D, self.dstTex)
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, N, N, GL_RGBA, GL_UNSIGNED_BYTE, self.dst)
def set_envmap(self, envmap):
fmt = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT)
em = np.zeros(envmap.shape[:2] + (4,), dtype=np.float32)
em[:,:,:3] = envmap; em[:,:,3] = 1;
self.envmap = cl.Image(context, mf.READ_ONLY | mf.COPY_HOST_PTR, fmt,
shape=em.shape[:2], hostbuf=em)
self.sampler = cl.Sampler(context, True, cl.addressing_mode.CLAMP, cl.filter_mode.LINEAR)
def set_molecule(self, mol):
self.mol = mol
self.spheredata = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf = self.mol.spheredata)
def load_molecule(self, filename):
self.set_molecule(Molecule(filename))
if __name__ == "__main__":
from pfmloader import load_pfm
r = CLRender()
r.set_molecule(Molecule('data/sugars/sucrose.pdb'))
r.set_envmap(load_pfm('data/probes/stpeters_probe.pfm'))
r.compute()
| [
"[email protected]"
] | |
21e1b0da1f6e231a3370a401206faebd2f2aff3e | c351c54ff292d4ce8628cf033f8f3026829d79f3 | /blog_api/apis/authorization_layer/python/bin/pyrsa-keygen | 126ebac139a75ad6bfe8c1f5d7e0f83016d8882e | [] | no_license | MathiasDarr/Portfolio | 424ba0d3bd3b36bb9be09a31ea0b9bca2d3cc568 | 0eb6377d9aedba75ac30a0a5583f47dc31d31810 | refs/heads/master | 2023-02-06T04:33:44.123544 | 2020-12-31T08:35:45 | 2020-12-31T08:35:45 | 261,949,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | #!/home/mddarr/data/anaconda3/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import keygen
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(keygen())
| [
"[email protected]"
] | ||
d0c9931e691e704b6c556340c506cac64843ae85 | 08966e05b74e20774ed9cdd4501e843fab0a3a86 | /capacitacion/views.py | 9cc93845a726d801555587f8fe78a07b320dc903 | [] | no_license | luisfarfan/capacitacion_v2 | 6a650ea3119ad7da65f26f146c7e5d9d5139e76d | 802ef6b4c1101153a1c77e1bdf41bfe1966f4bff | refs/heads/master | 2020-06-30T20:14:52.686006 | 2016-12-10T00:05:01 | 2016-12-10T00:05:01 | 74,354,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,455 | py | from rest_framework.views import APIView
from django.db.models import Count, Value
from django.http import JsonResponse
from django.http import HttpResponse
from django.template import loader
from serializer import *
from rest_framework import generics
from django.views.decorators.csrf import csrf_exempt
from django.db.models import F
from datetime import datetime
import pandas as pd
from django.db.models.functions import Concat
from django.core.exceptions import ObjectDoesNotExist
import json
def modulo_registro(request):
template = loader.get_template('capacitacion/modulo_registro.html')
context = {
'titulo_padre': 'Capacitacion',
'titulo_hijo': 'REGISTRO DE LOCAL'
}
return HttpResponse(template.render(context, request))
def cursos_evaluaciones(request):
template = loader.get_template('capacitacion/cursos_evaluaciones.html')
context = {
'titulo_padre': 'Capacitacion',
'titulo_hijo': 'Cursos y Evaluaciones'
}
return HttpResponse(template.render(context, request))
def asistencia(request):
template = loader.get_template('capacitacion/asistencia.html')
context = {
'titulo_padre': 'Capacitacion',
'titulo_hijo': 'Modulo de Asistencia'
}
return HttpResponse(template.render(context, request))
def distribucion(request):
template = loader.get_template('capacitacion/distribucion.html')
context = {
'titulo_padre': 'Capacitacion',
'titulo_hijo': 'Modulo de Distribucion'
}
return HttpResponse(template.render(context, request))
# Create your views here.
class DepartamentosList(APIView):
def get(self, request):
departamentos = list(
Ubigeo.objects.values('ccdd', 'departamento').annotate(dcount=Count('ccdd', 'departamento')))
response = JsonResponse(departamentos, safe=False)
return response
class ProvinciasList(APIView):
def get(self, request, ccdd):
provincias = list(
Ubigeo.objects.filter(ccdd=ccdd).values('ccpp', 'provincia').annotate(dcount=Count('ccpp', 'provincia')))
response = JsonResponse(provincias, safe=False)
return response
class DistritosList(APIView):
def get(self, request, ccdd, ccpp):
distritos = list(Ubigeo.objects.filter(ccdd=ccdd, ccpp=ccpp).values('ccdi', 'distrito').annotate(
dcount=Count('ccdi', 'distrito')))
response = JsonResponse(distritos, safe=False)
return response
class ZonasList(APIView):
def get(self, request, ubigeo):
zonas = list(
Zona.objects.filter(UBIGEO=ubigeo).values('UBIGEO', 'ZONA', 'ETIQ_ZONA').annotate(
dcount=Count('UBIGEO', 'ZONA')))
response = JsonResponse(zonas, safe=False)
return response
class TbLocalByUbigeoViewSet(generics.ListAPIView):
serializer_class = LocalSerializer
def get_queryset(self):
ubigeo = self.kwargs['ubigeo']
return Local.objects.filter(ubigeo=ubigeo)
class TbLocalByZonaViewSet(generics.ListAPIView):
serializer_class = LocalAulasSerializer
def get_queryset(self):
ubigeo = self.kwargs['ubigeo']
zona = self.kwargs['zona']
return Local.objects.filter(ubigeo=ubigeo, zona=zona)
def TbLocalAmbienteByLocalViewSet(request, id_local):
query = LocalAmbiente.objects.filter(id_local=id_local).order_by('-capacidad').annotate(
nombre_ambiente=F('id_ambiente__nombre_ambiente')).values(
'id_localambiente', 'numero', 'capacidad', 'nombre_ambiente')
return JsonResponse(list(query), safe=False)
class LocalAmbienteByLocalAulaViewSet(generics.ListAPIView):
serializer_class = LocalAmbienteSerializer
def get_queryset(self):
id_local = self.kwargs['id_local']
id_ambiente = self.kwargs['id_ambiente']
return LocalAmbiente.objects.filter(id_local=id_local, id_ambiente=id_ambiente)
class AmbienteViewSet(viewsets.ModelViewSet):
queryset = Ambiente.objects.all()
serializer_class = AmbienteSerializer
class LocalViewSet(viewsets.ModelViewSet):
queryset = Local.objects.all()
serializer_class = LocalSerializer
class LocalAmbienteViewSet(viewsets.ModelViewSet):
queryset = LocalAmbiente.objects.all()
serializer_class = LocalAmbienteSerializer
class CursobyEtapaViewSet(generics.ListAPIView):
serializer_class = CursoSerializer
def get_queryset(self):
id_etapa = self.kwargs['id_etapa']
return Curso.objects.filter(id_etapa=id_etapa)
class CriteriosViewSet(viewsets.ModelViewSet):
queryset = Criterio.objects.all()
serializer_class = CriterioSerializer
class CursoCriteriosViewSet(viewsets.ModelViewSet):
queryset = CursoCriterio.objects.all()
serializer_class = CursoCriterioSerializer
class CursoViewSet(viewsets.ModelViewSet):
queryset = Curso.objects.all()
serializer_class = CursoSerializer
class CursoCriteriobyCursoViewSet(generics.ListAPIView):
serializer_class = CursoCriterioSerializer
def get_queryset(self):
id_curso = self.kwargs['id_curso']
return CursoCriterio.objects.filter(id_curso=id_curso)
class PEA_BY_AULAViewSet(viewsets.ModelViewSet):
queryset = LocalAmbiente.objects.all()
serializer_class = PEA_BY_AULASerializer
class PEA_ASISTENCIAViewSet(viewsets.ModelViewSet):
queryset = PEA_ASISTENCIA.objects.all()
serializer_class = PEA_ASISTENCIASerializer
class PEA_AULAViewSet(generics.ListAPIView):
serializer_class = PEA_AULASerializer
def get_queryset(self):
id_localambiente = self.kwargs['id_localambiente']
return PEA_AULA.objects.filter(id_localambiente=id_localambiente)
class PEA_AULAbyLocalAmbienteViewSet(generics.ListAPIView):
serializer_class = PEA_AULASerializer
def get_queryset(self):
id_localambiente = self.kwargs['id_localambiente']
return PEA_AULA.objects.filter(id_localambiente=id_localambiente)
@csrf_exempt
def sobrantes_zona(request):
if request.method == "POST" and request.is_ajax():
ubigeo = request.POST['ubigeo']
zona = request.POST['zona']
sobrantes = PEA.objects.exclude(id_pea__in=PEA_AULA.objects.values('id_pea')).filter(ubigeo=ubigeo,
zona=zona).order_by(
'ape_paterno').values('dni', 'ape_paterno', 'ape_materno', 'nombre', 'cargo')
return JsonResponse(list(sobrantes), safe=False)
return JsonResponse({'msg': False})
@csrf_exempt
def asignar(request):
if request.method == "POST" and request.is_ajax():
ubigeo = request.POST['ubigeo']
zona = request.POST['zona']
locales_zona = Local.objects.filter(ubigeo=ubigeo, zona=zona)
for e in locales_zona:
aulas_by_local = LocalAmbiente.objects.filter(id_local=e.id_local).order_by('-capacidad')
for a in aulas_by_local:
if disponibilidad_aula(a.id_localambiente):
pea_ubicar = PEA.objects.exclude(id_pea__in=PEA_AULA.objects.values('id_pea')).filter(
ubigeo=ubigeo, zona=zona,
id_cargofuncional__in=Funcionario.objects.filter(id_curso=e.id_curso)).order_by(
'ape_paterno')[:a.capacidad]
for p in pea_ubicar:
pea = PEA.objects.get(pk=p.id_pea)
aula = LocalAmbiente.objects.get(pk=a.id_localambiente)
pea_aula = PEA_AULA(id_pea=pea, id_localambiente=aula)
pea_aula.save()
return JsonResponse({'msg': True})
return JsonResponse({'msg': False})
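# Flow of asignar() above (descriptive note): for every Local in the requested ubigeo/zona it walks the
# local's aulas from largest to smallest capacity; when disponibilidad_aula() reports free seats, it takes
# up to `capacidad` PEA from the zone that are not yet in PEA_AULA and whose cargo matches a Funcionario
# entry for the local's curso (ordered by ape_paterno), and persists one PEA_AULA row per person.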
def disponibilidad_aula(aula):
aula = LocalAmbiente.objects.get(pk=aula)
cantidad_asignada = PEA_AULA.objects.filter(id_localambiente=aula).count()
is_disponible = True
if cantidad_asignada >= aula.capacidad:
is_disponible = False
return is_disponible
"""
TURNO
0 = MANANA
1 = TARDE
2 = TOOD EL DIA
"""
def getRangeDatesLocal(request, id_local):
format_fechas = []
local = Local.objects.filter(pk=id_local).values('fecha_inicio', 'fecha_fin', 'turno_uso_local')
fecha_inicio = datetime.strptime(local[0]['fecha_inicio'], '%d/%m/%Y').strftime('%Y-%m-%d')
fecha_fin = datetime.strptime(local[0]['fecha_fin'], '%d/%m/%Y').strftime('%Y-%m-%d')
rango_fechas = pd.Series(pd.date_range(fecha_inicio, fecha_fin).format())
for f in rango_fechas:
format_fechas.append(datetime.strptime(f, '%Y-%m-%d').strftime('%d/%m/%Y'))
return JsonResponse({'fechas': format_fechas, 'turno': local[0]['turno_uso_local']}, safe=False)
def getPeaAsistencia(request):
id_localambiente = request.POST['id_localambiente']
fecha = request.POST['fecha']
pea = PEA_AULA.objects.filter(id_localambiente=id_localambiente).annotate(
nombre_completo=Concat(
'id_pea__ape_paterno', Value(' '), 'id_pea__ape_materno', Value(' '), 'id_pea__nombre'),
cargo=F('id_pea__cargo')).values('nombre_completo', 'cargo', 'id_pea__pea_aula__pea_asistencia__turno_manana',
'id_pea__pea_aula__pea_asistencia__turno_tarde')
return JsonResponse(list(pea), safe=False)
@csrf_exempt
def save_asistencia(request):
if request.method == "POST" and request.is_ajax():
data = json.loads(request.body)
for i in data:
try:
pea = PEA_ASISTENCIA.objects.get(fecha=i['fecha'],
id_peaaula=PEA_AULA.objects.get(pk=i['id_peaaula']))
except ObjectDoesNotExist:
pea = None
if pea is None:
pea_asistencia = PEA_ASISTENCIA(fecha=i['fecha'], turno_manana=i['turno_manana'],
turno_tarde=i['turno_tarde'],
id_peaaula=PEA_AULA.objects.get(pk=i['id_peaaula']))
pea_asistencia.save()
else:
pea_asistencia = PEA_ASISTENCIA.objects.get(fecha=i['fecha'],
id_peaaula=PEA_AULA.objects.get(pk=i['id_peaaula']))
pea_asistencia.turno_tarde = i['turno_tarde']
pea_asistencia.turno_manana = i['turno_manana']
pea_asistencia.save()
return JsonResponse({'msg': True})
| [
"[email protected]"
] | |
7ecaa4450f543c9a68460f1cc3e01872c9cb707f | 09ee86d0bd77ca79992f073b6c8b1e98b88cb09b | /resource_allocation.py | df8c166cbc456843a049ef8501f85c59300fe21a | [] | no_license | JaneWuNEU/hitdl_server | 624fbb5cfea3641cb624a291ed6de1e274982463 | 9076a813c803bc9c47054fff7bae2824304da282 | refs/heads/master | 2022-12-29T22:14:44.496492 | 2020-10-13T01:58:19 | 2020-10-13T01:58:19 | 303,327,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,589 | py | import cvxpy as cp
import numpy as np
c1 = np.array([1,3,5])
e1 = np.array([1.505,1.351,1.27])*c1
c2 = np.array([2,5,7])
e2 = np.array([1.844,1.502,1.843])*c2
c3 = np.array([1,5])
e3 = np.array([1.505,1.148])*c3
C = 12
'''
x1 = cp.Variable(name="inception",shape=(len(c1),1),integer=True,pos=True)
y1 = cp.Variable(shape=(len(c1),1),integer=True,pos=True)
x2 = cp.Variable(name="mobilenet",shape=(len(c2),1),integer=True,pos=True)
y2 = cp.Variable(shape=(len(c2),1),integer=True,pos=True)
x3= cp.Variable(name="resnet",shape=(len(c3),1),integer=True,pos=True)
y3 = cp.Variable(shape=(len(c3),1),integer=True,pos=True)
x1 = cp.Variable(name="inception",shape=(len(c1),1),pos=True)
y1 = cp.Variable(shape=(len(c1),1),pos=True)
x2 = cp.Variable(name="mobilenet",shape=(len(c2),1),pos=True)
y2 = cp.Variable(shape=(len(c2),1),pos=True)
x3= cp.Variable(name="resnet",shape=(len(c3),1),pos=True)
y3 = cp.Variable(shape=(len(c3),1),pos=True)
exp1 = [email protected](x1-np.ones((len(c1),1)),y1-np.ones((len(c1),1)))
exp2 = [email protected](x2-np.ones((len(c2),1)),y2-np.ones((len(c2),1)))
exp3 = [email protected](x3-np.ones((len(c3),1)),y3-np.ones((len(c3),1)))
exp4 = [email protected](x1-np.ones((len(c1),1)),y1-np.ones((len(c1),1)))
exp5 = [email protected](x2-np.ones((len(c2),1)),y2-np.ones((len(c2),1)))
exp6 = [email protected](x3-np.ones((len(c3),1)),y3-np.ones((len(c3),1)))
obj = exp1+exp2+exp3
print(obj.shape)
cores_cons = exp4+exp5+exp6
prob1 = cp.Problem(cp.Maximize(obj),
[cp.sum(y1-np.ones((len(c1),1)))==1,
cp.sum(y2-np.ones((len(c2),1)))==1,
cp.sum(y3-np.ones((len(c3),1)))==1,
cores_cons <= C])
result = prob1.solve(gp=True)
'''
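# Reading of the class below (descriptive note): each (model, intra-op core count) pair is one "plan";
# Z[i] is the integer number of instances launched with plan i. The objective maximizes the summed
# efficiency, total cores are bounded by CPU_Cores, each model is capped at
# C_upper = round(CPU_Cores * F) cores, every model gets at least one instance, and each Z[i] is
# bounded above by how many instances of that plan fit inside C_upper.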
class MCKPAllocation:
def __init__(self,CPU_Cores,F):
self.CPU_Cores = CPU_Cores
self.F = F
        self.C_upper = round(CPU_Cores * F)
def cpu_const(self):
ins_size = {"inception":{"intra":[1,3,5],"efficiency":[2.605,1.351,1.27]},
"resnet":{"intra":[2,5,7],"efficiency":[1.844,1.502,1.843]},
"mobilenet":{"intra":[1,5],"efficiency":[1.505,1.148]}}
total_plans = len(ins_size["inception"]["intra"])+len(ins_size["resnet"]["intra"])+len(ins_size["mobilenet"]["intra"])
overall_cons = np.zeros(total_plans)
overall_E = np.zeros(total_plans)
model_cons = [np.zeros(total_plans),np.zeros(total_plans),np.zeros(total_plans)]#{"inception":np.zeros(total_plans),"resnet":np.zeros(total_plans),"mobilenet":np.zeros(total_plans)}
cons_start = {"inception":0,"resnet":len(ins_size["inception"]["intra"]),"mobilenet":len(ins_size["resnet"]["intra"])+len(ins_size["inception"]["intra"])}
i = 0
ins_num_upper = np.zeros(total_plans)
ins_num_lower = [np.zeros(total_plans),np.zeros(total_plans),np.zeros(total_plans)]
for model_name in ["inception","resnet","mobilenet"]:
model_cons[i][cons_start[model_name]:cons_start[model_name]+len(ins_size[model_name]["intra"])] = ins_size[model_name]["intra"]
overall_cons[cons_start[model_name]:cons_start[model_name]+len(ins_size[model_name]["intra"])] = ins_size[model_name]["intra"]
overall_E[cons_start[model_name]:cons_start[model_name] + len(ins_size[model_name]["intra"])] = ins_size[model_name]["efficiency"]
            ins_num_upper[cons_start[model_name]:cons_start[model_name] + len(ins_size[model_name]["intra"])] = np.floor(self.C_upper/np.array(ins_size[model_name]["intra"]))
ins_num_lower[i][cons_start[model_name]:cons_start[model_name] + len(ins_size[model_name]["intra"])] =np.ones(len(ins_size[model_name]["intra"]))
i = i+1
return overall_cons,model_cons,overall_E,ins_num_upper.reshape((total_plans,1)),ins_num_lower
def resource_allocation(self):
result = self.cpu_const()
overall_cons = result[0]
model_cons = result[1]
overall_E = result[2]
ins_num_upper = result[3]
ins_num_lower = result[4]
Z = cp.Variable((len(overall_cons),1),integer=True)
obj = cp.Maximize(overall_E@Z)
        prob = cp.Problem(obj, [overall_cons@Z <= self.CPU_Cores,
                                model_cons[0]@Z <= self.C_upper, model_cons[1]@Z <= self.C_upper, model_cons[2]@Z <= self.C_upper,
                                ins_num_lower[0] @ Z >= 1, ins_num_lower[1] @ Z >= 1, ins_num_lower[2] @ Z >= 1,
                                Z <= ins_num_upper,
                                Z >= np.zeros(shape=(len(overall_cons), 1))])
print(prob.solve(),prob.status)
print(Z.value)
if __name__ == "__main__":
    # Illustrative driver (editorial assumption): F = 0.7, i.e. no single model may take more than 70% of the cores.
    MCKPAllocation(CPU_Cores=C, F=0.7).resource_allocation()
| [
"[email protected]"
] | |
e739ca2725e96d1eb54ca21c74baf7f2e0a954eb | 7ce56dc3a1110b61d0087565f02b4fe576cad58c | /scrapy_test/coolscrapy/coolscrapy/middlewares.py | 2c8cf90cc52912785d05f2df18a5cb027042f66d | [] | no_license | lssxfy123/PythonStudy | 7c251961ce72217e83184853cb0c11dc773e4075 | d5beba373b78c6c0276c413a44819d3084899d01 | refs/heads/master | 2022-11-28T17:25:36.957483 | 2021-11-26T09:55:32 | 2021-11-26T09:55:32 | 55,392,700 | 1 | 1 | null | 2022-11-22T01:39:12 | 2016-04-04T07:28:25 | Jupyter Notebook | UTF-8 | Python | false | false | 3,605 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class CoolscrapySpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class CoolscrapyDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
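# Wiring note (assumption, standard Scrapy configuration rather than anything stated in this file):
# these middlewares only take effect once registered in settings.py, e.g.
# SPIDER_MIDDLEWARES = {'coolscrapy.middlewares.CoolscrapySpiderMiddleware': 543}
# DOWNLOADER_MIDDLEWARES = {'coolscrapy.middlewares.CoolscrapyDownloaderMiddleware': 543}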
| [
"[email protected]"
] | |
bef5e7923ef0e16ee3bfb5807262adf9b9c54494 | 159f1032e3da50f15718e2ca99f6a3e50642b4b0 | /disquaire_project/disquaire_project/settings.py | b5ca2ae7e481309473f430f6948a4b80df16b46c | [] | no_license | Ellobo1er/disquaire_project | a3b29372dfe95f9938cd84723633f0ef3120ab3e | 0af1d93b2f8aa6302cb6ecb0b2d5b3bd7ddcb2ef | refs/heads/master | 2023-06-28T14:23:54.285800 | 2021-07-29T16:01:23 | 2021-07-29T16:01:23 | 390,767,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,373 | py | """
Django settings for disquaire_project project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$4fpf$=&&zwkr1qty!b1gu)57_y+_kvvygn5@bz698or1jqa&s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'store.apps.StoreConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
]
# ...
INTERNAL_IPS = ['127.0.0.1']
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'disquaire_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'disquaire_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'disquaire',
'USER': 'postgres',
'PASSWORD': 'admin',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'fr'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
INTERNAL_IPS = ['127.0.0.1']
| [
"[email protected]"
] | |
6618838361e332c0f1e2a1d03010d913848c0609 | 4144df22392350035a9a24fcbc23fd1c6bce5c12 | /Lib/glyphNameFormatter/rangeProcessors/ipa_extensions.py | ccf36e61c84dd3c9b8071eb6a3377ab9c632c3e7 | [
"BSD-3-Clause",
"Adobe-Glyph"
] | permissive | danielgrumer/glyphNameFormatter | 55b6076684bed7ff4cc6e37ce4a0bb0e2ce86a4a | 9a41b3ef02c01cd18afe0232f6e436a2f7379178 | refs/heads/master | 2020-12-11T05:35:47.835908 | 2016-03-19T09:50:33 | 2016-03-19T09:50:33 | 53,578,090 | 0 | 0 | null | 2016-03-10T11:07:31 | 2016-03-10T11:07:30 | null | UTF-8 | Python | false | false | 1,329 | py |
def process(self):
self.edit("LATIN")
self.edit("OPEN", "open")
self.edit("WITH FISHHOOK", "fishhook")
self.edit("SCRIPT", "script")
self.edit("WITH BELT", "belt")
self.edit("WITH MIDDLE TILDE", "middletilde")
self.edit("WITH LONG LEG", "longleg")
self.edit("WITH CROSSED-TAIL", "crossedtail")
self.edit("BILABIAL", "bilabial")
self.edit("BIDENTAL", "bidental")
self.edit("STRETCHED", "stretched")
self.edit("WITH STROKE", "stroke")
self.edit("SQUAT", "squat")
self.edit("INVERTED", "inverted")
self.edit("REVERSED", "reversed")
self.replace("DZ", "dzed")
self.replace("LZ", "lzed")
self.replace("DIGRAPH")
self.replace("PERCUSSIVE", "percussive")
self.replace("GLOTTAL", "glottal")
self.replace("STOP", "stop")
self.replace("PHARYNGEAL", "pharyngeal")
self.replace("VOICED", "voiced")
self.replace("FRICATIVE", "fricative")
self.replace("LETTER CLICK", "click")
self.replace("LETTER GLOTTAL STOP WITH STROKE", "glottalstopstroke")
self.replace("LETTER SMALL CAPITAL OE", "OEsmall")
self.processDiacritics()
self.processShape()
self.handleCase()
self.replace("LETTER")
self.compress()
if __name__ == "__main__":
from glyphNameFormatter.test import printRange
printRange("IPA Extensions")
| [
"[email protected]"
] | |
e6fae05c449f2092d5fda416fb23b95be3b3aa1f | 73105a000374f7bbe97dac50b91b0c019826a1ba | /account/pipelines.py | 8fb36edc4bd7c7340a1ddea7f7606a19b22a27d7 | [
"MIT"
] | permissive | carpedm20/UNIST-auction | 657e80840e4c6adbfaeebd118acc03d4e04cc2a5 | f2db1d6fdb2c7781b3c142f8a2582888e24ad06d | refs/heads/master | 2021-01-22T04:43:55.844905 | 2014-10-21T14:01:32 | 2014-10-21T14:01:32 | 22,419,149 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | from social_auth.backends.facebook import FacebookBackend
from social_auth.backends.twitter import TwitterBackend
from social_auth.backends import google
from social_auth.signals import socialauth_registered
def get_user_avatar(backend, details, response, social_user, uid,\
user, *args, **kwargs):
url = None
if backend.__class__ == FacebookBackend:
url = "http://graph.facebook.com/%s/picture?type=large" % response['id']
elif backend.__class__ == TwitterBackend:
url = response.get('profile_image_url', '').replace('_normal', '')
else:
url = 'http://www.gravatar.com/avatar/00000000000000000000000000000000'
if url:
user.profile_image_url = url
user.save()
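# Hook-up sketch (assumption based on django-social-auth conventions, not shown in this repo):
# the function is meant to be listed in settings.py after the default pipeline entries, e.g.
# SOCIAL_AUTH_PIPELINE = (
#     # ... default django-social-auth pipeline entries ...
#     'account.pipelines.get_user_avatar',
# )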
| [
"[email protected]"
] | |
7bb48802e116289f7974b0bc98bf6ea4da6cdcc9 | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/tests/unit/gapic/dialogflowcx_v3beta1/test_session_entity_types.py | e149c587481234541d44591766391b6ac202a885 | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97,381 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflowcx_v3beta1.services.session_entity_types import SessionEntityTypesAsyncClient
from google.cloud.dialogflowcx_v3beta1.services.session_entity_types import SessionEntityTypesClient
from google.cloud.dialogflowcx_v3beta1.services.session_entity_types import pagers
from google.cloud.dialogflowcx_v3beta1.services.session_entity_types import transports
from google.cloud.dialogflowcx_v3beta1.services.session_entity_types.transports.base import _API_CORE_VERSION
from google.cloud.dialogflowcx_v3beta1.services.session_entity_types.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.dialogflowcx_v3beta1.types import entity_type
from google.cloud.dialogflowcx_v3beta1.types import session_entity_type
from google.cloud.dialogflowcx_v3beta1.types import session_entity_type as gcdc_session_entity_type
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-api-core >= 1.26.0 is required:
# - Delete all the api-core and auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
requires_api_core_lt_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"),
reason="This test requires google-api-core < 1.26.0",
)
requires_api_core_gte_1_26_0 = pytest.mark.skipif(
packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"),
reason="This test requires google-api-core >= 1.26.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert SessionEntityTypesClient._get_default_mtls_endpoint(None) is None
assert SessionEntityTypesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert SessionEntityTypesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert SessionEntityTypesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert SessionEntityTypesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert SessionEntityTypesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
SessionEntityTypesClient,
SessionEntityTypesAsyncClient,
])
def test_session_entity_types_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'dialogflow.googleapis.com:443'
@pytest.mark.parametrize("client_class", [
SessionEntityTypesClient,
SessionEntityTypesAsyncClient,
])
def test_session_entity_types_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'dialogflow.googleapis.com:443'
def test_session_entity_types_client_get_transport_class():
transport = SessionEntityTypesClient.get_transport_class()
available_transports = [
transports.SessionEntityTypesGrpcTransport,
]
assert transport in available_transports
transport = SessionEntityTypesClient.get_transport_class("grpc")
assert transport == transports.SessionEntityTypesGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(SessionEntityTypesClient, transports.SessionEntityTypesGrpcTransport, "grpc"),
(SessionEntityTypesAsyncClient, transports.SessionEntityTypesGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(SessionEntityTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SessionEntityTypesClient))
@mock.patch.object(SessionEntityTypesAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SessionEntityTypesAsyncClient))
def test_session_entity_types_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(SessionEntityTypesClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(SessionEntityTypesClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(SessionEntityTypesClient, transports.SessionEntityTypesGrpcTransport, "grpc", "true"),
(SessionEntityTypesAsyncClient, transports.SessionEntityTypesGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(SessionEntityTypesClient, transports.SessionEntityTypesGrpcTransport, "grpc", "false"),
(SessionEntityTypesAsyncClient, transports.SessionEntityTypesGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(SessionEntityTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SessionEntityTypesClient))
@mock.patch.object(SessionEntityTypesAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SessionEntityTypesAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_session_entity_types_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(SessionEntityTypesClient, transports.SessionEntityTypesGrpcTransport, "grpc"),
(SessionEntityTypesAsyncClient, transports.SessionEntityTypesGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_session_entity_types_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(SessionEntityTypesClient, transports.SessionEntityTypesGrpcTransport, "grpc"),
(SessionEntityTypesAsyncClient, transports.SessionEntityTypesGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_session_entity_types_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_session_entity_types_client_client_options_from_dict():
with mock.patch('google.cloud.dialogflowcx_v3beta1.services.session_entity_types.transports.SessionEntityTypesGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = SessionEntityTypesClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_list_session_entity_types(transport: str = 'grpc', request_type=session_entity_type.ListSessionEntityTypesRequest):
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_session_entity_types),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = session_entity_type.ListSessionEntityTypesResponse(
next_page_token='next_page_token_value',
)
response = client.list_session_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == session_entity_type.ListSessionEntityTypesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSessionEntityTypesPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_session_entity_types_from_dict():
test_list_session_entity_types(request_type=dict)
def test_list_session_entity_types_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_session_entity_types),
'__call__') as call:
client.list_session_entity_types()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == session_entity_type.ListSessionEntityTypesRequest()
@pytest.mark.asyncio
async def test_list_session_entity_types_async(transport: str = 'grpc_asyncio', request_type=session_entity_type.ListSessionEntityTypesRequest):
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_session_entity_types),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(session_entity_type.ListSessionEntityTypesResponse(
next_page_token='next_page_token_value',
))
response = await client.list_session_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == session_entity_type.ListSessionEntityTypesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListSessionEntityTypesAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_session_entity_types_async_from_dict():
await test_list_session_entity_types_async(request_type=dict)
def test_list_session_entity_types_field_headers():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = session_entity_type.ListSessionEntityTypesRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_session_entity_types),
'__call__') as call:
call.return_value = session_entity_type.ListSessionEntityTypesResponse()
client.list_session_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_session_entity_types_field_headers_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = session_entity_type.ListSessionEntityTypesRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_session_entity_types),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(session_entity_type.ListSessionEntityTypesResponse())
await client.list_session_entity_types(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_session_entity_types_flattened():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_session_entity_types),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = session_entity_type.ListSessionEntityTypesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_session_entity_types(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_session_entity_types_flattened_error():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_session_entity_types(
session_entity_type.ListSessionEntityTypesRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_session_entity_types_flattened_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_session_entity_types),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(session_entity_type.ListSessionEntityTypesResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_session_entity_types(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_session_entity_types_flattened_error_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_session_entity_types(
session_entity_type.ListSessionEntityTypesRequest(),
parent='parent_value',
)
def test_list_session_entity_types_pager():
client = SessionEntityTypesClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_session_entity_types),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[
session_entity_type.SessionEntityType(),
session_entity_type.SessionEntityType(),
session_entity_type.SessionEntityType(),
],
next_page_token='abc',
),
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[],
next_page_token='def',
),
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[
session_entity_type.SessionEntityType(),
],
next_page_token='ghi',
),
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[
session_entity_type.SessionEntityType(),
session_entity_type.SessionEntityType(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', ''),
)),
)
pager = client.list_session_entity_types(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, session_entity_type.SessionEntityType)
for i in results)
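# Hedged, illustrative sketch (not part of the generated tests): against a real,
# unmocked client the same pager is consumed by plain iteration, which fetches
# follow-up pages via next_page_token on demand. The helper below is never
# called here; ``parent`` is assumed to be a full session resource name such as
# "projects/<project>/locations/<location>/agents/<agent>/sessions/<session>".
def _example_iterate_session_entity_types(parent):
    client = SessionEntityTypesClient()
    # Iterating the ListSessionEntityTypesPager yields SessionEntityType
    # messages across all pages, mirroring what the mocked pager test asserts.
    return [entity_type.name for entity_type in client.list_session_entity_types(parent=parent)]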
def test_list_session_entity_types_pages():
client = SessionEntityTypesClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_session_entity_types),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[
session_entity_type.SessionEntityType(),
session_entity_type.SessionEntityType(),
session_entity_type.SessionEntityType(),
],
next_page_token='abc',
),
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[],
next_page_token='def',
),
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[
session_entity_type.SessionEntityType(),
],
next_page_token='ghi',
),
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[
session_entity_type.SessionEntityType(),
session_entity_type.SessionEntityType(),
],
),
RuntimeError,
)
pages = list(client.list_session_entity_types(request={}).pages)
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_session_entity_types_async_pager():
client = SessionEntityTypesAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_session_entity_types),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[
session_entity_type.SessionEntityType(),
session_entity_type.SessionEntityType(),
session_entity_type.SessionEntityType(),
],
next_page_token='abc',
),
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[],
next_page_token='def',
),
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[
session_entity_type.SessionEntityType(),
],
next_page_token='ghi',
),
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[
session_entity_type.SessionEntityType(),
session_entity_type.SessionEntityType(),
],
),
RuntimeError,
)
async_pager = await client.list_session_entity_types(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, session_entity_type.SessionEntityType)
for i in responses)
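# Hedged, illustrative sketch (not part of the generated tests): the async
# variant awaits the call to obtain a ListSessionEntityTypesAsyncPager and then
# drains it with ``async for``, as the mocked test above does. The helper is
# never invoked here; ``parent`` is assumed to be a full session resource name.
async def _example_iterate_session_entity_types_async(parent):
    client = SessionEntityTypesAsyncClient()
    pager = await client.list_session_entity_types(parent=parent)
    names = []
    # Each iteration may transparently request the next page from the service.
    async for entity_type in pager:
        names.append(entity_type.name)
    return names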
@pytest.mark.asyncio
async def test_list_session_entity_types_async_pages():
client = SessionEntityTypesAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_session_entity_types),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[
session_entity_type.SessionEntityType(),
session_entity_type.SessionEntityType(),
session_entity_type.SessionEntityType(),
],
next_page_token='abc',
),
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[],
next_page_token='def',
),
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[
session_entity_type.SessionEntityType(),
],
next_page_token='ghi',
),
session_entity_type.ListSessionEntityTypesResponse(
session_entity_types=[
session_entity_type.SessionEntityType(),
session_entity_type.SessionEntityType(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_session_entity_types(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_get_session_entity_type(transport: str = 'grpc', request_type=session_entity_type.GetSessionEntityTypeRequest):
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = session_entity_type.SessionEntityType(
name='name_value',
entity_override_mode=session_entity_type.SessionEntityType.EntityOverrideMode.ENTITY_OVERRIDE_MODE_OVERRIDE,
)
response = client.get_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == session_entity_type.GetSessionEntityTypeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, session_entity_type.SessionEntityType)
assert response.name == 'name_value'
assert response.entity_override_mode == session_entity_type.SessionEntityType.EntityOverrideMode.ENTITY_OVERRIDE_MODE_OVERRIDE
def test_get_session_entity_type_from_dict():
test_get_session_entity_type(request_type=dict)
def test_get_session_entity_type_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session_entity_type),
'__call__') as call:
client.get_session_entity_type()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == session_entity_type.GetSessionEntityTypeRequest()
@pytest.mark.asyncio
async def test_get_session_entity_type_async(transport: str = 'grpc_asyncio', request_type=session_entity_type.GetSessionEntityTypeRequest):
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(session_entity_type.SessionEntityType(
name='name_value',
entity_override_mode=session_entity_type.SessionEntityType.EntityOverrideMode.ENTITY_OVERRIDE_MODE_OVERRIDE,
))
response = await client.get_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == session_entity_type.GetSessionEntityTypeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, session_entity_type.SessionEntityType)
assert response.name == 'name_value'
assert response.entity_override_mode == session_entity_type.SessionEntityType.EntityOverrideMode.ENTITY_OVERRIDE_MODE_OVERRIDE
@pytest.mark.asyncio
async def test_get_session_entity_type_async_from_dict():
await test_get_session_entity_type_async(request_type=dict)
def test_get_session_entity_type_field_headers():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = session_entity_type.GetSessionEntityTypeRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session_entity_type),
'__call__') as call:
call.return_value = session_entity_type.SessionEntityType()
client.get_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_session_entity_type_field_headers_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = session_entity_type.GetSessionEntityTypeRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session_entity_type),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(session_entity_type.SessionEntityType())
await client.get_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_session_entity_type_flattened():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = session_entity_type.SessionEntityType()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_session_entity_type(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_session_entity_type_flattened_error():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_session_entity_type(
session_entity_type.GetSessionEntityTypeRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_session_entity_type_flattened_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(session_entity_type.SessionEntityType())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_session_entity_type(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_session_entity_type_flattened_error_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_session_entity_type(
session_entity_type.GetSessionEntityTypeRequest(),
name='name_value',
)
def test_create_session_entity_type(transport: str = 'grpc', request_type=gcdc_session_entity_type.CreateSessionEntityTypeRequest):
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_session_entity_type.SessionEntityType(
name='name_value',
entity_override_mode=gcdc_session_entity_type.SessionEntityType.EntityOverrideMode.ENTITY_OVERRIDE_MODE_OVERRIDE,
)
response = client.create_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_session_entity_type.CreateSessionEntityTypeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_session_entity_type.SessionEntityType)
assert response.name == 'name_value'
assert response.entity_override_mode == gcdc_session_entity_type.SessionEntityType.EntityOverrideMode.ENTITY_OVERRIDE_MODE_OVERRIDE
def test_create_session_entity_type_from_dict():
test_create_session_entity_type(request_type=dict)
def test_create_session_entity_type_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session_entity_type),
'__call__') as call:
client.create_session_entity_type()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_session_entity_type.CreateSessionEntityTypeRequest()
@pytest.mark.asyncio
async def test_create_session_entity_type_async(transport: str = 'grpc_asyncio', request_type=gcdc_session_entity_type.CreateSessionEntityTypeRequest):
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_session_entity_type.SessionEntityType(
name='name_value',
entity_override_mode=gcdc_session_entity_type.SessionEntityType.EntityOverrideMode.ENTITY_OVERRIDE_MODE_OVERRIDE,
))
response = await client.create_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_session_entity_type.CreateSessionEntityTypeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_session_entity_type.SessionEntityType)
assert response.name == 'name_value'
assert response.entity_override_mode == gcdc_session_entity_type.SessionEntityType.EntityOverrideMode.ENTITY_OVERRIDE_MODE_OVERRIDE
@pytest.mark.asyncio
async def test_create_session_entity_type_async_from_dict():
await test_create_session_entity_type_async(request_type=dict)
def test_create_session_entity_type_field_headers():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_session_entity_type.CreateSessionEntityTypeRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session_entity_type),
'__call__') as call:
call.return_value = gcdc_session_entity_type.SessionEntityType()
client.create_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_session_entity_type_field_headers_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_session_entity_type.CreateSessionEntityTypeRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session_entity_type),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_session_entity_type.SessionEntityType())
await client.create_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_create_session_entity_type_flattened():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_session_entity_type.SessionEntityType()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_session_entity_type(
parent='parent_value',
session_entity_type=gcdc_session_entity_type.SessionEntityType(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].session_entity_type == gcdc_session_entity_type.SessionEntityType(name='name_value')
def test_create_session_entity_type_flattened_error():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_session_entity_type(
gcdc_session_entity_type.CreateSessionEntityTypeRequest(),
parent='parent_value',
session_entity_type=gcdc_session_entity_type.SessionEntityType(name='name_value'),
)
@pytest.mark.asyncio
async def test_create_session_entity_type_flattened_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_session_entity_type.SessionEntityType())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_session_entity_type(
parent='parent_value',
session_entity_type=gcdc_session_entity_type.SessionEntityType(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].session_entity_type == gcdc_session_entity_type.SessionEntityType(name='name_value')
@pytest.mark.asyncio
async def test_create_session_entity_type_flattened_error_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_session_entity_type(
gcdc_session_entity_type.CreateSessionEntityTypeRequest(),
parent='parent_value',
session_entity_type=gcdc_session_entity_type.SessionEntityType(name='name_value'),
)
def test_update_session_entity_type(transport: str = 'grpc', request_type=gcdc_session_entity_type.UpdateSessionEntityTypeRequest):
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_session_entity_type.SessionEntityType(
name='name_value',
entity_override_mode=gcdc_session_entity_type.SessionEntityType.EntityOverrideMode.ENTITY_OVERRIDE_MODE_OVERRIDE,
)
response = client.update_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_session_entity_type.UpdateSessionEntityTypeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_session_entity_type.SessionEntityType)
assert response.name == 'name_value'
assert response.entity_override_mode == gcdc_session_entity_type.SessionEntityType.EntityOverrideMode.ENTITY_OVERRIDE_MODE_OVERRIDE
def test_update_session_entity_type_from_dict():
test_update_session_entity_type(request_type=dict)
def test_update_session_entity_type_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_session_entity_type),
'__call__') as call:
client.update_session_entity_type()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_session_entity_type.UpdateSessionEntityTypeRequest()
@pytest.mark.asyncio
async def test_update_session_entity_type_async(transport: str = 'grpc_asyncio', request_type=gcdc_session_entity_type.UpdateSessionEntityTypeRequest):
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_session_entity_type.SessionEntityType(
name='name_value',
entity_override_mode=gcdc_session_entity_type.SessionEntityType.EntityOverrideMode.ENTITY_OVERRIDE_MODE_OVERRIDE,
))
response = await client.update_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcdc_session_entity_type.UpdateSessionEntityTypeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_session_entity_type.SessionEntityType)
assert response.name == 'name_value'
assert response.entity_override_mode == gcdc_session_entity_type.SessionEntityType.EntityOverrideMode.ENTITY_OVERRIDE_MODE_OVERRIDE
@pytest.mark.asyncio
async def test_update_session_entity_type_async_from_dict():
await test_update_session_entity_type_async(request_type=dict)
def test_update_session_entity_type_field_headers():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_session_entity_type.UpdateSessionEntityTypeRequest()
request.session_entity_type.name = 'session_entity_type.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_session_entity_type),
'__call__') as call:
call.return_value = gcdc_session_entity_type.SessionEntityType()
client.update_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session_entity_type.name=session_entity_type.name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_update_session_entity_type_field_headers_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_session_entity_type.UpdateSessionEntityTypeRequest()
request.session_entity_type.name = 'session_entity_type.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_session_entity_type),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_session_entity_type.SessionEntityType())
await client.update_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'session_entity_type.name=session_entity_type.name/value',
) in kw['metadata']
def test_update_session_entity_type_flattened():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_session_entity_type.SessionEntityType()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_session_entity_type(
session_entity_type=gcdc_session_entity_type.SessionEntityType(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].session_entity_type == gcdc_session_entity_type.SessionEntityType(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
def test_update_session_entity_type_flattened_error():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_session_entity_type(
gcdc_session_entity_type.UpdateSessionEntityTypeRequest(),
session_entity_type=gcdc_session_entity_type.SessionEntityType(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
@pytest.mark.asyncio
async def test_update_session_entity_type_flattened_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcdc_session_entity_type.SessionEntityType())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_session_entity_type(
session_entity_type=gcdc_session_entity_type.SessionEntityType(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].session_entity_type == gcdc_session_entity_type.SessionEntityType(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
@pytest.mark.asyncio
async def test_update_session_entity_type_flattened_error_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_session_entity_type(
gcdc_session_entity_type.UpdateSessionEntityTypeRequest(),
session_entity_type=gcdc_session_entity_type.SessionEntityType(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
def test_delete_session_entity_type(transport: str = 'grpc', request_type=session_entity_type.DeleteSessionEntityTypeRequest):
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == session_entity_type.DeleteSessionEntityTypeRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_session_entity_type_from_dict():
test_delete_session_entity_type(request_type=dict)
def test_delete_session_entity_type_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session_entity_type),
'__call__') as call:
client.delete_session_entity_type()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == session_entity_type.DeleteSessionEntityTypeRequest()
@pytest.mark.asyncio
async def test_delete_session_entity_type_async(transport: str = 'grpc_asyncio', request_type=session_entity_type.DeleteSessionEntityTypeRequest):
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == session_entity_type.DeleteSessionEntityTypeRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_session_entity_type_async_from_dict():
await test_delete_session_entity_type_async(request_type=dict)
def test_delete_session_entity_type_field_headers():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = session_entity_type.DeleteSessionEntityTypeRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session_entity_type),
'__call__') as call:
call.return_value = None
client.delete_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_session_entity_type_field_headers_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = session_entity_type.DeleteSessionEntityTypeRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session_entity_type),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_session_entity_type(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_session_entity_type_flattened():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_session_entity_type(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_session_entity_type_flattened_error():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_session_entity_type(
session_entity_type.DeleteSessionEntityTypeRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_delete_session_entity_type_flattened_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_session_entity_type),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_session_entity_type(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_session_entity_type_flattened_error_async():
client = SessionEntityTypesAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_session_entity_type(
session_entity_type.DeleteSessionEntityTypeRequest(),
name='name_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.SessionEntityTypesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.SessionEntityTypesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SessionEntityTypesClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.SessionEntityTypesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SessionEntityTypesClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.SessionEntityTypesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = SessionEntityTypesClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.SessionEntityTypesGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.SessionEntityTypesGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.SessionEntityTypesGrpcTransport,
transports.SessionEntityTypesGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.SessionEntityTypesGrpcTransport,
)
def test_session_entity_types_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.SessionEntityTypesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_session_entity_types_base_transport():
# Instantiate the base transport.
with mock.patch('google.cloud.dialogflowcx_v3beta1.services.session_entity_types.transports.SessionEntityTypesTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.SessionEntityTypesTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'list_session_entity_types',
'get_session_entity_type',
'create_session_entity_type',
'update_session_entity_type',
'delete_session_entity_type',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_session_entity_types_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dialogflowcx_v3beta1.services.session_entity_types.transports.SessionEntityTypesTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SessionEntityTypesTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_session_entity_types_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.dialogflowcx_v3beta1.services.session_entity_types.transports.SessionEntityTypesTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SessionEntityTypesTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
quota_project_id="octopus",
)
def test_session_entity_types_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.dialogflowcx_v3beta1.services.session_entity_types.transports.SessionEntityTypesTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SessionEntityTypesTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_session_entity_types_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
SessionEntityTypesClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_session_entity_types_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
SessionEntityTypesClient()
adc.assert_called_once_with(
            scopes=('https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/dialogflow',),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.SessionEntityTypesGrpcTransport,
transports.SessionEntityTypesGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_session_entity_types_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
            default_scopes=('https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/dialogflow',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.SessionEntityTypesGrpcTransport,
transports.SessionEntityTypesGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_session_entity_types_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.SessionEntityTypesGrpcTransport, grpc_helpers),
(transports.SessionEntityTypesGrpcAsyncIOTransport, grpc_helpers_async)
],
)
@requires_api_core_gte_1_26_0
def test_session_entity_types_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.SessionEntityTypesGrpcTransport, grpc_helpers),
(transports.SessionEntityTypesGrpcAsyncIOTransport, grpc_helpers_async)
],
)
@requires_api_core_lt_1_26_0
def test_session_entity_types_transport_create_channel_old_api_core(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus")
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.SessionEntityTypesGrpcTransport, grpc_helpers),
(transports.SessionEntityTypesGrpcAsyncIOTransport, grpc_helpers_async)
],
)
@requires_api_core_lt_1_26_0
def test_session_entity_types_transport_create_channel_user_scopes(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
scopes=["1", "2"],
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.SessionEntityTypesGrpcTransport, transports.SessionEntityTypesGrpcAsyncIOTransport])
def test_session_entity_types_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
    # Check that client_cert_source_for_mtls is used when ssl_channel_credentials
    # is not provided.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
def test_session_entity_types_host_no_port():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='dialogflow.googleapis.com'),
)
assert client.transport._host == 'dialogflow.googleapis.com:443'
def test_session_entity_types_host_with_port():
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='dialogflow.googleapis.com:8000'),
)
assert client.transport._host == 'dialogflow.googleapis.com:8000'
def test_session_entity_types_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SessionEntityTypesGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_session_entity_types_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.SessionEntityTypesGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SessionEntityTypesGrpcTransport, transports.SessionEntityTypesGrpcAsyncIOTransport])
def test_session_entity_types_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.SessionEntityTypesGrpcTransport, transports.SessionEntityTypesGrpcAsyncIOTransport])
def test_session_entity_types_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/dialogflow',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_session_entity_type_path():
project = "squid"
location = "clam"
agent = "whelk"
session = "octopus"
entity_type = "oyster"
expected = "projects/{project}/locations/{location}/agents/{agent}/sessions/{session}/entityTypes/{entity_type}".format(project=project, location=location, agent=agent, session=session, entity_type=entity_type, )
actual = SessionEntityTypesClient.session_entity_type_path(project, location, agent, session, entity_type)
assert expected == actual
def test_parse_session_entity_type_path():
expected = {
"project": "nudibranch",
"location": "cuttlefish",
"agent": "mussel",
"session": "winkle",
"entity_type": "nautilus",
}
path = SessionEntityTypesClient.session_entity_type_path(**expected)
# Check that the path construction is reversible.
actual = SessionEntityTypesClient.parse_session_entity_type_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "scallop"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = SessionEntityTypesClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "abalone",
}
path = SessionEntityTypesClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = SessionEntityTypesClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "squid"
expected = "folders/{folder}".format(folder=folder, )
actual = SessionEntityTypesClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "clam",
}
path = SessionEntityTypesClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = SessionEntityTypesClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "whelk"
expected = "organizations/{organization}".format(organization=organization, )
actual = SessionEntityTypesClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "octopus",
}
path = SessionEntityTypesClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = SessionEntityTypesClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "oyster"
expected = "projects/{project}".format(project=project, )
actual = SessionEntityTypesClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nudibranch",
}
path = SessionEntityTypesClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = SessionEntityTypesClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "cuttlefish"
location = "mussel"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = SessionEntityTypesClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "winkle",
"location": "nautilus",
}
path = SessionEntityTypesClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = SessionEntityTypesClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.SessionEntityTypesTransport, '_prep_wrapped_messages') as prep:
client = SessionEntityTypesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.SessionEntityTypesTransport, '_prep_wrapped_messages') as prep:
transport_class = SessionEntityTypesClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
6237d5cd45456cf4aea5e5eaa2cd7525a5a0f984 | 22bf910b64283b3c15cc4d80542e83fa89e9f09d | /monero_glue/messages/DebugLinkShowTextStyle.py | 6ddc6028fdf6a547536fc717cd8d48b7bf7a8654 | [
"MIT"
] | permissive | ph4r05/monero-agent | 24ed1aa17d6616b2ae6bcdb7b9997f982f8b7b5d | 0bac0e6f33142b2bb885565bfd1ef8ac04559280 | refs/heads/master | 2022-10-18T06:30:43.550133 | 2021-07-01T16:27:56 | 2021-07-01T16:27:56 | 126,215,119 | 24 | 5 | MIT | 2022-09-23T22:53:44 | 2018-03-21T17:18:21 | Python | UTF-8 | Python | false | false | 315 | py | # Automatically generated by pb2py
# fmt: off
if False:
from typing_extensions import Literal
NORMAL = 0 # type: Literal[0]
BOLD = 1 # type: Literal[1]
MONO = 2 # type: Literal[2]
MONO_BOLD = 3 # type: Literal[3]
BR = 4 # type: Literal[4]
BR_HALF = 5 # type: Literal[5]
SET_COLOR = 6 # type: Literal[6]
| [
"[email protected]"
] | |
759b0b137a7faf1da9dc6ffbab58053fdcbad295 | bb5465b31067d8e2ef20a93c87bfad2c6a8e6569 | /orders/forms.py | c30ac21ad1c2d2d3346d20718321be245f6af33b | [] | no_license | greypanda/Django-Bootcamp-1 | cc7e1b131b55be4ca224702397f0e4aee6e1d2d9 | d66886bd2ab65f07cba08dc26640f52e0da72ac4 | refs/heads/main | 2022-12-27T01:27:26.516712 | 2020-10-14T23:45:57 | 2020-10-14T23:45:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | from django import forms
from .models import Order
class OrderForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
        product = kwargs.pop("product", None)
super().__init__(*args, **kwargs)
self.product = product
class Meta:
model = Order
fields = [
'shipping_address',
'billing_address',
]
def clean(self, *args, **kwargs):
cleaned_data = super().clean(*args, **kwargs)
# check product inventory
if self.product != None:
if not self.product.has_inventory():
raise forms.ValidationError("This product is out of inventory.")
return cleaned_data | [
"[email protected]"
] | |
3a05d1a9e15233697f2611e6105e3a61f8da2282 | b0ede55e98d454f558e5397369f9265893deedb5 | /SWEA/D3/3750_digit_sum.py | 91b7ee6c6d6baa5d0dc8631446283554e277d0fb | [] | no_license | YeonggilGo/python_practice | 5ff65852900c4c6769d541af16f74a27a67920ec | 43082568b5045a8efc1d596074bdca3e66b2fed1 | refs/heads/master | 2023-06-22T02:09:31.906745 | 2023-06-17T01:27:22 | 2023-06-17T01:27:22 | 280,361,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # 매 테스트 케이스마다 print를 하지않고
# 배열에 저장해서 한꺼번에 출력하니까 동작시간이 훨씬 줄어들었다.
# 이유가 뭔지는 아직 모르겠다.
T = int(input())
ans = []
for tc in range(1, T + 1):
N = input()
while len(N) > 1:
N_li = list(map(int, N))
N = str(sum(N_li))
ans.append(N)
for tc in range(0, T):
print(f'#{tc+1} {ans[tc]}')
| [
"[email protected]"
] | |
25e7860fa269e96b48ce74d7908cadb94fc03315 | 0ddbbc997883aa7c17e50a08de7aa40c3a4955c7 | /project1/package1/plot_test.py | ac3581147ec93402bf0aa6e75ea365f5c588c3e6 | [] | no_license | kwoolter/vscode-online | 39eef2ab9c13c0460d6f8a45a8674906e7594bdd | f13c0a1378a2724a44d95ce4ab06700eb0642cae | refs/heads/master | 2022-07-14T01:44:28.495267 | 2020-05-16T10:21:42 | 2020-05-16T10:21:42 | 264,375,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 20, 100)
plt.plot(x, np.sin(x))
plt.show(block=False)
input('press <ENTER> to continue') | [
"[email protected]"
] | |
70cd3506623f02e09d026e8fbf4721df8d98cd99 | 8cb8bfd2dae516612251039e0632173ea1ea4c8a | /modules/user/publisher_user.py | 946ebcc7023cc69b7181138f470652fe3331ebb9 | [] | no_license | nyzsirt/lift-prod | 563cc70700d26a5812a1bce0bd9795998dce6e99 | 9a5f28e49ad5e80e422a5d5efee77a2d0247aa2b | refs/heads/master | 2020-04-22T01:05:42.262876 | 2019-02-09T13:31:15 | 2019-02-09T13:31:15 | 170,003,361 | 1 | 0 | null | 2019-02-10T17:11:50 | 2019-02-10T17:11:50 | null | UTF-8 | Python | false | false | 979 | py | class PublisherUserOnCreate:
"""
Publisher for notifying subscribers on new service admin user creating
"""
def __init__(self):
self.__new_user=None
self.__subscribers=[]
@property
def new_user(self):
"""
Publisher notifies subscribers about this user dict
:return: new user dict
"""
return self.__new_user
@new_user.setter
def new_user(self,user):
"""
Set new_user dict and if not None notify all subscribers
:param user: user dict
:return: Void
"""
self.__new_user=user
if self.__new_user:
self.notify_subscribers()
def notify_subscribers(self):
        for subscriber in self.__subscribers:
            subscriber.notify()
def add_subscriber(self,subscriber):
self.__subscribers.append(subscriber)
def remove_subscriber(self,subscriber):
self.__subscribers.remove(subscriber)
| [
"[email protected]"
] | |
1bacbdd7d2adb957a389d64b3941a31252aa6e64 | 609582ee37a01ac6a67fb9c957825dcd3c9a5b3a | /LeetCode_Linked_List/160_Intersection_Of_Two_Linked_List.py | b95d8c954f372d3807f1ca3cb6bbbed0548eadf4 | [] | no_license | captainjack331089/captainjack33.LeetCode | a9ad7b3591675c76814eda22e683745068e0abed | 4c03f28371e003e8e6a7c30b7b0c46beb5e2a8e7 | refs/heads/master | 2022-03-07T19:53:40.454945 | 2019-11-06T19:32:00 | 2019-11-06T19:32:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | py | """
160. Intersection of Two Linked Lists
Category: Linked List
Difficulty: Easy
"""
"""
Write a program to find the node at which the intersection of two singly linked lists begins.
For example, the following two linked lists:
begin to intersect at node c1.
Example 1:
Input: intersectVal = 8, listA = [4,1,8,4,5], listB = [5,0,1,8,4,5], skipA = 2, skipB = 3
Output: Reference of the node with value = 8
Input Explanation: The intersected node's value is 8 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [4,1,8,4,5]. From the head of B, it reads as [5,0,1,8,4,5]. There are 2 nodes before the intersected node in A; There are 3 nodes before the intersected node in B.
Example 2:
Input: intersectVal = 2, listA = [0,9,1,2,4], listB = [3,2,4], skipA = 3, skipB = 1
Output: Reference of the node with value = 2
Input Explanation: The intersected node's value is 2 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [0,9,1,2,4]. From the head of B, it reads as [3,2,4]. There are 3 nodes before the intersected node in A; There are 1 node before the intersected node in B.
Example 3:
Input: intersectVal = 0, listA = [2,6,4], listB = [1,5], skipA = 3, skipB = 2
Output: null
Input Explanation: From the head of A, it reads as [2,6,4]. From the head of B, it reads as [1,5]. Since the two lists do not intersect, intersectVal must be 0, while skipA and skipB can be arbitrary values.
Explanation: The two lists do not intersect, so return null.
Notes:
If the two linked lists have no intersection at all, return null.
The linked lists must retain their original structure after the function returns.
You may assume there are no cycles anywhere in the entire linked structure.
Your code should preferably run in O(n) time and use only O(1) memory.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def getIntersectionNode(self, headA, headB):
p1 = headA
p2 = headB
while p1 != p2:
if not p1:
p1 = headB
else:
p1 = p1.next
if not p2:
p2 = headA
else:
p2 = p2.next
return p2 | [
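# --- Illustrative local check (added sketch, not part of the original solution file) ---
# LeetCode normally supplies ListNode and calls getIntersectionNode itself; the minimal
# ListNode class and hand-built lists below are assumptions for local testing only.
if __name__ == '__main__':
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None

    # listA = 4 -> 1 -> 8 -> 4 -> 5 and listB = 5 -> 0 -> 1 -> 8 -> 4 -> 5 share the tail 8 -> 4 -> 5
    shared = ListNode(8)
    shared.next = ListNode(4)
    shared.next.next = ListNode(5)
    head_a = ListNode(4)
    head_a.next = ListNode(1)
    head_a.next.next = shared
    head_b = ListNode(5)
    head_b.next = ListNode(0)
    head_b.next.next = ListNode(1)
    head_b.next.next.next = shared
    # Each pointer walks lenA + lenB nodes (switching lists at the end), so they meet at the shared node.
    print(Solution().getIntersectionNode(head_a, head_b).val)  # expected: 8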
"[email protected]"
] | |
6d960235947bbf4e658d18e273fb3658fd207da8 | 91b3f9f1803161c22ff5bed3e5604a07d67728ac | /patterns/factory/overlay_factory.py | 802822ffdc740832cd8fbf414f2218ceb02f190f | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | AndreTeixeira1998/TekkenBot | dab01fd022f91787f709241a17a903291e7089bd | 015c601afbea5d75a46b3385f1d322b2655249b0 | refs/heads/master | 2023-07-17T16:52:14.182255 | 2021-04-22T17:29:55 | 2021-04-22T17:29:55 | 273,787,610 | 0 | 0 | MIT | 2020-06-20T21:34:27 | 2020-06-20T21:34:26 | null | UTF-8 | Python | false | false | 2,082 | py | #!/usr/bin/env python3
# Copyright (c) 2019, Alchemy Meister
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
"""
import inspect
from gui.my_tkinter.overlay import Overlay
from .factory import Factory
class OverlayFactory(Factory):
def __init__(self):
super().__init__()
self.__register_subclasses(Overlay)
def __register_subclasses(self, parent_class):
for cls in parent_class.__subclasses__():
if inspect.isabstract(cls):
self.__register_subclasses(cls)
else:
self.register_class(cls.CLASS_ID, cls)
| [
"[email protected]"
] | |
c48a6539ae876c3189fcf79c05265e1fdc2a596b | 4a399d20f9934c4984bab229a015be69e9189067 | /devel/lib/python2.7/dist-packages/roboy_communication_control/msg/_DebugNotification.py | 39749bcc8f1c881c53ae2f12b81695e3eb409819 | [
"BSD-3-Clause"
] | permissive | Roboy/myoarm_small_FPGA | 09af14c7d82c9e8fc923842ae5aad1be6344bf27 | f2f11bee50078d8a03f352e3b3ef9f3d9244d87a | refs/heads/master | 2021-01-21T03:21:49.777564 | 2017-08-30T22:11:44 | 2017-08-30T22:11:44 | 101,892,113 | 0 | 0 | null | 2017-08-30T14:49:18 | 2017-08-30T14:33:46 | null | UTF-8 | Python | false | false | 7,015 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from roboy_communication_control/DebugNotification.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class DebugNotification(genpy.Message):
_md5sum = "e83a19f2165c907848c09efd00ad9d5e"
_type = "roboy_communication_control/DebugNotification"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int32 code
string object
string msg
string extra
int32 validityDuration"""
__slots__ = ['code','object','msg','extra','validityDuration']
_slot_types = ['int32','string','string','string','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
code,object,msg,extra,validityDuration
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(DebugNotification, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.code is None:
self.code = 0
if self.object is None:
self.object = ''
if self.msg is None:
self.msg = ''
if self.extra is None:
self.extra = ''
if self.validityDuration is None:
self.validityDuration = 0
else:
self.code = 0
self.object = ''
self.msg = ''
self.extra = ''
self.validityDuration = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_i().pack(self.code))
_x = self.object
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.msg
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.extra
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_i().pack(self.validityDuration))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(self.code,) = _get_struct_i().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.object = str[start:end].decode('utf-8')
else:
self.object = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.msg = str[start:end].decode('utf-8')
else:
self.msg = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.extra = str[start:end].decode('utf-8')
else:
self.extra = str[start:end]
start = end
end += 4
(self.validityDuration,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_i().pack(self.code))
_x = self.object
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.msg
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.extra
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_i().pack(self.validityDuration))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(self.code,) = _get_struct_i().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.object = str[start:end].decode('utf-8')
else:
self.object = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.msg = str[start:end].decode('utf-8')
else:
self.msg = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.extra = str[start:end].decode('utf-8')
else:
self.extra = str[start:end]
start = end
end += 4
(self.validityDuration,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_i = None
def _get_struct_i():
global _struct_i
if _struct_i is None:
_struct_i = struct.Struct("<i")
return _struct_i
| [
"[email protected]"
] | |
674d799ef87465a5e5b80fdd21d63878fb2e1361 | e7b956cd98f3400249cd5097029f0a1a9e8ba645 | /app/relations/many_to_many/migrations/0002_auto_20180205_0701.py | 727d216af345dda00738984b376c0fcafe2b46fb | [] | no_license | standbyme227/fc-django-document | 8ffc4430099fbee037f1336e319e40292bcf7af4 | 8f01c108f773f3f7edc49e1f6527ed3789754ba9 | refs/heads/master | 2021-05-04T16:08:11.133487 | 2018-02-22T03:05:44 | 2018-02-22T03:05:44 | 120,244,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | py | # Generated by Django 2.0.2 on 2018-02-05 07:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('many_to_many', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Postlike',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='many_to_many.Post')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.AddField(
model_name='postlike',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='many_to_many.User'),
),
]
| [
"[email protected]"
] | |
c0816befac5b3984dad7c534e48520cc62c3eb87 | 88c1fa6dd5b51a93c4345951c41c4f56a82ba5a3 | /LiveProject-Python/AppBuilder9000/ZPYLP0612/GreatestComedies/models.py | 8eda5a405781578635b00d1f099ff55c4023617a | [] | no_license | Sean-Beyer/PythonDjango-LiveProject | 83335c4d5e22d00c34dac1c71c39f770ad896c4e | 986b567fad49368c52182eb5196534ff8a8ebcfc | refs/heads/master | 2022-12-13T22:43:21.820355 | 2020-09-01T00:34:18 | 2020-09-01T00:34:18 | 291,854,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | from django.db import models
# Created models
class Comedies(models.Model):
comedy = models.CharField(max_length=100, blank=True)
actor = models.CharField(max_length=100, blank=True)
director = models.CharField(max_length=100, blank=True)
year = models.CharField(max_length=100, blank=True)
imdb_rating = models.CharField(max_length=100, null=True)
rating = models.DecimalField(max_digits=2, decimal_places=1, null=True)
review = models.CharField(max_length=1000, blank=True, null=True)
Comedy= models.Manager() # object manager for Movie Database
def __str__(self):
return self.comedy
| [
"[email protected]"
] | |
239ad35cba71416a20dce988b43a1d29433918c0 | 748b0b8d653e2bf0a33b72ca97ac145c1bdce9b3 | /backend/msm_mobile_261109_d_15730/urls.py | 06b219555ca9a5c714db9636877f1c6fa779b44e | [] | no_license | crowdbotics-apps/msm-mobile-261109-d-15730 | b511e190dbd6fa5535698ce5d1b02bba649e129f | dfbf6260071f3b6a3f4a5e97f4ad9ddd06969b1d | refs/heads/master | 2023-01-19T12:59:19.711357 | 2020-11-26T05:42:47 | 2020-11-26T05:42:47 | 316,133,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,975 | py | """msm_mobile_261109_d_15730 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "MSM-mobile-261109"
admin.site.site_title = "MSM-mobile-261109 Admin Portal"
admin.site.index_title = "MSM-mobile-261109 Admin"
# swagger
api_info = openapi.Info(
title="MSM-mobile-261109 API",
default_version="v1",
description="API documentation for MSM-mobile-261109 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] | |
ddfeb229c2eb58e3c70f6c7666511fd98cae0dd1 | 0ced37fd5631850c43319b43aa2ac48a105eeb08 | /package/json_scraper.py | 3c3aabec5aae14fa57f94f0663d4e51004aa6958 | [] | no_license | chomman/earthquake-finder | c0d9b0bd5104b10b0bd7beec5d11f58c0d22e69c | d7f4de33843e7ceed5c2113cdefbb908f11338a2 | refs/heads/master | 2020-12-28T20:41:48.404930 | 2015-03-31T15:14:24 | 2015-03-31T15:14:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | #!/usr/bin/python
## @json_scraper.py
# This file makes a request to an external webpage, and returns the json
# response content.
import requests
## scrape: scrape the content of provided url.
def scrape(url):
# request (get) given url, store json response content
r = requests.get(url)
data = r.json()
# return content
return data
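# Example usage (added for illustration; the feed URL below is a placeholder assumption,
# not something taken from this project):
# quakes = scrape('https://example.com/earthquake-feed.json')
# print(quakes)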
| [
"[email protected]"
] | |
6fa79bddee0f2e98fef1c64301eae7417409cc46 | e474eefcc719c06aad59f8f69b01f903e3e0bbb9 | /src/cmd_tools.py | 404783908ac73bb4ef12e5d306b7b35249476c94 | [] | no_license | YiFeng0755/RepackGUI | 3ef8a46d0d31057993a1c52d2b27c2e59055af6e | 89794d804dd27e6dcff16bafb312579d63a2327c | refs/heads/master | 2020-04-28T04:44:49.062399 | 2018-03-06T04:26:00 | 2018-03-06T04:26:00 | 146,294,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,540 | py | #-*-coding:utf8 -*-
'''
Created on 2014-11-3
@author: wangdongchun
'''
import os
import re
import subprocess
import platform
import my_utils
import env
import log_utils
def dexTrans2Smali(dexFile, targetDir, baksmali = env.TOOL_DEF_BAKSMALI_JARFILE):
'''
    @brief Disassemble a dex file into the set of smali files that make it up
    @param[in] dexFile path to the dex file
    @param[in] targetDir directory where the smali files are stored
    @param[in] baksmali helper jar needed for the disassembly
    @return >0: failure; 0: success
'''
dexFile = my_utils.getFullPath(dexFile)
baksmaliFile = my_utils.getToolPath(baksmali)
targetDir = my_utils.getFullPath(targetDir)
if not os.path.exists(targetDir):
os.mkdir(targetDir)
if not os.path.exists(dexFile) or not os.path.exists(baksmaliFile):
my_utils.printf("dexTrans2Smali: file %s or %s not exist." % (baksmali,dexFile))
return 1
cmd = '%s -jar "%s" -o "%s" "%s"' % (my_utils.getJavaPath('java'),
baksmaliFile,
targetDir,
dexFile)
print('--------xxx----' + cmd)
ret = my_utils.execFormatCmd(cmd)
if ret:
#my_utils.printf("dexTrans2Smali: execFormatCmd(%s) failure ret(%d)." % (cmd,ret))
return ret
else:
return 0
def smaliTrans2dex(smaliDir, targetFile, smalijarfile = env.TOOL_DEF_SMALI_JARFILE):
'''
    @brief Assemble and package smali files into a dex file
    @param[in] smaliDir directory containing the smali files
    @param[in] targetFile path of the dex file to generate
    @return >0: failure; 0: success
'''
smaliDir = my_utils.getFullPath(smaliDir)
targetFile = my_utils.getFullPath(targetFile)
smaliFile = my_utils.getToolPath(smalijarfile)
if not os.path.exists(targetFile) or not os.path.exists(smaliFile):
my_utils.printf("smaliTrans2dex: %s or %s file not exist." % (targetFile,smaliFile))
return 1
cmd = '%s -jar -Xms512m -Xmx512m "%s" "%s" -o "%s"' % (my_utils.getJavaPath('java'),
smaliFile,
smaliDir,
targetFile)
ret = my_utils.execFormatCmd(cmd)
if ret:
#my_utils.printf("smaliTrans2dex: execFormatCmd(%s) failure ret(%d)." % (cmd,ret))
return ret
else:
return 0
def signApk(apkFile, keyStore, storepassword, keyalias, aliaspassword):
'''
    @brief Sign an apk file
    @param[in] apkFile path to the apk file
    @param[in] keyStore path to the keystore file
    @param[in] storepassword password of the keystore
    @param[in] keyalias key alias inside the keystore
    @param[in] aliaspassword password of the key alias
    @return >0: failure; 0: success
'''
apkFile = my_utils.getFullPath(apkFile)
aapt = my_utils.getToolPath('aapt')
keyStore = my_utils.getFullPath(keyStore)
if not os.path.exists(apkFile) or not os.path.exists(keyStore):
my_utils.printf("signApk: %s or %s file not exist." % (apkFile,keyStore))
return 2
listcmd = '%s list %s' % (aapt, apkFile)
listcmd = listcmd.decode('utf-8')
output = os.popen(str(listcmd)).read()
for filename in output.split('\n'):
if filename.find('META-INF') == 0:
rmcmd = '"%s" remove "%s" "%s"' % (aapt, apkFile, filename)
print(' --------xxx----' + rmcmd)
bReturn = my_utils.execFormatCmd(rmcmd)
jarsingnCmd = '"%s" -keystore "%s" -storepass "%s" -keypass "%s" "%s" "%s" -sigalg SHA1withRSA -digestalg SHA1' % \
(my_utils.getJavaPath('jarsigner'), #java的执行文件路径
keyStore,
storepassword,
aliaspassword,
apkFile,
keyalias)
print('--------xxx----' + jarsingnCmd)
ret = my_utils.execFormatCmd(str(jarsingnCmd))
if ret:
#my_utils.printf("signApk: execFormatCmd(%s) failure ret(%d)." % (jarsingnCmd,ret))
return ret
else:
return 0
def alignAPK(tempApkFile, apkFile):
'''
    @brief Optimize (zipalign) the apk binary
    @param[in] tempApkFile path of the source apk file
    @param[in] apkFile name of the target apk file
    @return 1: failure; 0: success
'''
align = my_utils.getToolPath('zipalign')
if platform.system() == 'Linux':
align = 'zipalign'
if not os.path.exists(tempApkFile):
my_utils.printf("alignAPK: %s file not exist." % tempApkFile)
return 1
aligncmd = '"%s" -f 4 "%s" "%s"' % (align, tempApkFile, apkFile)
print('--------xxx----' + aligncmd)
ret = my_utils.execFormatCmd(aligncmd)
if ret:
#my_utils.printf("alignAPK: execFormatCmd(%s) failure ret=%d" % ret)
return 2
else:
return 0
def decompileApk(apkFile, targetDir, tmpPath, apkTool = env.TOOL_DEF_APKTOOL_JARFILE):
'''
    @brief Decompile an apk file
    @param[in] apkFile path to the apk file
    @param[in] targetDir directory where the decompiled files are stored
    @param[in] tmpPath framework directory used by apktool during decompilation
    @param[in] apkTool jar of the decompilation tool
    @return 1: failure; 0: success
'''
apkFile = my_utils.getFullPath(apkFile)
targetDir = my_utils.getFullPath(targetDir)
apkTool = my_utils.getToolPath(apkTool)
if not os.path.exists(apkFile) or not os.path.exists(apkTool):
my_utils.printf("decompileApk: %s or %s file not exist." % (apkFile,apkTool))
return 1
if os.path.exists(targetDir):
my_utils.delete_file_folder(targetDir)
os.makedirs(targetDir)
    # tmpPath is not used
# cmd = '"{0}" -jar "{1}" -q d --frame-path "{2}" -b -f -o "{3}" "{4}"'.format(
cmd = '"{0}" -jar "{1}" d --frame-path "{2}" -f -o "{3}" "{4}"'.format(
my_utils.getJavaPath('java'),
apkTool,
tmpPath,
targetDir,
apkFile)
log_utils.getLogger().debug(cmd)
ret = my_utils.execFormatCmd(cmd)
if ret:
#my_utils.printf("decompileApk: execFormatCmd failure ret = %d." % ret)
return ret
else:
return 0
# Invoke apktool.jar to build the decompiled directory back into apkFile
def recompileApk(srcFolder, apkFile, tmpPath, apkTool = env.TOOL_DEF_APKTOOL_JARFILE):
    '''Recompile and generate the apk file'''
srcFolder = my_utils.getFullPath(srcFolder)
apkTool = my_utils.getToolPath(apkTool)
if not os.path.exists(apkTool) or not os.path.exists(srcFolder):
my_utils.printf("recompileApk: %s or %s file not exist." % (srcFolder,apkTool))
return 1
apkFile = my_utils.getFullPath(apkFile)
# cmd = '"{0}" -jar "{1}" -q b --frame-path "{2}" -f -o "{3}" "{4}"'.format(
    # Invoke apktool.jar to build the decompiled directory into apkFile
cmd = '"{0}" -jar "{1}" b --frame-path "{2}" -f -o "{3}" "{4}"'.format(
my_utils.getJavaPath('java'),
apkTool,
tmpPath,
apkFile,
srcFolder)
print('--------xxx----' + cmd.decode('utf-8'))
ret = my_utils.execFormatCmd(cmd)
if ret:
#my_utils.printf("recompileApk: execFormatCmd failure ret = %d." % ret)
return 2
else:
return 0
# 1. Invoke aapt to compile the project's resources into an R.java file
def produceNewRFile(packName, decompileFullDir, androidManiFest = 'AndroidManifest.xml'):
    '''Generate the R file'''
fullPath = decompileFullDir
tempPath = os.path.dirname(decompileFullDir)
tempPath = tempPath + '/tempRFile'
if os.path.exists(tempPath):
my_utils.delete_file_folder(tempPath)
    # Create a tempRFile directory alongside the decompiled directory
os.makedirs(tempPath)
resPath = os.path.join(decompileFullDir, 'res')
targetResPath = os.path.join(tempPath, 'res')
    # Copy the decompiled res directory to tempRFile/res
my_utils.copyFiles(resPath, targetResPath)
    # tempRFile/gen directory
genPath = os.path.join(tempPath, 'gen')
if not os.path.exists(genPath):
os.mkdir(genPath)
androidPath = my_utils.getToolPath('android.jar')
srcManifest = os.path.join(fullPath, androidManiFest)
    # Invoke aapt to compile the project's resources into an R.java file:
    # the resources under tempRFile/res are compiled into tempRFile/gen, producing R.java
aaptPath = my_utils.getToolPath('aapt')
cmd = '%s p -f -m -J "%s" -S "%s" -I "%s" -M "%s"' % (aaptPath,
genPath,
targetResPath,
androidPath,
srcManifest)
print('xxxxxxxxxxxx--' + cmd)
ret = my_utils.execFormatCmd(cmd)
if ret:
#my_utils.printf("produceNewRFile: execFormatCmd(%s) failure ret=%d" % (cmd,ret))
return 1
RPath = packName.replace('.', '/')
RPath = os.path.join(genPath, RPath)
RFile = os.path.join(RPath, 'R.java')
    # Compile tempRFile/gen/R.java with javac
cmd = '"%s" -source %s -target %s -encoding UTF-8 "%s"' % \
(my_utils.getJavaPath('javac'),env.JAVAC_COMPILE_VERSION,env.JAVAC_COMPILE_VERSION,RFile)
print('--------xxx----' + cmd)
ret = my_utils.execFormatCmd(cmd)
if ret:
#my_utils.printf("produceNewRFile: execFormatCmd(%s) failure ret=%d" % (cmd,ret))
return 2
dexPath = os.path.join(tempPath, 'class.dex')
#if platform.system() == 'Windows':
#dxTool = my_utils.getToolPath('dx.bat')
#cmd = '"%s" --dex --output="%s" "%s"' % (dxTool, dexPath, genPath)
#else:
    # Invoke dx.jar to compile the .class files under tempRFile/gen into tempRFile/class.dex
dxTool = my_utils.getToolPath('dx.jar')
cmd = '"%s" -jar "%s" --dex --output="%s" "%s"' % \
(my_utils.getJavaPath('java'),dxTool, dexPath, genPath)
ret = my_utils.execFormatCmd(cmd)
if ret:
#my_utils.printf("produceNewRFile: execFormatCmd(%s) failure ret=%d" % (cmd,ret))
return 3
smaliPath = os.path.join(fullPath, 'smali')
    # Use baksmali.jar to turn tempRFile/class.dex into the decompiled /smali directory
ret = dexTrans2Smali(dexPath, smaliPath, 'baksmali-1.4.1.jar')
if ret:
return 4
else:
return 0
def getVersion(apkFile,vtype):
    '''Get the version number of the sdk or apk'''
cmd = my_utils.getToolPath('aapt') + " d badging '" + apkFile + "'"
cmd = cmd.replace('\\', '/')
cmd = re.sub('/+', '/', cmd)
ret = 0
if platform.system() == 'Windows':
st = subprocess.STARTUPINFO
st.dwFlags = subprocess.STARTF_USESHOWWINDOW
st.wShowWindow = subprocess.SW_HIDE
else:
cmd = str(cmd).encode('utf-8')
s = subprocess.Popen(str(cmd), stdout=subprocess.PIPE, shell=True)
info = s.communicate()[0]
versionName = ''
if vtype == 1:#apk version
nPos = info.find('versionName')
nEnd = info.find("'", nPos + 13)
versionName = info[nPos + 13:nEnd]
elif vtype == 2:#sdk version
nPos = info.find('targetSdkVersion')
nEnd = info.find("'", nPos + 18)
versionName = info[nPos + 18:nEnd]
if versionName == '':
versionName = 'Unknown Version'
return versionName
def decodeLuaFile(filePath):
decodeToolPath = os.path.join(env.GOD_TOOL_DIR, 'BinaryEncoder', 'BinaryEncoder.exe')
decodedFilePath = filePath.replace(".lua", "_decoded.lua")
cmd = " ".join([decodeToolPath, "-d", filePath, decodedFilePath])
ret = my_utils.execFormatCmd(cmd)
return ret, decodedFilePath
def encodeLuaFile(filePath):
encodedToolPath = os.path.join(env.GOD_TOOL_DIR, 'BinaryEncoder', 'BinaryEncoder.exe')
encodedFilePath = filePath.replace(".lua", "_encoded.lua")
cmd = " ".join([encodedToolPath, "-e", filePath, encodedFilePath])
ret = my_utils.execFormatCmd(cmd)
return ret, encodedFilePath
if __name__ == '__main__':
decodeLuaFile(r'C:\Users\JonLiang\Desktop\apks\temp\gameConfig.lua'); | [
"[email protected]"
] | |
b844262993e1cf3a0e55258d539ddecd80993328 | 2ad52a65c45051f26fe26631a31f80279522ddb7 | /build/test/catkin_generated/pkg.installspace.context.pc.py | 610ea7253b985ee7cc655143bb6f00b5a64ccb4c | [] | no_license | aryamansriram/Movel_Nav | a64c32528b7ce0a5a19127ba3a9379dca0201356 | 0e5e64232a01771999d34694f3bf6840f0c1e3ee | refs/heads/master | 2023-01-03T20:35:22.041816 | 2020-10-21T13:37:11 | 2020-10-21T13:37:11 | 305,279,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "test"
PROJECT_SPACE_DIR = "/home/rosguy/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
df4a7dfe2740775f79df27bea8aabba64637d720 | 6b97da799cb9b72d711a5e1d6321e4e11f3cbe51 | /bin/iptest3 | 8f85d0e0d8a8e3a6e242b360c552cbf740df3398 | [] | no_license | dx-entity/env_parabola | 3531120d213ade533052161ec70f3a511f2fc90a | f830d5f05a578b1ed2b16f6898fb226e27de6b52 | refs/heads/master | 2021-01-09T20:22:51.509076 | 2016-07-22T06:55:49 | 2016-07-22T06:55:49 | 63,930,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/root/python_pro/env_parabola/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from IPython.testing.iptestcontroller import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
a92d9bc93ed8d9c1567fb32f622b50c221616b5e | b5937928a48340569f673e237e42f32ab62cfd15 | /src/pathCrossing/path.py | 19ce0bdef5e67249b47937b919aa1930eff3039d | [
"CC0-1.0"
] | permissive | rajitbanerjee/leetcode | 79731de57ab4b0edd765b3cbb4aac459973fb22d | 720fcdd88d371e2d6592ceec8370a6760a77bb89 | refs/heads/master | 2021-06-13T11:19:03.905797 | 2021-06-02T14:40:08 | 2021-06-02T14:40:08 | 191,103,205 | 2 | 1 | null | 2020-02-23T23:41:45 | 2019-06-10T05:34:46 | Java | UTF-8 | Python | false | false | 575 | py | class Solution:
def isPathCrossing(self, path: str) -> bool:
x, y = 0, 0
visited = {(x, y)}
for p in path:
if p == 'N':
y += 1
elif p == 'S':
y -= 1
elif p == 'E':
x += 1
else:
x -= 1
if (x, y) in visited:
return True
else:
visited.add((x, y))
return False
if __name__ == '__main__':
path = input("Input: ")
print(f"Output: {Solution().isPathCrossing(path)}")
| [
"[email protected]"
] | |
8399f0725684d5f05d0c7cdd73ca17a6c14c7062 | 403217dc6e0ea465b90d26faaa630dc30b04b396 | /tests/test_transformers.py | 47c941b50561247fa4c2c912717b5c08700f0256 | [
"Python-2.0",
"Apache-2.0"
] | permissive | fuzeman/QueryCondenser | f5708fe855c449e195d20d7db9ca5e7b0b657541 | 624d8db0077e540b4214eb44bb1def4bd659c50a | refs/heads/master | 2016-09-11T03:35:02.571079 | 2013-11-25T04:04:47 | 2013-11-25T04:04:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,422 | py | # Copyright 2013 Dean Gardiner <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from unittest import TestCase
from logr import Logr
from qcond import MergeTransformer, SliceTransformer
from qcond.helpers import itemsMatch
from qcond.transformers.base import Transformer
from qcond.transformers.merge import DNode, print_tree
class TestTransformer(TestCase):
def test_run(self):
transformer = Transformer()
self.assertRaises(NotImplementedError, transformer.run, [])
class TestMergeTransformer(TestCase):
def setUp(self):
Logr.configure(logging.DEBUG)
self.merge = MergeTransformer()
def test_apartment_23(self):
self.assertSequenceEqual(self.merge.run([
"Don't Trust the B---- in Apartment 23",
"Apartment 23",
"Apt 23",
"Don't Trust the B in Apt 23",
"Don't Trust the B- in Apt 23",
"Don't Trust the Bitch in Apartment 23",
"Don't Trust the Bitch in Apt 23",
"Dont Trust the Bitch in Apartment 23"
]), [
'dont trust the',
'dont trust the apartment 23',
'dont trust the apt 23',
'apt 23',
'apartment 23'
])
def test_legend_of_korra(self):
self.assertSequenceEqual(self.merge.run([
"The Legend of Korra",
"The Last Airbender The Legend of Korra",
"Avatar: The Legend of Korra",
"Legend of Korra",
"La Leggenda Di Korra"
]), [
'the',
'the korra',
'avatar the legend of korra',
'la leggenda di korra',
'legend of korra'
])
def test_merge_is_order_independent(self):
root_one = [
self._create_chain(['avatar', 'the', 'legend', 'of', 'korra']),
self._create_chain(['la', 'leggenda', 'di', 'korra']),
self._create_chain(['the', 'last', 'airbender', 'the', 'legend', 'of', 'korra'])
]
self._create_chain(['legend', 'of', 'korra'], root_one[-1])
root_one.append(self._create_chain(['legend', 'of', 'korra']))
result_one = self.merge.merge(root_one)
Logr.debug("-----------------------------------------------------------------")
root_two = [
self._create_chain(['the', 'legend', 'of', 'korra']),
]
self._create_chain(['last', 'airbender', 'the', 'legend', 'of', 'korra'], root_two[-1])
root_two += [
self._create_chain(['legend', 'of', 'korra']),
self._create_chain(['la', 'leggenda', 'di', 'korra']),
self._create_chain(['avatar', 'the', 'legend', 'of', 'korra'])
]
result_two = self.merge.merge(root_two)
Logr.debug("=================================================================")
assert itemsMatch(
self._get_chain_values(result_one),
self._get_chain_values(result_two)
)
def test_merge(self):
pass
def _get_chain_values(self, node_or_nodes):
if type(node_or_nodes) is list:
results = []
for node in node_or_nodes:
results += self._get_chain_values(node)
return results
node = node_or_nodes
if node.right:
return self._get_chain_values(node.right)
score, value, original_value = node.full_value()
return [value]
def _create_chain(self, words, root=None):
if not root:
root = DNode(words[0], None)
words = words[1:]
last_node = root
while len(words):
word = words.pop(0)
node = DNode(word, last_node)
last_node.right.append(node)
last_node = node
return root
class TestSliceTransformer(TestCase):
def setUp(self):
self.slice = SliceTransformer()
def test_apartment_23(self):
self.assertSequenceEqual(self.slice.run([
"Don't Trust the B---- in Apartment 23",
"Apartment 23",
"Apt 23",
"Don't Trust the B in Apt 23",
"Don't Trust the B- in Apt 23",
"Don't Trust the Bitch in Apartment 23",
"Don't Trust the Bitch in Apt 23",
"Dont Trust the Bitch in Apartment 23"
]), [
"Don't Trust the B in Apt 23",
'Dont Trust the Bitch in Apartment 23',
'Apartment 23',
'Apt 23'
])
def test_legend_of_korra(self):
self.assertSequenceEqual(self.slice.run([
"The Legend of Korra",
"The Last Airbender The Legend of Korra",
"Avatar: The Legend of Korra",
"Legend of Korra",
"La Leggenda Di Korra"
]), [
'Legend of Korra',
'La Leggenda Di Korra'
])
| [
"[email protected]"
] | |
1755ace993f4ea02065efd561ec2b726b5d17337 | 838302a39e25067fa7152c1a21574d80dbc25e94 | /routes/urls.py | 482446f2644cfefb444e61bbee0deb991c87a2b7 | [] | no_license | Vadym-Hub/route_search | 53f46b39f588bb9ee53f1f70d09f045f1d466492 | b1c0b5ac754e5e3601ab6815649eda4f50e9ae32 | refs/heads/master | 2021-09-28T01:40:57.271666 | 2020-07-12T23:03:27 | 2020-07-12T23:03:27 | 250,011,206 | 0 | 0 | null | 2021-09-22T18:57:23 | 2020-03-25T15:07:23 | Python | UTF-8 | Python | false | false | 525 | py | from django.urls import path
from .views import HomeView, RouteDetailView, RouteListView, add_route, find_routes, RouteDeleteView
app_name = 'routes'
urlpatterns = [
path('find/', find_routes, name='find_routes'),
path('add_route/', add_route, name='add_route'),
path('list/', RouteListView.as_view(), name='list'),
path('detail/<int:pk>/', RouteDetailView.as_view(), name='detail'),
path('delete/<int:pk>/', RouteDeleteView.as_view(), name='delete'),
path('', HomeView.as_view(), name='home'),
]
| [
"[email protected]"
] | |
c64a3abd2aad3c7bedc07c54f1a3deb7689b11c4 | 685f4474699d769dae88537c69f5517ac13a8431 | /EL385..py | 4170e4cfd9fa9008ec8bd6c37d95d5916a705ace | [] | no_license | Pumafied/Project-Euler | 7466f48e449b7314598c106398c0be0424ae72d5 | 0c3e80a956893ce1881a9694131d52b156b9d3d8 | refs/heads/master | 2016-09-05T22:45:09.733696 | 2013-04-20T04:46:48 | 2013-04-20T04:46:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | # http://projecteuler.net/problem=385 | [
"[email protected]"
] | |
a72e20d9939dd2d43d0f6b798a108c4a1ceb872e | e99dfc900052272f89d55f2fd284389de2cf6a73 | /apostello/forms.py | e069a875e9ac32cb666be19e6c15e47374ee20b0 | [
"MIT"
] | permissive | armenzg/apostello | a3e6ca3d34917608af79fbab4134ee4de1f5e8ee | 1827547b5a8cf94bf1708bb4029c0b0e834416a9 | refs/heads/master | 2021-01-18T18:16:02.364837 | 2017-03-22T20:34:21 | 2017-03-22T20:34:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,984 | py | from django import forms
from django.forms import ModelMultipleChoiceField
from apostello.models import Keyword, Recipient, RecipientGroup, UserProfile
from apostello.validators import gsm_validator, less_than_sms_char_limit
class SendAdhocRecipientsForm(forms.Form):
"""Send an sms to ad-hoc groups."""
content = forms.CharField(
validators=[gsm_validator, less_than_sms_char_limit],
required=True,
min_length=1,
)
recipients = forms.ModelMultipleChoiceField(
queryset=Recipient.objects.filter(is_archived=False),
required=True,
help_text='',
widget=forms.SelectMultiple(
attrs={
"class": "ui compact search dropdown",
"multiple": "",
}
),
)
scheduled_time = forms.DateTimeField(
required=False,
help_text='Leave this blank to send your message immediately, '
'otherwise select a date and time to schedule your message',
widget=forms.TextInput(
attrs={
'data-field': 'datetime',
'readonly': True,
},
),
)
def clean(self):
"""Override clean method to check SMS cost limit."""
cleaned_data = super(SendAdhocRecipientsForm, self).clean()
if 'recipients' in cleaned_data and 'content' in cleaned_data:
# if we have no recipients, we don't need to check cost limit
Recipient.check_user_cost_limit(
cleaned_data['recipients'],
self.user.profile.message_cost_limit, cleaned_data['content']
)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(SendAdhocRecipientsForm, self).__init__(*args, **kwargs)
class SendRecipientGroupForm(forms.Form):
"""Send an sms to pre-defined group."""
content = forms.CharField(
validators=[gsm_validator, less_than_sms_char_limit],
required=True,
min_length=1,
)
recipient_group = forms.ModelChoiceField(
queryset=RecipientGroup.objects.filter(is_archived=False),
required=True,
empty_label='Choose a group...',
widget=forms.Select(
attrs={
"class": "ui fluid dropdown",
"id": "id_recipient_group",
}
),
)
scheduled_time = forms.DateTimeField(
required=False,
help_text='Leave this blank to send your message immediately, '
'otherwise select a date and time to schedule your message',
widget=forms.TextInput(
attrs={
'data-field': 'datetime',
'readonly': True,
},
),
)
def clean(self):
"""Override clean method to check SMS cost limit."""
cleaned_data = super(SendRecipientGroupForm, self).clean()
if 'recipient_group' in cleaned_data and 'content' in cleaned_data:
# if we have no recipient group, we don't need to check cost limit
cleaned_data['recipient_group'].check_user_cost_limit(
self.user.profile.message_cost_limit, cleaned_data['content']
)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(SendRecipientGroupForm, self).__init__(*args, **kwargs)
class ManageRecipientGroupForm(forms.ModelForm):
"""
Manage RecipientGroup updates and creation.
__init__ and save are overridden to pull in group members.
"""
class Meta:
model = RecipientGroup
exclude = ['is_archived']
class RecipientForm(forms.ModelForm):
"""Handle Recipients."""
class Meta:
model = Recipient
exclude = ['is_archived', 'is_blocking']
widgets = {
'number': forms.TextInput(attrs={'placeholder': '+447259006790'}),
'groups': forms.SelectMultiple(
attrs={
"class": "ui fluid search dropdown",
"multiple": "",
"id": "groups_dropdown",
}
),
}
class UserChoiceField(ModelMultipleChoiceField):
"""Display emails and user names when selecting users."""
def label_from_instance(self, obj):
"""Display the user's label."""
return '{0} ({1})'.format(obj.email, obj.username)
class KeywordForm(forms.ModelForm):
"""Handle Keywords."""
class Meta:
model = Keyword
exclude = ['is_archived', 'last_email_sent_time']
field_classes = {
'subscribed_to_digest': UserChoiceField,
'owners': UserChoiceField,
}
widgets = {
'keyword':
forms.TextInput(attrs={'placeholder': '(No spaces allowed)'}),
'description': forms.TextInput(
attrs={
'placeholder':
'Please provide a description of your keyword.'
}
),
'custom_response': forms.TextInput(
attrs={
'placeholder':
'eg: Thanks %name%, you have sucessfully signed up.'
}
),
'activate_time': forms.TextInput(
attrs={
'data-field': 'datetime',
'readonly': True,
},
),
'deactivate_time': forms.TextInput(
attrs={
'data-field': 'datetime',
'readonly': True,
},
),
'owners': forms.SelectMultiple(
attrs={
"class": "ui fluid search dropdown",
"multiple": "",
"id": "owners_dropdown",
}
),
'linked_groups': forms.SelectMultiple(
attrs={
"class": "ui fluid search dropdown",
"multiple": "",
"id": "linked_group_dropdown",
}
),
'subscribed_to_digest': forms.SelectMultiple(
attrs={
"class": "ui fluid search dropdown",
"multiple": "",
"id": "digest_dropdown",
}
),
}
class CsvImport(forms.Form):
"""Handle CSV imports."""
csv_data = forms.CharField(
help_text='John, Calvin, +447095237960', widget=forms.Textarea
)
class UserProfileForm(forms.ModelForm):
"""Handle User Permission Updates"""
class Meta:
model = UserProfile
exclude = ['user', ]
class GroupAllCreateForm(forms.Form):
"""Form used to create groups with all recipients.
Should only be used to create, not edit groups.
"""
group_name = forms.CharField(
help_text='Name of group.\n'
'If this group already exists it will be overwritten.',
max_length=150,
)
| [
"[email protected]"
] | |
93fa705b2aa486c2ea927afb7382f4d04a4ab1b2 | 4569d707a4942d3451f3bbcfebaa8011cc5a128d | /masterticketsplugin/branches/0.10/setup.py | 959b0c3c08eb24427c6df3a00be9187b87778476 | [] | no_license | woochica/trachacks | 28749b924c897747faa411876a3739edaed4cff4 | 4fcd4aeba81d734654f5d9ec524218b91d54a0e1 | refs/heads/master | 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
from setuptools import setup
setup(
name = 'TracMasterTickets',
version = '1.0',
packages = ['mastertickets'],
package_data = { 'mastertickets': ['htdocs/*.js', 'htdocs/*.css' ] },
author = "Noah Kantrowitz",
author_email = "[email protected]",
description = "Provides support for ticket dependencies and master tickets.",
license = "BSD",
keywords = "trac plugin ticket dependencies master",
url = "http://trac-hacks.org/wiki/MasterTicketsPlugin",
classifiers = [
'Framework :: Trac',
],
install_requires = ['TracWebAdmin'],
entry_points = {
'trac.plugins': [
'mastertickets.web_ui = mastertickets.web_ui',
]
}
)
| [
"coderanger@7322e99d-02ea-0310-aa39-e9a107903beb"
] | coderanger@7322e99d-02ea-0310-aa39-e9a107903beb |
cb90179a0f2c0c6d6d9ecd0add119d15ce349b91 | cdaeb2c9bbb949b817f9139db2d18120c70f1694 | /setup.py | 3464bc1d5f33496f16de775ce95e205c38b6b79e | [
"Apache-2.0"
] | permissive | sreekanthpulagam/rakam-python-client | 665c984ac7a29b57ead6feaeb99a69ba345220e6 | 8bd843208b03726d6ce89ee343b48b889b576e0e | refs/heads/master | 2021-01-24T15:42:36.374366 | 2016-07-19T21:49:26 | 2016-07-19T21:49:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | # coding: utf-8
"""
Rakam API Documentation
An analytics platform API that lets you create your own analytics services.
OpenAPI spec version: 0.5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from setuptools import setup, find_packages
NAME = "rakam_client"
VERSION = "0.5"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Rakam API Documentation",
author_email="[email protected]",
url="",
keywords=["Swagger", "Rakam API Documentation"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
An analytics platform API that lets you create your own analytics services.
"""
)
| [
"[email protected]"
] | |
83aaae77975388840c2ec6d353f47230ec65d254 | 540c2057cb9180f563b8b097a3b369d3e346cc2c | /federatedml/protobuf/generated/mf_model_meta_pb2.py | f9376f4c31aedbd95d2bfc3b5d1b42d205ba391d | [
"Apache-2.0"
] | permissive | AustinNeverPee/FedRec | e35d282d9d80dc1312278b55112072c7fbf24d0c | 24a246239f8179c0d5facc982229d1568d05ae26 | refs/heads/master | 2022-12-25T08:07:26.470641 | 2020-08-30T12:02:46 | 2020-08-30T12:02:46 | 271,752,017 | 0 | 0 | Apache-2.0 | 2020-06-12T08:47:08 | 2020-06-12T08:47:07 | null | UTF-8 | Python | false | true | 11,192 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mf-model-meta.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='mf-model-meta.proto',
package='com.webank.ai.fate.core.mlmodel.buffer.mf',
syntax='proto3',
serialized_options=_b('B\020MFModelMetaProto'),
serialized_pb=_b('\n\x13mf-model-meta.proto\x12)com.webank.ai.fate.core.mlmodel.buffer.mf\",\n\tEarlyStop\x12\x12\n\nearly_stop\x18\x01 \x01(\t\x12\x0b\n\x03\x65ps\x18\x02 \x01(\x01\",\n\tOptimizer\x12\x11\n\toptimizer\x18\x01 \x01(\t\x12\x0c\n\x04\x61rgs\x18\x02 \x01(\t\"\xb5\x02\n\rHeteroMFParam\x12\x18\n\x10secure_aggregate\x18\x01 \x01(\x08\x12\x1f\n\x17\x61ggregate_every_n_epoch\x18\x02 \x01(\x05\x12\x12\n\nbatch_size\x18\x03 \x01(\x05\x12\x10\n\x08max_iter\x18\x04 \x01(\x05\x12H\n\nearly_stop\x18\x05 \x01(\x0b\x32\x34.com.webank.ai.fate.core.mlmodel.buffer.mf.EarlyStop\x12\x0f\n\x07metrics\x18\x06 \x03(\t\x12G\n\toptimizer\x18\x07 \x01(\x0b\x32\x34.com.webank.ai.fate.core.mlmodel.buffer.mf.Optimizer\x12\x0c\n\x04loss\x18\x08 \x01(\t\x12\x11\n\tembed_dim\x18\t \x01(\x05\"o\n\x0bMFModelMeta\x12\x16\n\x0e\x61ggregate_iter\x18\x01 \x01(\x05\x12H\n\x06params\x18\x64 \x01(\x0b\x32\x38.com.webank.ai.fate.core.mlmodel.buffer.mf.HeteroMFParamB\x12\x42\x10MFModelMetaProtob\x06proto3')
)
_EARLYSTOP = _descriptor.Descriptor(
name='EarlyStop',
full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.EarlyStop',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='early_stop', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.EarlyStop.early_stop', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eps', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.EarlyStop.eps', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=66,
serialized_end=110,
)
_OPTIMIZER = _descriptor.Descriptor(
name='Optimizer',
full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.Optimizer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='optimizer', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.Optimizer.optimizer', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='args', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.Optimizer.args', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=112,
serialized_end=156,
)
_HETEROMFPARAM = _descriptor.Descriptor(
name='HeteroMFParam',
full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.HeteroMFParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='secure_aggregate', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.HeteroMFParam.secure_aggregate', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='aggregate_every_n_epoch', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.HeteroMFParam.aggregate_every_n_epoch', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.HeteroMFParam.batch_size', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_iter', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.HeteroMFParam.max_iter', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='early_stop', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.HeteroMFParam.early_stop', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metrics', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.HeteroMFParam.metrics', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optimizer', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.HeteroMFParam.optimizer', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.HeteroMFParam.loss', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='embed_dim', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.HeteroMFParam.embed_dim', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=159,
serialized_end=468,
)
_MFMODELMETA = _descriptor.Descriptor(
name='MFModelMeta',
full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.MFModelMeta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='aggregate_iter', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.MFModelMeta.aggregate_iter', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='params', full_name='com.webank.ai.fate.core.mlmodel.buffer.mf.MFModelMeta.params', index=1,
number=100, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=470,
serialized_end=581,
)
_HETEROMFPARAM.fields_by_name['early_stop'].message_type = _EARLYSTOP
_HETEROMFPARAM.fields_by_name['optimizer'].message_type = _OPTIMIZER
_MFMODELMETA.fields_by_name['params'].message_type = _HETEROMFPARAM
DESCRIPTOR.message_types_by_name['EarlyStop'] = _EARLYSTOP
DESCRIPTOR.message_types_by_name['Optimizer'] = _OPTIMIZER
DESCRIPTOR.message_types_by_name['HeteroMFParam'] = _HETEROMFPARAM
DESCRIPTOR.message_types_by_name['MFModelMeta'] = _MFMODELMETA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EarlyStop = _reflection.GeneratedProtocolMessageType('EarlyStop', (_message.Message,), dict(
DESCRIPTOR = _EARLYSTOP,
__module__ = 'mf_model_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.mf.EarlyStop)
))
_sym_db.RegisterMessage(EarlyStop)
Optimizer = _reflection.GeneratedProtocolMessageType('Optimizer', (_message.Message,), dict(
DESCRIPTOR = _OPTIMIZER,
__module__ = 'mf_model_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.mf.Optimizer)
))
_sym_db.RegisterMessage(Optimizer)
HeteroMFParam = _reflection.GeneratedProtocolMessageType('HeteroMFParam', (_message.Message,), dict(
DESCRIPTOR = _HETEROMFPARAM,
__module__ = 'mf_model_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.mf.HeteroMFParam)
))
_sym_db.RegisterMessage(HeteroMFParam)
MFModelMeta = _reflection.GeneratedProtocolMessageType('MFModelMeta', (_message.Message,), dict(
DESCRIPTOR = _MFMODELMETA,
__module__ = 'mf_model_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.mf.MFModelMeta)
))
_sym_db.RegisterMessage(MFModelMeta)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
cb86190241829fe4dbed3dcca133c4bba33f705d | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2021_12_01_preview/operations/_tenant_level_access_review_instance_contacted_reviewers_operations.py | 48d84587e79ccee9f1e63b27a608b56c06fc182f | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 7,033 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(schedule_definition_id: str, id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-12-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/instances/{id}/contactedReviewers",
) # pylint: disable=line-too-long
path_format_arguments = {
"scheduleDefinitionId": _SERIALIZER.url("schedule_definition_id", schedule_definition_id, "str"),
"id": _SERIALIZER.url("id", id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class TenantLevelAccessReviewInstanceContactedReviewersOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.authorization.v2021_12_01_preview.AuthorizationManagementClient`'s
:attr:`tenant_level_access_review_instance_contacted_reviewers` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, schedule_definition_id: str, id: str, **kwargs: Any
) -> Iterable["_models.AccessReviewContactedReviewer"]:
"""Get access review instance contacted reviewers.
:param schedule_definition_id: The id of the access review schedule definition. Required.
:type schedule_definition_id: str
:param id: The id of the access review instance. Required.
:type id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AccessReviewContactedReviewer or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.authorization.v2021_12_01_preview.models.AccessReviewContactedReviewer]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-12-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.AccessReviewContactedReviewerListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
schedule_definition_id=schedule_definition_id,
id=id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("AccessReviewContactedReviewerListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/instances/{id}/contactedReviewers"} # type: ignore
| [
"[email protected]"
] | |
24983dba27a4c3513d731d7b06bc5dccdeee9d43 | 7dba60ae27ff247705607839348f017b85f5da16 | /nyumbax/migrations/0002_auto_20210411_0803.py | 2ec99f45ae6bd3b4e3db2f3f8d33a24f6ac451aa | [
"MIT"
] | permissive | BwanaQ/nyumba-kumi | 7edccb6745ede6d9f6faf5bd8c0dcf6e24726991 | c264b0941c77a4d7175a2dc5380723bea1acf380 | refs/heads/master | 2023-04-05T09:32:34.867456 | 2021-04-13T15:54:16 | 2021-04-13T15:54:16 | 356,136,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,845 | py | # Generated by Django 3.2 on 2021-04-11 08:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('nyumbax', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Essential',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('officer', models.CharField(max_length=100)),
('phone', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
],
),
migrations.RemoveField(
model_name='hood',
name='admin',
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('body', models.TextField()),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Business',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
1732ea51eb17a5805ad684399acaf1b1dd263503 | e34ba843cf682892462aec8b477d4a708968286d | /examples/reinforce/play_train_eval.py | 3940862cc92ca0638f291c3fc9e4d0b1fd396006 | [] | no_license | mecha2k/mygo | e088e4abff292aa225dd22655ef9032cd89ddabc | db77aeade0ef25b9cd8d0097aff7dd7cc7d78ef6 | refs/heads/master | 2023-01-21T21:37:57.930762 | 2020-11-26T14:02:33 | 2020-11-26T14:02:33 | 303,343,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,461 | py | import argparse
import datetime
import multiprocessing
import os
import random
import shutil
import time
import tempfile
from collections import namedtuple
from dotenv import load_dotenv
import h5py
import numpy as np
from dlgo import agent
from dlgo import kerasutil
from dlgo import scoring
from dlgo import reinforce
from dlgo.goboard_fast import GameState, Player, Point
COLS = "ABCDEFGHJKLMNOPQRST"
STONE_TO_CHAR = {
None: ".",
Player.black: "x",
Player.white: "o",
}
def avg(items):
if not items:
return 0.0
return sum(items) / float(len(items))
def print_board(board):
for row in range(board.num_rows, 0, -1):
line = []
for col in range(1, board.num_cols + 1):
stone = board.get(Point(row=row, col=col))
line.append(STONE_TO_CHAR[stone])
print("%2d %s" % (row, "".join(line)))
print(" " + COLS[: board.num_cols])
class GameRecord(namedtuple("GameRecord", "moves winner margin")):
pass
def name(player):
if player == Player.black:
return "B"
return "W"
def simulate_game(black_player, white_player, board_size):
moves = []
game = GameState.new_game(board_size)
agents = {
Player.black: black_player,
Player.white: white_player,
}
while not game.is_over():
next_move = agents[game.next_player].select_move(game)
moves.append(next_move)
game = game.apply_move(next_move)
print_board(game.board)
game_result = scoring.compute_game_result(game)
print(game_result)
return GameRecord(
moves=moves,
winner=game_result.winner,
margin=game_result.winning_margin,
)
def get_temp_file():
fd, fname = tempfile.mkstemp(prefix="dlgo-train")
os.close(fd)
return fname
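# Self-play worker: loads the learning agent (agent1) and a frozen opponent
# (agent2) from HDF5, plays num_games games while recording experience only for
# agent1, alternates agent1's color every game, and serializes the combined
# experience buffer to experience_filename. gpu_frac caps this process's share
# of GPU memory so several workers can run in parallel.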
def do_self_play(
board_size,
agent1_filename,
agent2_filename,
num_games,
temperature,
experience_filename,
gpu_frac,
):
kerasutil.set_gpu_memory_target(gpu_frac)
random.seed(int(time.time()) + os.getpid())
np.random.seed(int(time.time()) + os.getpid())
with h5py.File(agent1_filename, "r") as agent1f:
agent1 = agent.load_policy_agent(agent1f)
agent1.set_temperature(temperature)
with h5py.File(agent2_filename, "r") as agent2f:
agent2 = agent.load_policy_agent(agent2f)
collector1 = reinforce.ExperienceCollector()
color1 = Player.black
for i in range(num_games):
print("Simulating game %d/%d..." % (i + 1, num_games))
collector1.begin_episode()
agent1.set_collector(collector1)
if color1 == Player.black:
black_player, white_player = agent1, agent2
else:
white_player, black_player = agent1, agent2
game_record = simulate_game(black_player, white_player, board_size)
if game_record.winner == color1:
print("Agent 1 wins.")
collector1.complete_episode(reward=1)
else:
print("Agent 2 wins.")
collector1.complete_episode(reward=-1)
color1 = color1.other
experience = reinforce.combine_experience([collector1])
print("Saving experience buffer to %s\n" % experience_filename)
with h5py.File(experience_filename, "w") as experience_outf:
experience.serialize(experience_outf)
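# Fans self-play out over num_workers child processes (each gets an equal share
# of the games and roughly 0.95/num_workers of the GPU), waits for them to
# finish, merges the per-worker experience buffers into a single HDF5 file at
# exp_file, and removes the temporary buffers.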
def generate_experience(learning_agent, reference_agent, exp_file, num_games, board_size, num_workers, temperature):
experience_files = []
workers = []
gpu_frac = 0.95 / float(num_workers)
games_per_worker = num_games // num_workers
for i in range(num_workers):
filename = get_temp_file()
experience_files.append(filename)
worker = multiprocessing.Process(
target=do_self_play,
args=(
board_size,
learning_agent,
reference_agent,
games_per_worker,
temperature,
filename,
gpu_frac,
),
)
worker.start()
workers.append(worker)
# Wait for all workers to finish.
print("Waiting for workers...")
for worker in workers:
worker.join()
# Merge experience buffers.
print("Merging experience buffers...")
first_filename = experience_files[0]
other_filenames = experience_files[1:]
with h5py.File(first_filename, "r") as expf:
combined_buffer = reinforce.load_experience(expf)
for filename in other_filenames:
with h5py.File(filename, "r") as expf:
next_buffer = reinforce.load_experience(expf)
combined_buffer = reinforce.combine_experience([combined_buffer, next_buffer])
print("Saving into %s..." % exp_file)
with h5py.File(exp_file, "w") as experience_outf:
combined_buffer.serialize(experience_outf)
# Clean up.
for fname in experience_files:
os.unlink(fname)
def train_worker(learning_agent, output_file, experience_file, lr, batch_size):
with h5py.File(learning_agent, "r") as learning_agentf:
learning_agent = agent.load_policy_agent(learning_agentf)
with h5py.File(experience_file, "r") as expf:
exp_buffer = reinforce.load_experience(expf)
learning_agent.train(exp_buffer, lr=lr, batch_size=batch_size)
with h5py.File(output_file, "w") as updated_agent_outf:
learning_agent.serialize(updated_agent_outf)
def train_on_experience(learning_agent, output_file, experience_file, lr, batch_size):
# Do the training in the background process. Otherwise some Keras
# stuff gets initialized in the parent, and later that forks, and
# that messes with the workers.
worker = multiprocessing.Process(
target=train_worker, args=[learning_agent, output_file, experience_file, lr, batch_size]
)
worker.start()
worker.join()
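# Evaluation worker: plays num_games games between two saved agents, alternating
# agent1's color, and returns agent1's win/loss counts. Used by evaluate() below
# through a multiprocessing pool.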
def play_games(args):
agent1_fname, agent2_fname, num_games, board_size, gpu_frac = args
kerasutil.set_gpu_memory_target(gpu_frac)
random.seed(int(time.time()) + os.getpid())
np.random.seed(int(time.time()) + os.getpid())
with h5py.File(agent1_fname, "r") as agent1f:
agent1 = agent.load_policy_agent(agent1f)
with h5py.File(agent2_fname, "r") as agent2f:
agent2 = agent.load_policy_agent(agent2f)
wins, losses = 0, 0
color1 = Player.black
for i in range(num_games):
print("Simulating game %d/%d..." % (i + 1, num_games))
if color1 == Player.black:
black_player, white_player = agent1, agent2
else:
white_player, black_player = agent1, agent2
game_record = simulate_game(black_player, white_player, board_size)
if game_record.winner == color1:
print("Agent 1 wins")
wins += 1
else:
print("Agent 2 wins")
losses += 1
print("Agent 1 record: %d/%d" % (wins, wins + losses))
color1 = color1.other
return wins, losses
def evaluate(learning_agent, reference_agent, num_games, num_workers, board_size):
games_per_worker = num_games // num_workers
gpu_frac = 0.95 / float(num_workers)
pool = multiprocessing.Pool(num_workers)
worker_args = [
(
learning_agent,
reference_agent,
games_per_worker,
board_size,
gpu_frac,
)
for _ in range(num_workers)
]
game_results = pool.map(play_games, worker_args)
total_wins, total_losses = 0, 0
for wins, losses in game_results:
total_wins += wins
total_losses += losses
print("FINAL RESULTS:")
print("Learner: %d" % total_wins)
print("Refrnce: %d" % total_losses)
pool.close()
pool.join()
return total_wins
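# Training loop: repeatedly (1) generate self-play experience against the current
# reference agent, (2) train the learning agent on that experience, (3) evaluate
# the trained agent over 480 games, and (4) if it wins at least 262 of them
# (~54.6%) promote the trained weights to be the new reference; either way,
# training continues from the updated working copy. Paths come from CLI args and
# the DATA_DIR/AGENT_DIR environment variables loaded via dotenv.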
def main():
load_dotenv(verbose=True)
DATA_DIR = os.getenv("DATA_DIR")
AGENT_DIR = os.getenv("AGENT_DIR")
agent_file = AGENT_DIR + "/my_deep_bot.h5"
log_file = DATA_DIR + "/reinforce/play_train.log"
parser = argparse.ArgumentParser()
parser.add_argument("--agent", default=agent_file)
parser.add_argument("--games-per-batch", "-g", type=int, default=2)
parser.add_argument("--work-dir", "-d", default=DATA_DIR + "/reinforce")
parser.add_argument("--num-workers", "-w", type=int, default=1)
parser.add_argument("--temperature", "-t", type=float, default=0.05)
parser.add_argument("--board-size", "-b", type=int, default=19)
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument("--bs", type=int, default=512)
parser.add_argument("--log-file", "-l", default=log_file)
args = parser.parse_args()
logf = open(args.log_file, "a")
logf.write("----------------------\n")
logf.write("Starting from %s at %s\n" % (args.agent, datetime.datetime.now()))
learning_agent = args.agent
reference_agent = args.agent
experience_file = os.path.join(args.work_dir, "exp_temp.hdf5")
tmp_agent = os.path.join(args.work_dir, "agent_temp.hdf5")
working_agent = os.path.join(args.work_dir, "agent_cur.hdf5")
total_games = 0
while True:
print("Reference: %s" % (reference_agent,))
logf.write("Total games so far %d\n" % (total_games,))
generate_experience(
learning_agent,
reference_agent,
experience_file,
num_games=args.games_per_batch,
board_size=args.board_size,
num_workers=args.num_workers,
temperature=args.temperature,
)
train_on_experience(learning_agent, tmp_agent, experience_file, lr=args.lr, batch_size=args.bs)
total_games += args.games_per_batch
wins = evaluate(
learning_agent,
reference_agent,
num_games=480,
num_workers=args.num_workers,
board_size=args.board_size,
)
print("Won %d / 480 games (%.3f)" % (wins, float(wins) / 480.0))
logf.write("Won %d / 480 games (%.3f)\n" % (wins, float(wins) / 480.0))
shutil.copy(tmp_agent, working_agent)
learning_agent = working_agent
if wins >= 262:
next_filename = os.path.join(args.work_dir, "agent_%08d.hdf5" % (total_games,))
shutil.move(tmp_agent, next_filename)
reference_agent = next_filename
logf.write("New reference is %s\n" % next_filename)
else:
print("Keep learning\n")
logf.flush()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
d0074cf34faa97e409587457b3a55e2e27a680b9 | e4e1223e28a47bee5c47f3b08e05276996bb3694 | /example/context.py | 101cbd0c816dc15c5e05434adbb53d99e805564c | [
"BSD-3-Clause"
] | permissive | ecarreras/subcmd | 891bc108ea713a2e2f78dfde9a2e7f2661f3c847 | ee0475b82da7125909c6a6828eee115d20e6193c | refs/heads/master | 2020-12-25T04:18:09.850340 | 2012-08-28T09:09:52 | 2012-08-28T09:09:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,682 | py | #
## BEGIN LICENSE BLOCK
#
# Copyright (c) <2012>, Raul Perez <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
## END LICENSE BLOCK
#
import sys
import os
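# Prepend the parent directory to sys.path so the examples can import the
# in-repo package without it being installed.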
sys.path.insert(0, os.path.abspath('..'))
| [
"[email protected]"
] | |
e7ed20584cd2a3e9120547bf256c89a97295ec0d | 1a949f20cafe328c5ad145659903e8dc5d974a76 | /subjects/admin.py | 70f7ea47f0a4e6dd08a1ec643b007f0d733a30c9 | [] | no_license | Fabricourt/plotx | 7154be9153ab532796a16a1de3125276913fca97 | b2a526d4a9236217978a48a997b3b425cd40c0a9 | refs/heads/master | 2022-12-11T18:57:36.631087 | 2020-07-07T17:22:50 | 2020-07-07T17:22:50 | 230,000,109 | 0 | 1 | null | 2022-12-08T03:27:54 | 2019-12-24T20:25:39 | JavaScript | UTF-8 | Python | false | false | 458 | py | from django.contrib import admin
from .models import *
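# Django admin configuration for Subject: list columns, filters, searchable
# fields, inline editing of is_published, and slug pre-populated from subject_name.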
class SubjectAdmin(admin.ModelAdmin):
list_display = ('subject_name', 'created_by', 'is_published', )
list_display_links = ('subject_name',)
list_filter = ('subject_name', 'created_by',)
list_editable = ('is_published',)
search_fields = ('subject_name', 'created_by', )
prepopulated_fields = {"slug": ('subject_name',)}
list_per_page = 25
admin.site.register(Subject, SubjectAdmin) | [
"[email protected]"
] | |
397e0436029206c88d0e89238ad11c9b50d2c719 | 80e83dd69395312db092f7b0277310a29afb95b6 | /untitled1/doc-To-Excel/ResolveDocx_JieYing-AddValue.py | d703a0525d96c3bd701c4fe5765107cd7388ac85 | [] | no_license | yif-zhu/Python-Project | 3a102695a7eab2e149e260ccee955de84685b6cb | d55edb652d66e6694a120eb329cd04abba57ba1e | refs/heads/master | 2023-01-21T00:59:27.743845 | 2020-12-04T08:42:56 | 2020-12-04T08:42:56 | 299,492,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,901 | py | # _*_ coding:utf-8 _*_
import os
import os.path
import sys
import re
import xml.etree.ElementTree as XETree
import xml.etree.ElementTree as ET
from docx import Document
from openpyxl import load_workbook
cdfp = None
cwb = None
clws = None
DATANOTFOUND = 0
writeLog = 0
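# This script pulls tables out of a .docx report (python-docx) and writes
# selected cells into an Excel template (openpyxl), driven by an XML mapping
# file. Module-level state: cdfp appears to be the destination file path, cwb
# the destination workbook, clws an optional "Extract Log" worksheet,
# DATANOTFOUND counts mapping items whose anchors were not found, and writeLog
# toggles logging into the workbook. Log messages and some sheet names are
# Chinese and functional (they must match the mapping file), e.g. '提取' =
# "extract", '数据未找到' = "data not found", '资产统计信息' = "asset statistics".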
def formatNum(num):
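    # Inserts thousands separators by reapplying re.subn until no run of four or
    # more digits remains. For illustration:
    #   formatNum('1234567.89') -> '1,234,567.89'
    #   formatNum(9500)         -> '9,500'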
    num = str(num)
    pattern = r'(\d+)(\d{3})((,\d{3})*)'
    while True:
        num, count = re.subn(pattern, r'\1,\2\3', num)
        if count == 0:
            break
return num
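# Writes a "supplementary info" block in name/value layout: for every source row
# between sBeginRow and sEndRow it emits one destination row per configured
# source column, filling six destination columns (item code from colsNames, a
# running item number, the value, ReportType, DataSource, TableCode). 'space' /
# 'spaceValue' pseudo-columns emit blanks or constants from colsValue; when
# isReplace is set, per-cell <A#>/<B#> rules from the config can replace a value,
# append a day to a Chinese date, or sum several source columns. If beginrow is
# left empty, output starts at row 3 and is appended below any existing rows.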
def arearMapSupInfoToExcelName(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, reportType, dataSource, tableCode, sNode):
sheet = cwb[dSheet]
sList = sCols.split(',')
dList = dCols.split(',')
dcolsNames = sNode.attrib['colsNames']
isReplace = 'false'
if 'isReplace' in sNode.attrib:
isReplace = sNode.attrib['isReplace']
dNameList = dcolsNames.split(',')
dRowIncress = 0
sRowNum = 0
itemName = 1
if 'itemName' in sNode.attrib:
itemName = int(sNode.attrib['itemName'])
if dBeginRow =='':
dBeginRow = 3
dValue = str(sheet[dList[0] + str(dBeginRow)].value)
while dValue !='None':
dBeginRow += 1
dValue = str(sheet[dList[0] + str(dBeginRow)].value)
if isReplace == 'true':
while sBeginRow <= sEndRow:
for sCellI in range(len(sList)):
sCol = sList[sCellI]
if sCol == 'space':
dRow = dBeginRow + dRowIncress
sheet[dList[0] + str(dRow)] = dNameList[sCellI]
sheet[dList[1] + str(dRow)] = itemName
sheet[dList[2] + str(dRow)] = ' '
sheet[dList[3] + str(dRow)] = reportType
sheet[dList[4] + str(dRow)] = dataSource
sheet[dList[5] + str(dRow)] = tableCode
dRowIncress += 1
elif sCol == 'spaceValue':
scolsValue = sNode.attrib['colsValue'].split(',')
dRow = dBeginRow + dRowIncress
sheet[dList[0] + str(dRow)] = dNameList[sCellI]
sheet[dList[1] + str(dRow)] = itemName
sheet[dList[2] + str(dRow)] = scolsValue[sCellI]
sheet[dList[3] + str(dRow)] = reportType
sheet[dList[4] + str(dRow)] = dataSource
sheet[dList[5] + str(dRow)] = tableCode
dRowIncress += 1
else:
isfind = 0
value = table.rows[sBeginRow].cells[int(sCol)].text
if 'dateFormat' in sNode.attrib:
dateFormat = sNode.attrib['dateFormat']
if dateFormat == 'true':
dateCol = sNode.attrib['dateCol']
if dateCol == sCol:
day = sNode.attrib['day']
if value.find('年') >= 0:
value = value.replace('年', '/').replace('月', '/').replace('日', '')
value = value + day
for rCell in sNode:
cTag = rCell.tag
if cTag == ('A'+str(sRowNum)):
for rsCell in rCell:
crTag = rsCell.tag
if crTag == ('B'+str(sCol)):
isfind = 1
crUsing = rsCell.attrib['using']
if crUsing == 'replace':
value = rsCell.attrib['value']
elif crUsing == 'date-day':
day = rsCell.attrib['value']
value = value.replace('年', '/').replace('月', '/')
value = value + day
elif crUsing == 'sum':
rsRowList = rsCell.attrib['cols'].split(',')
value = float(0.00)
for rsRow in range(len(rsRowList)):
value += float(table.rows[sBeginRow].cells[int(rsRowList[rsRow])].text.replace(',', ''))
value = ('%.2f' % value)
value = formatNum(value)
break
if isfind == 1:
break
dRow = dBeginRow + dRowIncress
sheet[dList[0] + str(dRow)] = dNameList[sCellI]
sheet[dList[1] + str(dRow)] = itemName
sheet[dList[2] + str(dRow)] = str(value)
sheet[dList[3] + str(dRow)] = reportType
sheet[dList[4] + str(dRow)] = dataSource
sheet[dList[5] + str(dRow)] = tableCode
dRowIncress += 1
sBeginRow += 1
sRowNum += 1
itemName += 1
else:
while sBeginRow <= sEndRow:
for sCellI in range(len(sList)):
sCol = sList[sCellI]
if sCol == 'space':
dRow = dBeginRow + dRowIncress
sheet[dList[0] + str(dRow)] = dNameList[sCellI]
sheet[dList[1] + str(dRow)] = itemName
sheet[dList[2] + str(dRow)] = ' '
sheet[dList[3] + str(dRow)] = reportType
sheet[dList[4] + str(dRow)] = dataSource
sheet[dList[5] + str(dRow)] = tableCode
dRowIncress += 1
elif sCol == 'spaceValue':
scolsValue = sNode.attrib['colsValue'].split(',')
dRow = dBeginRow + dRowIncress
sheet[dList[0] + str(dRow)] = dNameList[sCellI]
sheet[dList[1] + str(dRow)] = itemName
sheet[dList[2] + str(dRow)] = scolsValue[sCellI]
sheet[dList[3] + str(dRow)] = reportType
sheet[dList[4] + str(dRow)] = dataSource
sheet[dList[5] + str(dRow)] = tableCode
dRowIncress += 1
else:
value = table.rows[sBeginRow].cells[int(sCol)].text
if 'dateFormat' in sNode.attrib:
dateFormat = sNode.attrib['dateFormat']
if dateFormat == 'true':
dateCol = sNode.attrib['dateCol']
if dateCol == sCol:
day = sNode.attrib['day']
if value.find('年') >= 0:
value = value.replace('年', '/').replace('月', '/').replace('日', '')
value = value + day
dRow = dBeginRow + dRowIncress
sheet[dList[0] + str(dRow)] = dNameList[sCellI]
sheet[dList[1] + str(dRow)] = itemName
sheet[dList[2] + str(dRow)] = value
sheet[dList[3] + str(dRow)] = reportType
sheet[dList[4] + str(dRow)] = dataSource
sheet[dList[5] + str(dRow)] = tableCode
dRowIncress += 1
sBeginRow += 1
sRowNum += 1
itemName += 1
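# Same name/value layout as above, but the destination item codes are generated
# as 'ItemCode0', 'ItemCode1', ... instead of coming from a colsNames attribute.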
def arearMapSupInfoToExcel(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, reportType, dataSource, tableCode, sNode):
sheet = cwb[dSheet]
sList = sCols.split(',')
dList = dCols.split(',')
isReplace = 'false'
if 'isReplace' in sNode.attrib:
isReplace = sNode.attrib['isReplace']
dRowIncress = 0
sRowNum = 0
itemName = 1
if dBeginRow =='':
dBeginRow = 3
dValue = str(sheet[dList[0] + str(dBeginRow)].value)
while dValue !='None':
dBeginRow += 1
dValue = str(sheet[dList[0] + str(dBeginRow)].value)
if isReplace == 'true':
while sBeginRow <= sEndRow:
for sCellI in range(len(sList)):
sCol = sList[sCellI]
isfind = 0
value = table.rows[sBeginRow].cells[int(sCol)].text
if 'dateFormat' in sNode.attrib:
dateFormat = sNode.attrib['dateFormat']
if dateFormat =='true':
dateCol = sNode.attrib['dateCol']
if dateCol == sCol:
day = sNode.attrib['day']
if value.find('年') >= 0:
value = value.replace('年', '/').replace('月', '/').replace('日', '')
value = value + day
for rCell in sNode:
cTag = rCell.tag
if cTag == ('A' + str(sRowNum)):
for rsCell in rCell:
crTag = rsCell.tag
if crTag == ('B' + str(sCol)):
isfind = 1
crUsing = rsCell.attrib['using']
if crUsing == 'replace':
value = rsCell.attrib['value']
elif crUsing =='date-day':
day = rsCell.attrib['value']
value = value.replace('年', '/').replace('月', '/')
value = value + day
elif crUsing == 'sum':
rsRowList = rsCell.attrib['cols'].split(',')
value = float(0.00)
for rsRow in range(len(rsRowList)):
value += float(table.rows[sBeginRow].cells[int(rsRowList[rsRow])].text.replace(',', ''))
value = ('%.2f' % value)
value = formatNum(value)
break
if isfind == 1:
break
dRow = dBeginRow + dRowIncress
sheet[dList[0] + str(dRow)] = 'ItemCode' + str(sCellI)
sheet[dList[1] + str(dRow)] = itemName
sheet[dList[2] + str(dRow)] = str(value)
sheet[dList[3] + str(dRow)] = reportType
sheet[dList[4] + str(dRow)] = dataSource
sheet[dList[5] + str(dRow)] = tableCode
dRowIncress += 1
sBeginRow += 1
sRowNum += 1
itemName += 1
else:
while sBeginRow <= sEndRow:
for sCellI in range(len(sList)):
sCol = sList[sCellI]
value = table.rows[sBeginRow].cells[int(sCol)].text
if 'dateFormat' in sNode.attrib:
dateFormat = sNode.attrib['dateFormat']
if dateFormat =='true':
dateCol = sNode.attrib['dateCol']
if dateCol == sCol:
day = sNode.attrib['day']
if value.find('年') >= 0:
value = value.replace('年', '/').replace('月', '/').replace('日', '')
value = value + day
dRow = dBeginRow + dRowIncress
sheet[dList[0] + str(dRow)] = 'ItemCode'+ str(sCellI)
sheet[dList[1] + str(dRow)] = itemName
sheet[dList[2] + str(dRow)] = value
sheet[dList[3] + str(dRow)] = reportType
sheet[dList[4] + str(dRow)] = dataSource
sheet[dList[5] + str(dRow)] = tableCode
dRowIncress += 1
sBeginRow += 1
sRowNum +=1
itemName += 1
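# Area mapping: copies the selected source columns of each row straight into the
# destination columns listed in dCols, starting at dBeginRow; the '资产统计信息'
# (asset statistics) sheet additionally gets an 'ItemCodeN' label in column A.
# Date columns and optional per-cell replace/date-day/sum rules are handled the
# same way as in the supplementary-info writers above.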
def arearMapExtractDataToExcel(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, sNode):
sheet = cwb[dSheet]
sList = sCols.split(',')
dList = dCols.split(',')
isReplace = 'false'
if 'isReplace' in sNode.attrib:
isReplace = sNode.attrib['isReplace']
sRowNum = 0
dRowIncress = 0
if isReplace == 'true':
while sBeginRow <= sEndRow:
for sCellI in range(len(sList)):
sCol = sList[sCellI]
value = table.rows[sBeginRow].cells[int(sCol)].text
if 'dateFormat' in sNode.attrib:
dateFormat = sNode.attrib['dateFormat']
if dateFormat =='true':
dateCol = sNode.attrib['dateCol']
if dateCol == sCol:
day = sNode.attrib['day']
if value.find('年') >= 0:
value =value.replace('年', '/').replace('月', '/').replace('日', '')
value = value + day
isfind = 0
for rCell in sNode:
cTag = rCell.tag
if cTag == ('A' + str(sRowNum)):
for rsCell in rCell:
crTag = rsCell.tag
if crTag == ('B' + str(sCol)):
isfind = 1
crUsing = rsCell.attrib['using']
if crUsing =='replace':
value = rsCell.attrib['value']
elif crUsing =='date-day':
day = rsCell.attrib['value']
value = value.replace('年', '/').replace('月', '/')
value = value + day
elif crUsing == 'sum':
rsRowList = rsCell.attrib['cols'].split(',')
value = float(0.00)
for rsRow in range(len(rsRowList)):
value += float(table.rows[sBeginRow].cells[int(rsRowList[rsRow])].text.replace(',', ''))
value = ('%.2f' % value)
value = formatNum(value)
break
if isfind == 1:
break
dRow = dBeginRow + dRowIncress
if dSheet == '资产统计信息':
sheet['A' + str(dRow)] = 'ItemCode'+ str(dRowIncress)
if value != '':
sheet[dList[sCellI] + str(dRow)] = str(value)
dRowIncress += 1
sBeginRow += 1
sRowNum += 1
else:
while sBeginRow <= sEndRow:
for sCellI in range(len(sList)):
sCol = sList[sCellI]
value = table.rows[sBeginRow].cells[int(sCol)].text
if 'dateFormat' in sNode.attrib:
dateFormat = sNode.attrib['dateFormat']
if dateFormat =='true':
dateCol = sNode.attrib['dateCol']
if dateCol == sCol:
day = sNode.attrib['day']
if value.find('年')>=0:
value =value.replace('年', '/').replace('月', '/').replace('日', '')
value = value + day
dRow = dBeginRow + dRowIncress
if dSheet == '资产统计信息':
sheet['A' + str(dRow)] = 'ItemCode'+ str(dRowIncress)
if value != '':
sheet[dList[sCellI] + str(dRow)] = value
dRowIncress += 1
sBeginRow += 1
sRowNum += 1
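# Cell mapping: each child of the <dest> node names a destination cell through
# its tag (e.g. 'B5'); using="replace" copies one source cell given as "row,col",
# while using="sum" adds up several "row,col" cells separated by ';' and writes
# the total with two decimals.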
def cellMapExtractDataToExcel(table, dNode, dSheet):
sheet = cwb[dSheet]
for cell in dNode:
cUsing = cell.attrib['using']
cTag = cell.tag
cText = cell.text
if cUsing == 'replace':
r = int(cText.split(',')[0])
c = int(cText.split(',')[1])
v = table.rows[r].cells[c].text.strip()
if v != '':
sheet[cTag] = v
elif cUsing == 'sum':
dcs = cText.split(';')
sumv = 0
for i in range(len(dcs)):
r = int(dcs[i].split(',')[0])
c = int(dcs[i].split(',')[1])
v = table.rows[r].cells[c].text.strip()
v = v.replace(',', '').replace('-', '')
if v != '':
sumv += float(v)
sheet[cTag] = "{0:.2f}".format(sumv)
def arearMapExtract(table, cfgItem, itemIndex):
global DATANOTFOUND
itemDesc = cfgItem.attrib['desc']
sNode = cfgItem.find('source')
dNode = cfgItem.find('dest')
sAnchor = sNode.attrib['anchor'].strip()
sSkipRows = int(sNode.attrib['skiprows']) if 'skiprows' in sNode.attrib else 0
sAnchorEnd = sNode.attrib['anchorend'].strip()
dLimit = int(dNode.attrib['limited']) if 'limited' in dNode.attrib else 0
sAnchorEndArr = sAnchorEnd.split('$')
sBeginRow = -1
sEndRow = -1
for rIndex, row in enumerate(table.rows):
firstCellText = row.cells[0].text.strip()
if firstCellText == '':
continue
if sBeginRow == -1 and (firstCellText.startswith(sAnchor) or firstCellText.endswith(sAnchor)):
sBeginRow = rIndex + sSkipRows + 1;
elif sBeginRow != -1 and sAnchorEnd != '' and (
(sAnchorEnd.find('$') == -1 and firstCellText.startswith(sAnchorEnd)) or (
sAnchorEnd.find('$') != -1 and firstCellText in sAnchorEndArr)):
sEndRow = rIndex if dLimit == 0 or rIndex + 1 - sBeginRow <= dLimit else sBeginRow + dLimit - 1
break
if sBeginRow != -1 and sEndRow == -1:
rowsCount = len(table.rows)
if dLimit == 0 and sAnchorEnd == '':
sEndRow = rowsCount - 1
break
if dLimit != 0 and sAnchorEnd == '':
sEndRow = sBeginRow + dLimit if sBeginRow + dLimit <= rowsCount - 1 else rowsCount - 1
break
if dLimit != 0 and sAnchorEnd != '' and rIndex - sBeginRow == dLimit - 1:
sEndRow = rIndex
break
if sBeginRow != -1 and sEndRow != -1:
sCols = sNode.attrib['cols']
dCols = dNode.attrib['cols']
dSheet = dNode.attrib['sheet']
dBeginRow = dNode.attrib['beginrow']
if dBeginRow != '':
dBeginRow = int(dBeginRow)
writeSheetLog('{0} 提取: 【{1}】'.format(itemIndex + 1, itemDesc))
writeSheetLog(
'--------源表格起始行:{0},源表格结束行:{1},目标Sheet[{3}]开始行:{2}'.format(sBeginRow, sEndRow, dBeginRow, dSheet))
if 'type' in cfgItem.attrib:
reportType = dNode.attrib['ReportType']
dataSource = dNode.attrib['DataSource']
tableCode = dNode.attrib['TableCode']
if 'colsNames' in sNode.attrib:
arearMapSupInfoToExcelName(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, reportType, dataSource, tableCode, sNode)
else:
arearMapSupInfoToExcel(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, reportType, dataSource, tableCode)
else:
arearMapExtractDataToExcel(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, sNode)
writeSheetLog('--------【{0}】数据已提取完成'.format(itemDesc))
if writeLog == 0:
cwb.save(cdfp)
if sBeginRow == -1 and sEndRow == -1:
DATANOTFOUND += 1
writeSheetLog('{1} 【{0}】数据未找到,请检查源文件和配置文件'.format(itemDesc, itemIndex))
def arearMapExtractTable(tables, cfgItem, itemIndex):
global DATANOTFOUND
itemDesc = cfgItem.attrib['desc']
sNode = cfgItem.find('source')
dNode = cfgItem.find('dest')
sAnchor = sNode.attrib['anchor'].strip()
sSkipRows = int(sNode.attrib['skiprows']) if 'skiprows' in sNode.attrib else 0
sAnchorEnd = sNode.attrib['anchorend'].strip()
dLimit = int(dNode.attrib['limited']) if 'limited' in dNode.attrib else 0
sAnchorEndArr = sAnchorEnd.split('$')
sBeginRow = -1
sEndRow = -1
index = int(sNode.attrib['index'].strip())
for tbIndex, table in enumerate(tables):
if tbIndex >= index or index == -1:
for rIndex, row in enumerate(table.rows):
firstCellText = row.cells[0].text.strip()
if firstCellText == '':
continue
if sBeginRow == -1 and (firstCellText.startswith(sAnchor) or firstCellText.endswith(sAnchor)):
sBeginRow = rIndex + sSkipRows + 1;
elif sBeginRow != -1 and sAnchorEnd != '' and (
(sAnchorEnd.find('$') == -1 and firstCellText.startswith(sAnchorEnd)) or (
sAnchorEnd.find('$') != -1 and firstCellText in sAnchorEndArr)):
sEndRow = rIndex if dLimit == 0 or rIndex + 1 - sBeginRow <= dLimit else sBeginRow + dLimit - 1
break
if sBeginRow != -1 and sEndRow == -1:
rowsCount = len(table.rows)
if dLimit == 0 and sAnchorEnd == '':
sEndRow = rowsCount - 1
break
if dLimit != 0 and sAnchorEnd == '':
sEndRow = sBeginRow + dLimit if sBeginRow + dLimit <= rowsCount - 1 else rowsCount - 1
break
if dLimit != 0 and sAnchorEnd != '' and rIndex - sBeginRow == dLimit - 1:
sEndRow = rIndex
break
if sBeginRow != -1 and sEndRow != -1:
sCols = sNode.attrib['cols']
dCols = dNode.attrib['cols']
dSheet = dNode.attrib['sheet']
dBeginRow = dNode.attrib['beginrow']
if dBeginRow != '':
dBeginRow = int(dBeginRow)
writeSheetLog('{0} 提取: 【{1}】'.format(itemIndex + 1, itemDesc))
writeSheetLog(
'--------源表格起始行:{0},源表格结束行:{1},目标Sheet[{3}]开始行:{2}'.format(sBeginRow, sEndRow, dBeginRow, dSheet))
if 'type' in cfgItem.attrib:
reportType = dNode.attrib['ReportType']
dataSource = dNode.attrib['DataSource']
tableCode = dNode.attrib['TableCode']
if 'colsNames' in sNode.attrib:
arearMapSupInfoToExcelName(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, reportType, dataSource, tableCode, sNode)
else:
arearMapSupInfoToExcel(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, reportType, dataSource, tableCode, sNode)
else:
arearMapExtractDataToExcel(table, sBeginRow, sEndRow, sCols, dSheet, dBeginRow, dCols, sNode)
writeSheetLog('--------【{0}】数据已提取完成'.format(itemDesc))
if writeLog == 0:
cwb.save(cdfp)
break
if sBeginRow == -1 and sEndRow == -1:
DATANOTFOUND += 1
writeSheetLog('{1} 【{0}】数据未找到,请检查源文件和配置文件'.format(itemDesc, itemIndex))
def cellMapExtract(tables, cfgItem, itemIndex):
global DATANOTFOUND
itemDesc = cfgItem.attrib['desc']
sNode = cfgItem.find('source')
dNode = cfgItem.find('dest')
foundTable = 0
sAnchor = sNode.attrib['anchor'].strip()
index = int(sNode.attrib['index'].strip())
for tbIndex, table in enumerate(tables):
if tbIndex >= index or index == -1:
for rIndex, row in enumerate(table.rows):
firstCellText = row.cells[0].text.strip()
if firstCellText == '' or firstCellText != sAnchor:
continue
if firstCellText == sAnchor:
foundTable = 1
break
if foundTable == 1:
dSheet = dNode.attrib['sheet']
writeSheetLog('{0} 提取: 【{1}】'.format(itemIndex + 1, itemDesc))
writeSheetLog('--------开始表格映射映射数据提取')
cellMapExtractDataToExcel(table, dNode, dSheet)
writeSheetLog('--------【{0}】数据已提取完成'.format(itemDesc))
if writeLog == 0:
cwb.save(cdfp)
break
if foundTable == 0:
DATANOTFOUND += 1
writeSheetLog('\033[1;31m {1} 【{0}】数据未找到,请检查源文件和配置文件 \033[0m!'.format(itemDesc, itemIndex + 1))
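# Dispatches every <item> in the mapping file: items carrying a useTableName
# attribute are located by the table's caption (w:tblCaption) via findTableName,
# otherwise the document's tables are scanned positionally; type="cellmap" goes
# through the cell mapper, anything else through the area/supplementary-info
# extractors.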
def extractDocFile(cfgItems, sourceFilePath):
doc = Document(sourceFilePath)
tables = doc.tables
for i in range(len(cfgItems)):
cfgItem = cfgItems[i]
if 'useTableName' not in cfgItem.attrib:
if 'type' in cfgItem.attrib:
itemType = cfgItem.attrib['type']
if itemType == 'cellmap':
cellMapExtract(tables, cfgItem, i)
elif itemType == 'supInfo':
arearMapExtractTable(tables, cfgItem, i)
else:
arearMapExtractTable(tables, cfgItem, i)
else:
if 'type' in cfgItem.attrib:
itemType = cfgItem.attrib['type']
if itemType == 'cellmap':
findTableName(tables, cfgItem, i, 1)
elif itemType == 'supInfo':
findTableName(tables, cfgItem, i, 0)
else:
findTableName(tables, cfgItem, i, 0)
def writeSheetLog(info):
if writeLog == 1 and clws is not None:
clws['A' + str(clws.max_row + 1)] = info
cwb.save(cdfp)
def findTableName(tables, cfgItem, index, typeId):
sNode = cfgItem.find('source')
dNode = cfgItem.find('dest')
itemDesc = cfgItem.attrib['desc']
tableName = sNode.attrib['tableName'].strip()
row_xml = []
for tbIndex, table in enumerate(tables):
xml = table._tblPr.xml
root_elem = ET.fromstring(xml)
for ch in root_elem:
key = ch.tag.split('}')[1]
if key == 'tblCaption':
titleName = str(list(ch.attrib.values())).split('\'')[1]
if titleName == tableName:
if typeId == 1:
dSheet = dNode.attrib['sheet']
writeSheetLog('{0} 提取: 【{1}】'.format(tbIndex + 1, itemDesc))
writeSheetLog('--------开始表格映射映射数据提取')
cellMapExtractDataToExcel(table, dNode, dSheet)
writeSheetLog('--------【{0}】数据已提取完成'.format(itemDesc))
if writeLog == 0:
cwb.save(cdfp)
break
elif typeId == 0:
arearMapExtract(table, cfgItem, index)
for rIndex, row in enumerate(table.rows):
for cell in row.cells:
if len(cell.tables) > 0:
cIndex = 0
isTrue = 0
while cIndex < len(cell.tables):
rXml = cell.tables[cIndex]._tblPr.xml
cIndex += 1
if rXml not in row_xml:
isTrue = 1
row_xml.append(rXml)
continue
if isTrue == 1:
findTableName(cell.tables, cfgItem,index, typeId)
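# Expected invocation (illustrative; the argv order is fixed below):
#   python ResolveDocx_JieYing-AddValue.py report.docx mapping.xml output.xlsx
# The mapping file's root element supplies destfolder, template and writelog
# attributes. A rough sketch of the structure implied by the attribute reads in
# this script (the real schema may differ):
#   <mapping destfolder="out" template="template.xlsx" writelog="1">
#     <item desc="..." type="supInfo">
#       <source anchor="..." anchorend="..." skiprows="0" cols="0,1,2" index="-1"/>
#       <dest sheet="..." beginrow="" cols="A,B,C,D,E,F" limited="0"
#             ReportType="..." DataSource="..." TableCode="..."/>
#     </item>
#   </mapping>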
def main():
global DATANOTFOUND
global cdfp
global clws
global cwb
global writeLog
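    # NOTE: reload() as a builtin and sys.setdefaultencoding() only exist on
    # Python 2, so the script as written targets a Python 2 interpreter.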
reload(sys)
sys.setdefaultencoding('utf-8')
sourceFilePath = sys.argv[1]
destFileName = sys.argv[3]
configFilePath = sys.argv[2]
mappingTree = XETree.parse(configFilePath)
cfgRoot = mappingTree.getroot()
destFolder = cfgRoot.attrib['destfolder']
templateFilePath = cfgRoot.attrib['template']
writeLog = int(cfgRoot.attrib['writelog']) if 'writelog' in cfgRoot.attrib else 0
cdfp = os.path.join(destFolder, destFileName)
if not os.path.exists(destFolder):
os.makedirs(destFolder)
if os.path.exists(cdfp):
os.remove(cdfp)
open(cdfp, "wb").write(open(templateFilePath, "rb").read())
cwb = load_workbook(cdfp)
if writeLog == 1:
clws = cwb.create_sheet("Extract Log")
cwb.save(cdfp)
extractDocFile(cfgRoot, sourceFilePath)
main()
| [
"[email protected]"
] | |
62b8907735fd3b6d06a37ae9b1f26172eb1cbfb7 | 922e77ac07392028c2d0ebdd2d40766638ad5008 | /detectron2/data/detection_utils.py | e6692e138ffced257458edebfa9ffc6569fff969 | [
"Apache-2.0"
] | permissive | lucascooper/detectron2 | 9acc0a59a5c45b9dc204e60463ad34a9b1f4db5f | 75ebc5340fa31f9ee0f41e9d6b761db249237043 | refs/heads/master | 2022-11-08T00:22:10.768847 | 2020-06-26T19:06:29 | 2020-06-26T19:08:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,489 | py | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Common data processing utilities that are used in a
typical object detection data pipeline.
"""
import logging
import numpy as np
import pycocotools.mask as mask_util
import torch
from fvcore.common.file_io import PathManager
from PIL import Image
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
Keypoints,
PolygonMasks,
RotatedBoxes,
polygons_to_bitmask,
)
from . import transforms as T
from .catalog import MetadataCatalog
class SizeMismatchError(ValueError):
"""
When loaded image has difference width/height compared with annotation.
"""
# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]
# https://www.exiv2.org/tags.html
_EXIF_ORIENT = 274 # exif 'Orientation' tag
def convert_PIL_to_numpy(image, format):
"""
Convert PIL image to numpy array of target format.
Args:
image (PIL.Image): a PIL image
format (str): the format of output image
Returns:
(np.ndarray): also see `read_image`
"""
if format is not None:
# PIL only supports RGB, so convert to RGB and flip channels over below
conversion_format = format
if format in ["BGR", "YUV-BT.601"]:
conversion_format = "RGB"
image = image.convert(conversion_format)
image = np.asarray(image)
# PIL squeezes out the channel dimension for "L", so make it HWC
if format == "L":
image = np.expand_dims(image, -1)
# handle formats not supported by PIL
elif format == "BGR":
# flip channels if needed
image = image[:, :, ::-1]
elif format == "YUV-BT.601":
image = image / 255.0
image = np.dot(image, np.array(_M_RGB2YUV).T)
return image
def convert_image_to_rgb(image, format):
"""
Convert an image from given format to RGB.
Args:
image (np.ndarray or Tensor): an HWC image
format (str): the format of input image, also see `read_image`
Returns:
(np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
"""
if isinstance(image, torch.Tensor):
image = image.cpu().numpy()
if format == "BGR":
image = image[:, :, [2, 1, 0]]
elif format == "YUV-BT.601":
image = np.dot(image, np.array(_M_YUV2RGB).T)
image = image * 255.0
else:
if format == "L":
image = image[:, :, 0]
image = image.astype(np.uint8)
image = np.asarray(Image.fromarray(image, mode=format).convert("RGB"))
return image
def _apply_exif_orientation(image):
"""
Applies the exif orientation correctly.
This code exists per the bug:
https://github.com/python-pillow/Pillow/issues/3973
with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
various methods, especially `tobytes`
Function based on:
https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527
Args:
image (PIL.Image): a PIL image
Returns:
(PIL.Image): the PIL image with exif orientation applied, if applicable
"""
if not hasattr(image, "getexif"):
return image
exif = image.getexif()
if exif is None:
return image
orientation = exif.get(_EXIF_ORIENT)
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90,
}.get(orientation)
if method is not None:
return image.transpose(method)
return image
def read_image(file_name, format=None):
"""
Read an image into the given format.
Will apply rotation and flipping if the image has such exif information.
Args:
file_name (str): image file path
format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".
Returns:
image (np.ndarray): an HWC image in the given format, which is 0-255, uint8 for
supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
"""
with PathManager.open(file_name, "rb") as f:
image = Image.open(f)
# work around this bug: https://github.com/python-pillow/Pillow/issues/3973
image = _apply_exif_orientation(image)
return convert_PIL_to_numpy(image, format)
def check_image_size(dataset_dict, image):
"""
Raise an error if the image does not match the size specified in the dict.
"""
if "width" in dataset_dict or "height" in dataset_dict:
image_wh = (image.shape[1], image.shape[0])
expected_wh = (dataset_dict["width"], dataset_dict["height"])
if not image_wh == expected_wh:
raise SizeMismatchError(
"Mismatched (W,H){}, got {}, expect {}".format(
" for image " + dataset_dict["file_name"]
if "file_name" in dataset_dict
else "",
image_wh,
expected_wh,
)
)
# To ensure bbox always remap to original image size
if "width" not in dataset_dict:
dataset_dict["width"] = image.shape[1]
if "height" not in dataset_dict:
dataset_dict["height"] = image.shape[0]
def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
"""
Apply transformations to the proposals in dataset_dict, if any.
Args:
dataset_dict (dict): a dict read from the dataset, possibly
contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
image_shape (tuple): height, width
transforms (TransformList):
proposal_topk (int): only keep top-K scoring proposals
min_box_size (int): proposals with either side smaller than this
threshold are removed
The input dict is modified in-place, with abovementioned keys removed. A new
key "proposals" will be added. Its value is an `Instances`
object which contains the transformed proposals in its field
"proposal_boxes" and "objectness_logits".
"""
if "proposal_boxes" in dataset_dict:
# Transform proposal boxes
boxes = transforms.apply_box(
BoxMode.convert(
dataset_dict.pop("proposal_boxes"),
dataset_dict.pop("proposal_bbox_mode"),
BoxMode.XYXY_ABS,
)
)
boxes = Boxes(boxes)
objectness_logits = torch.as_tensor(
dataset_dict.pop("proposal_objectness_logits").astype("float32")
)
boxes.clip(image_shape)
keep = boxes.nonempty(threshold=min_box_size)
boxes = boxes[keep]
objectness_logits = objectness_logits[keep]
proposals = Instances(image_shape)
proposals.proposal_boxes = boxes[:proposal_topk]
proposals.objectness_logits = objectness_logits[:proposal_topk]
dataset_dict["proposals"] = proposals
def transform_instance_annotations(
annotation, transforms, image_size, *, keypoint_hflip_indices=None
):
"""
Apply transforms to box, segmentation and keypoints annotations of a single instance.
It will use `transforms.apply_box` for the box, and
`transforms.apply_coords` for segmentation polygons & keypoints.
If you need anything more specially designed for each data structure,
you'll need to implement your own version of this function or the transforms.
Args:
annotation (dict): dict of instance annotations for a single instance.
It will be modified in-place.
transforms (TransformList):
image_size (tuple): the height, width of the transformed image
keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
Returns:
dict:
the same input dict with fields "bbox", "segmentation", "keypoints"
transformed according to `transforms`.
The "bbox_mode" field will be set to XYXY_ABS.
"""
# bbox is 1d (per-instance bounding box)
bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
# clip transformed bbox to image size
bbox = transforms.apply_box([bbox])[0].clip(min=0)
annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
annotation["bbox_mode"] = BoxMode.XYXY_ABS
if "segmentation" in annotation:
# each instance contains 1 or more polygons
segm = annotation["segmentation"]
if isinstance(segm, list):
# polygons
polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
annotation["segmentation"] = [
p.reshape(-1) for p in transforms.apply_polygons(polygons)
]
elif isinstance(segm, dict):
# RLE
mask = mask_util.decode(segm)
mask = transforms.apply_segmentation(mask)
assert tuple(mask.shape[:2]) == image_size
annotation["segmentation"] = mask
else:
raise ValueError(
"Cannot transform segmentation of type '{}'!"
"Supported types are: polygons as list[list[float] or ndarray],"
" COCO-style RLE as a dict.".format(type(segm))
)
if "keypoints" in annotation:
keypoints = transform_keypoint_annotations(
annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
)
annotation["keypoints"] = keypoints
return annotation
def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
"""
Transform keypoint annotations of an image.
If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)
Args:
keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
Each point is represented by (x, y, visibility).
transforms (TransformList):
image_size (tuple): the height, width of the transformed image
keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
When `transforms` includes horizontal flip, will use the index
mapping to flip keypoints.
"""
# (N*3,) -> (N, 3)
keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
keypoints_xy = transforms.apply_coords(keypoints[:, :2])
# Set all out-of-boundary points to "unlabeled"
inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1]))
inside = inside.all(axis=1)
keypoints[:, :2] = keypoints_xy
keypoints[:, 2][~inside] = 0
# This assumes that HorizFlipTransform is the only one that does flip
do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
    # Alternative way: check if probe points were horizontally flipped.
# probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
# probe_aug = transforms.apply_coords(probe.copy())
# do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa
# If flipped, swap each keypoint with its opposite-handed equivalent
if do_hflip:
assert keypoint_hflip_indices is not None
keypoints = keypoints[keypoint_hflip_indices, :]
# Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0
keypoints[keypoints[:, 2] == 0] = 0
return keypoints
def annotations_to_instances(annos, image_size, mask_format="polygon"):
"""
Create an :class:`Instances` object used by the models,
from instance annotations in the dataset dict.
Args:
annos (list[dict]): a list of instance annotations in one image, each
element for one instance.
image_size (tuple): height, width
Returns:
Instances:
It will contain fields "gt_boxes", "gt_classes",
"gt_masks", "gt_keypoints", if they can be obtained from `annos`.
This is the format that builtin models expect.
"""
boxes = [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
target = Instances(image_size)
target.gt_boxes = Boxes(boxes)
classes = [obj["category_id"] for obj in annos]
classes = torch.tensor(classes, dtype=torch.int64)
target.gt_classes = classes
if len(annos) and "segmentation" in annos[0]:
segms = [obj["segmentation"] for obj in annos]
if mask_format == "polygon":
# TODO check type and provide better error
masks = PolygonMasks(segms)
else:
assert mask_format == "bitmask", mask_format
masks = []
for segm in segms:
if isinstance(segm, list):
# polygon
masks.append(polygons_to_bitmask(segm, *image_size))
elif isinstance(segm, dict):
# COCO RLE
masks.append(mask_util.decode(segm))
elif isinstance(segm, np.ndarray):
assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
segm.ndim
)
# mask array
masks.append(segm)
else:
raise ValueError(
"Cannot convert segmentation of type '{}' to BitMasks!"
"Supported types are: polygons as list[list[float] or ndarray],"
" COCO-style RLE as a dict, or a full-image segmentation mask "
"as a 2D ndarray.".format(type(segm))
)
# torch.from_numpy does not support array with negative stride.
masks = BitMasks(
torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
)
target.gt_masks = masks
if len(annos) and "keypoints" in annos[0]:
kpts = [obj.get("keypoints", []) for obj in annos]
target.gt_keypoints = Keypoints(kpts)
return target
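# --- Added illustrative sketch (not part of the original module) ---
# Minimal example of building training targets from two toy annotations; the
# boxes and category ids below are made up purely for illustration.
def _example_annotations_to_instances():
    annos = [
        {"bbox": [10, 10, 20, 30], "bbox_mode": BoxMode.XYWH_ABS, "category_id": 0},
        {"bbox": [50, 40, 25, 25], "bbox_mode": BoxMode.XYWH_ABS, "category_id": 1},
    ]
    # image_size is (height, width)
    inst = annotations_to_instances(annos, (100, 100))
    return inst.gt_boxes, inst.gt_classes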
def annotations_to_instances_rotated(annos, image_size):
"""
Create an :class:`Instances` object used by the models,
from instance annotations in the dataset dict.
Compared to `annotations_to_instances`, this function is for rotated boxes only
Args:
annos (list[dict]): a list of instance annotations in one image, each
element for one instance.
image_size (tuple): height, width
Returns:
Instances:
Containing fields "gt_boxes", "gt_classes",
if they can be obtained from `annos`.
This is the format that builtin models expect.
"""
boxes = [obj["bbox"] for obj in annos]
target = Instances(image_size)
boxes = target.gt_boxes = RotatedBoxes(boxes)
boxes.clip(image_size)
classes = [obj["category_id"] for obj in annos]
classes = torch.tensor(classes, dtype=torch.int64)
target.gt_classes = classes
return target
def filter_empty_instances(instances, by_box=True, by_mask=True, box_threshold=1e-5):
"""
Filter out empty instances in an `Instances` object.
Args:
instances (Instances):
by_box (bool): whether to filter out instances with empty boxes
by_mask (bool): whether to filter out instances with empty masks
box_threshold (float): minimum width and height to be considered non-empty
Returns:
Instances: the filtered instances.
"""
assert by_box or by_mask
r = []
if by_box:
r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
if instances.has("gt_masks") and by_mask:
r.append(instances.gt_masks.nonempty())
# TODO: can also filter visible keypoints
if not r:
return instances
m = r[0]
for x in r[1:]:
m = m & x
return instances[m]
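# --- Added illustrative sketch (not part of the original module) ---
# Demonstrates the filtering: the first (zero-area) box is dropped and the
# second kept. Assumes `torch` is imported at module level, as in this file.
def _example_filter_empty_instances():
    inst = Instances((100, 100))
    inst.gt_boxes = Boxes(torch.tensor([[0.0, 0.0, 0.0, 0.0], [10.0, 10.0, 40.0, 40.0]]))
    inst.gt_classes = torch.tensor([0, 1])
    return filter_empty_instances(inst)  # keeps only the second instance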
def create_keypoint_hflip_indices(dataset_names):
"""
Args:
dataset_names (list[str]): list of dataset names
Returns:
ndarray[int]: a vector of size=#keypoints, storing the
horizontally-flipped keypoint indices.
"""
check_metadata_consistency("keypoint_names", dataset_names)
check_metadata_consistency("keypoint_flip_map", dataset_names)
meta = MetadataCatalog.get(dataset_names[0])
names = meta.keypoint_names
# TODO flip -> hflip
flip_map = dict(meta.keypoint_flip_map)
flip_map.update({v: k for k, v in flip_map.items()})
flipped_names = [i if i not in flip_map else flip_map[i] for i in names]
flip_indices = [names.index(i) for i in flipped_names]
return np.asarray(flip_indices)
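# (Added note, illustrative) For COCO-style keypoints the flip map pairs
# left/right joints, e.g. with names = ["nose", "left_eye", "right_eye"] and
# flip_map = {"left_eye": "right_eye"} the returned indices are [0, 2, 1]:
# the nose stays put while the two eyes swap under a horizontal flip.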
def gen_crop_transform_with_instance(crop_size, image_size, instance):
"""
Generate a CropTransform so that the cropping region contains
the center of the given instance.
Args:
crop_size (tuple): h, w in pixels
image_size (tuple): h, w
instance (dict): an annotation dict of one instance, in Detectron2's
dataset format.
"""
crop_size = np.asarray(crop_size, dtype=np.int32)
bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
assert (
image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
), "The annotation bounding box is outside of the image!"
assert (
image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
), "Crop size is larger than image size!"
min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))
y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
def check_metadata_consistency(key, dataset_names):
"""
Check that the datasets have consistent metadata.
Args:
key (str): a metadata key
dataset_names (list[str]): a list of dataset names
Raises:
AttributeError: if the key does not exist in the metadata
ValueError: if the given datasets do not have the same metadata values defined by key
"""
if len(dataset_names) == 0:
return
logger = logging.getLogger(__name__)
entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
for idx, entry in enumerate(entries_per_dataset):
if entry != entries_per_dataset[0]:
logger.error(
"Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
)
logger.error(
"Metadata '{}' for dataset '{}' is '{}'".format(
key, dataset_names[0], str(entries_per_dataset[0])
)
)
raise ValueError("Datasets have different metadata '{}'!".format(key))
def build_augmentation(cfg, is_train):
"""
Create a list of :class:`Augmentation` from config.
Now it includes resizing and flipping.
Returns:
list[Augmentation]
"""
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
if sample_style == "range":
assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(
len(min_size)
)
logger = logging.getLogger(__name__)
augmentation = []
augmentation.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
if is_train:
augmentation.append(T.RandomFlip())
logger.info("Augmentations used in training: " + str(augmentation))
return augmentation
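# --- Added illustrative sketch (not part of the original module) ---
# Assumes detectron2's config system is importable in this environment.
def _example_build_augmentation():
    from detectron2.config import get_cfg
    cfg = get_cfg()
    # With default settings this returns a ResizeShortestEdge augmentation,
    # plus RandomFlip when is_train=True.
    return build_augmentation(cfg, is_train=True)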
build_transform_gen = build_augmentation
"""
Alias for backward-compatibility.
"""
| [
"[email protected]"
] | |
632b44110f8d17e5a87f2169f16492724791a409 | 2c4ba5a56b7a3d3e1c286b678eb8068f51c23046 | /week2/3-Simple-Algorithms/solutions/first_n_perfect.py | 388005a98408f01742f75b53102ab5c4f146e5ab | [] | no_license | OgnyanPenkov/Programming0-1 | 3b69757bd803814585d77479fc987a0ee92d0390 | 8078f316ea2b81216c21cf78e7cf1afc17f54846 | refs/heads/master | 2021-01-21T15:12:20.814368 | 2015-10-07T18:16:39 | 2015-10-07T18:16:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | # Проблемът на тази задача е, че не знаем горната граница на интервала
# For example, we don't know in what interval the first 4 perfect numbers lie
# That is why we take the following approach - while True:
# For each number (incrementing by 1) we check whether it is perfect
# If we find a perfect number, we decrease the wanted count by 1
# When the wanted count becomes 0, we stop
n = input("Enter n: ")
n = int(n)
start = 6
while True:
divisors_sum = 0
divisor = 1
while divisor < start:
if start % divisor == 0:
divisors_sum += divisor
divisor += 1
if divisors_sum == start:
print(start)
n = n - 1
if n == 0:
break
start += 1
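# (Added note) Example run: for n = 4 the program prints 6, 28, 496 and 8128 -
# the first four perfect numbers.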
| [
"[email protected]"
] | |
04b6c88d1353b5bcedc3f41edb10107f55960060 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /lib/python2.7/site-packages/networkx/drawing/layout.py | d6ade95e25422e759c0531fc265903f50288c684 | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 18,158 | py | """
******
Layout
******
Node positioning algorithms for graph drawing.
The default scales and centering for these layouts are
typically squares with side [0, 1] or [0, scale].
The two circular layout routines (circular_layout and
shell_layout) have size [-1, 1] or [-scale, scale].
"""
# Authors: Aric Hagberg <[email protected]>,
# Dan Schult <[email protected]>
# Copyright (C) 2004-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import collections
import networkx as nx
__all__ = ['circular_layout',
'random_layout',
'shell_layout',
'spring_layout',
'spectral_layout',
'fruchterman_reingold_layout']
def random_layout(G, dim=2, scale=1., center=None):
"""Position nodes uniformly at random.
For every node, a position is generated by choosing each of dim
coordinates uniformly at random on the default interval [0.0, 1.0),
or on an interval of length `scale` centered at `center`.
NumPy (http://scipy.org) is required for this function.
Parameters
----------
G : NetworkX graph or list of nodes
A position will be assigned to every node in G.
dim : int
Dimension of layout.
scale : float (default 1)
Scale factor for positions
center : array-like (default scale*0.5 in each dim)
Coordinate around which to center the layout.
Returns
-------
pos : dict
A dictionary of positions keyed by node
Examples
--------
>>> G = nx.lollipop_graph(4, 3)
>>> pos = nx.random_layout(G)
"""
import numpy as np
shape = (len(G), dim)
pos = np.random.random(shape) * scale
if center is not None:
pos += np.asarray(center) - 0.5 * scale
return dict(zip(G, pos))
def circular_layout(G, dim=2, scale=1., center=None):
"""Position nodes on a circle.
Parameters
----------
G : NetworkX graph or list of nodes
dim : int
Dimension of layout, currently only dim=2 is supported
scale : float (default 1)
Scale factor for positions, i.e. radius of circle.
center : array-like (default origin)
Coordinate around which to center the layout.
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G=nx.path_graph(4)
>>> pos=nx.circular_layout(G)
Notes
-----
This algorithm currently only works in two dimensions and does not
try to minimize edge crossings.
"""
import numpy as np
if len(G) == 0:
return {}
twopi = 2.0*np.pi
theta = np.arange(0, twopi, twopi/len(G))
pos = np.column_stack([np.cos(theta), np.sin(theta)]) * scale
if center is not None:
pos += np.asarray(center)
return dict(zip(G, pos))
def shell_layout(G, nlist=None, dim=2, scale=1., center=None):
"""Position nodes in concentric circles.
Parameters
----------
G : NetworkX graph or list of nodes
nlist : list of lists
List of node lists for each shell.
dim : int
Dimension of layout, currently only dim=2 is supported
scale : float (default 1)
Scale factor for positions, i.e.radius of largest shell
center : array-like (default origin)
Coordinate around which to center the layout.
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G = nx.path_graph(4)
>>> shells = [[0], [1,2,3]]
>>> pos = nx.shell_layout(G, shells)
Notes
-----
This algorithm currently only works in two dimensions and does not
try to minimize edge crossings.
"""
import numpy as np
if len(G) == 0:
return {}
if nlist is None:
# draw the whole graph in one shell
nlist = [list(G)]
numb_shells = len(nlist)
if len(nlist[0]) == 1:
# single node at center
radius = 0.0
numb_shells -= 1
else:
# else start at r=1
radius = 1.0
# distance between shells
gap = (scale / numb_shells) if numb_shells else scale
radius *= gap
npos={}
twopi = 2.0*np.pi
for nodes in nlist:
theta = np.arange(0, twopi, twopi/len(nodes))
pos = np.column_stack([np.cos(theta), np.sin(theta)]) * radius
npos.update(zip(nodes, pos))
radius += gap
if center is not None:
center = np.asarray(center)
for n,p in npos.items():
npos[n] = p + center
return npos
def fruchterman_reingold_layout(G, dim=2, k=None,
pos=None,
fixed=None,
iterations=50,
weight='weight',
scale=1.0,
center=None):
"""Position nodes using Fruchterman-Reingold force-directed algorithm.
Parameters
----------
G : NetworkX graph
dim : int
Dimension of layout
k : float (default=None)
Optimal distance between nodes. If None the distance is set to
1/sqrt(n) where n is the number of nodes. Increase this value
to move nodes farther apart.
pos : dict or None optional (default=None)
Initial positions for nodes as a dictionary with node as keys
and values as a list or tuple. If None, then use random initial
positions.
fixed : list or None optional (default=None)
Nodes to keep fixed at initial position.
If any nodes are fixed, the scale and center features are not used.
iterations : int optional (default=50)
Number of iterations of spring-force relaxation
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the effective spring constant. If None, edge weights are 1.
scale : float (default=1.0)
Scale factor for positions. The nodes are positioned
in a box of size `scale` in each dim centered at `center`.
center : array-like (default scale/2 in each dim)
Coordinate around which to center the layout.
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G=nx.path_graph(4)
>>> pos=nx.spring_layout(G)
# this function has two names:
# spring_layout and fruchterman_reingold_layout
>>> pos=nx.fruchterman_reingold_layout(G)
"""
import numpy as np
if len(G) == 0:
return {}
if fixed is not None:
nfixed = dict(zip(G, range(len(G))))
fixed = np.asarray([nfixed[v] for v in fixed])
if pos is None:
msg = "Keyword pos must be specified if any nodes are fixed"
raise ValueError(msg)
if pos is not None:
# Determine size of existing domain to adjust initial positions
pos_coords = np.array(list(pos.values()))
min_coords = pos_coords.min(0)
domain_size = pos_coords.max(0) - min_coords
shape = (len(G), dim)
pos_arr = np.random.random(shape) * domain_size + min_coords
for i,n in enumerate(G):
if n in pos:
pos_arr[i] = np.asarray(pos[n])
else:
pos_arr=None
if k is None and fixed is not None:
# Adjust k for domains larger than 1x1
k=domain_size.max()/np.sqrt(len(G))
try:
# Sparse matrix
        if len(G) < 500:  # use the sparse solver only for large graphs; raise to fall back to dense
raise ValueError
A = nx.to_scipy_sparse_matrix(G, weight=weight, dtype='f')
pos = _sparse_fruchterman_reingold(A,dim,k,pos_arr,fixed,iterations)
except:
A = nx.to_numpy_matrix(G, weight=weight)
pos = _fruchterman_reingold(A, dim, k, pos_arr, fixed, iterations)
if fixed is None:
pos = _rescale_layout(pos, scale)
if center is not None:
pos += np.asarray(center) - 0.5 * scale
return dict(zip(G,pos))
spring_layout=fruchterman_reingold_layout
def _fruchterman_reingold(A,dim=2,k=None,pos=None,fixed=None,iterations=50):
# Position nodes in adjacency matrix A using Fruchterman-Reingold
# Entry point for NetworkX graph is fruchterman_reingold_layout()
import numpy as np
try:
nnodes,_=A.shape
except AttributeError:
raise nx.NetworkXError(
"fruchterman_reingold() takes an adjacency matrix as input")
A=np.asarray(A) # make sure we have an array instead of a matrix
if pos is None:
# random initial positions
pos=np.asarray(np.random.random((nnodes,dim)),dtype=A.dtype)
else:
# make sure positions are of same type as matrix
pos=pos.astype(A.dtype)
# optimal distance between nodes
if k is None:
k=np.sqrt(1.0/nnodes)
# the initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
# Calculate domain in case our fixed positions are bigger than 1x1
t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1]))*0.1
# simple cooling scheme.
# linearly step down by dt on each iteration so last iteration is size dt.
dt=t/float(iterations+1)
delta = np.zeros((pos.shape[0],pos.shape[0],pos.shape[1]),dtype=A.dtype)
# the inscrutable (but fast) version
# this is still O(V^2)
# could use multilevel methods to speed this up significantly
for iteration in range(iterations):
# matrix of difference between points
for i in range(pos.shape[1]):
delta[:,:,i]= pos[:,i,None]-pos[:,i]
# distance between points
distance=np.sqrt((delta**2).sum(axis=-1))
# enforce minimum distance of 0.01
distance=np.where(distance<0.01,0.01,distance)
# displacement "force"
displacement=np.transpose(np.transpose(delta)*\
(k*k/distance**2-A*distance/k))\
.sum(axis=1)
# update positions
length=np.sqrt((displacement**2).sum(axis=1))
length=np.where(length<0.01,0.01,length)
delta_pos=np.transpose(np.transpose(displacement)*t/length)
if fixed is not None:
# don't change positions of fixed nodes
delta_pos[fixed]=0.0
pos+=delta_pos
# cool temperature
t-=dt
if fixed is None:
pos = _rescale_layout(pos)
return pos
def _sparse_fruchterman_reingold(A, dim=2, k=None, pos=None, fixed=None,
iterations=50):
# Position nodes in adjacency matrix A using Fruchterman-Reingold
# Entry point for NetworkX graph is fruchterman_reingold_layout()
# Sparse version
import numpy as np
try:
nnodes,_=A.shape
except AttributeError:
raise nx.NetworkXError(
"fruchterman_reingold() takes an adjacency matrix as input")
try:
from scipy.sparse import spdiags,coo_matrix
except ImportError:
raise ImportError("_sparse_fruchterman_reingold() scipy numpy: http://scipy.org/ ")
# make sure we have a LIst of Lists representation
try:
A=A.tolil()
except:
A=(coo_matrix(A)).tolil()
if pos is None:
# random initial positions
pos=np.asarray(np.random.random((nnodes,dim)),dtype=A.dtype)
else:
# make sure positions are of same type as matrix
pos=pos.astype(A.dtype)
# no fixed nodes
if fixed is None:
fixed=[]
# optimal distance between nodes
if k is None:
k=np.sqrt(1.0/nnodes)
# the initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
# Calculate domain in case our fixed positions are bigger than 1x1
t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1]))*0.1
# simple cooling scheme.
# linearly step down by dt on each iteration so last iteration is size dt.
dt=t/float(iterations+1)
displacement=np.zeros((dim,nnodes))
for iteration in range(iterations):
displacement*=0
# loop over rows
for i in range(A.shape[0]):
if i in fixed:
continue
# difference between this row's node position and all others
delta=(pos[i]-pos).T
# distance between points
distance=np.sqrt((delta**2).sum(axis=0))
# enforce minimum distance of 0.01
distance=np.where(distance<0.01,0.01,distance)
# the adjacency matrix row
Ai=np.asarray(A.getrowview(i).toarray())
# displacement "force"
displacement[:,i]+=\
(delta*(k*k/distance**2-Ai*distance/k)).sum(axis=1)
# update positions
length=np.sqrt((displacement**2).sum(axis=0))
length=np.where(length<0.01,0.01,length)
pos+=(displacement*t/length).T
# cool temperature
t-=dt
if fixed is None:
pos = _rescale_layout(pos)
return pos
def spectral_layout(G, dim=2, weight='weight', scale=1., center=None):
"""Position nodes using the eigenvectors of the graph Laplacian.
Parameters
----------
G : NetworkX graph or list of nodes
dim : int
Dimension of layout
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
the edge weight. If None, then all edge weights are 1.
scale : float optional (default 1)
Scale factor for positions, i.e. nodes placed in a box with
side [0, scale] or centered on `center` if provided.
center : array-like (default scale/2 in each dim)
Coordinate around which to center the layout.
Returns
-------
dict :
A dictionary of positions keyed by node
Examples
--------
>>> G=nx.path_graph(4)
>>> pos=nx.spectral_layout(G)
Notes
-----
Directed graphs will be considered as undirected graphs when
positioning the nodes.
For larger graphs (>500 nodes) this will use the SciPy sparse
eigenvalue solver (ARPACK).
"""
# handle some special cases that break the eigensolvers
import numpy as np
if len(G) <= 2:
if len(G) == 0:
return {}
elif len(G) == 1:
if center is not None:
pos = np.asarray(center)
else:
pos = np.ones((1,dim)) * scale * 0.5
else: #len(G) == 2
pos = np.array([np.zeros(dim), np.ones(dim) * scale])
if center is not None:
pos += np.asarray(center) - scale * 0.5
return dict(zip(G,pos))
try:
# Sparse matrix
if len(G)< 500: # dense solver is faster for small graphs
raise ValueError
A = nx.to_scipy_sparse_matrix(G, weight=weight, dtype='d')
# Symmetrize directed graphs
if G.is_directed():
A = A + np.transpose(A)
pos = _sparse_spectral(A,dim)
except (ImportError, ValueError):
# Dense matrix
A = nx.to_numpy_matrix(G, weight=weight)
# Symmetrize directed graphs
if G.is_directed():
A = A + np.transpose(A)
pos = _spectral(A, dim)
pos = _rescale_layout(pos, scale)
if center is not None:
pos += np.asarray(center) - 0.5 * scale
return dict(zip(G,pos))
def _spectral(A, dim=2):
# Input adjacency matrix A
# Uses dense eigenvalue solver from numpy
try:
import numpy as np
except ImportError:
raise ImportError("spectral_layout() requires numpy: http://scipy.org/ ")
try:
nnodes,_=A.shape
except AttributeError:
raise nx.NetworkXError(\
"spectral() takes an adjacency matrix as input")
# form Laplacian matrix
# make sure we have an array instead of a matrix
A=np.asarray(A)
I=np.identity(nnodes,dtype=A.dtype)
D=I*np.sum(A,axis=1) # diagonal of degrees
L=D-A
eigenvalues,eigenvectors=np.linalg.eig(L)
# sort and keep smallest nonzero
index=np.argsort(eigenvalues)[1:dim+1] # 0 index is zero eigenvalue
return np.real(eigenvectors[:,index])
def _sparse_spectral(A,dim=2):
# Input adjacency matrix A
# Uses sparse eigenvalue solver from scipy
# Could use multilevel methods here, see Koren "On spectral graph drawing"
try:
import numpy as np
from scipy.sparse import spdiags
except ImportError:
raise ImportError("_sparse_spectral() requires scipy & numpy: http://scipy.org/ ")
try:
from scipy.sparse.linalg.eigen import eigsh
except ImportError:
# scipy <0.9.0 names eigsh differently
from scipy.sparse.linalg import eigen_symmetric as eigsh
try:
nnodes,_=A.shape
except AttributeError:
raise nx.NetworkXError(\
"sparse_spectral() takes an adjacency matrix as input")
# form Laplacian matrix
data=np.asarray(A.sum(axis=1).T)
D=spdiags(data,0,nnodes,nnodes)
L=D-A
k=dim+1
    # number of Lanczos vectors for ARPACK solver. What is the right scaling?
ncv=max(2*k+1,int(np.sqrt(nnodes)))
# return smallest k eigenvalues and eigenvectors
eigenvalues,eigenvectors=eigsh(L,k,which='SM',ncv=ncv)
index=np.argsort(eigenvalues)[1:k] # 0 index is zero eigenvalue
return np.real(eigenvectors[:,index])
def _rescale_layout(pos, scale=1.):
# rescale to [0, scale) in each axis
# Find max length over all dimensions
maxlim=0
for i in range(pos.shape[1]):
pos[:,i] -= pos[:,i].min() # shift min to zero
maxlim = max(maxlim, pos[:,i].max())
if maxlim > 0:
for i in range(pos.shape[1]):
pos[:,i] *= scale / maxlim
return pos
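# --- Added illustrative sketch (not part of the original module) ---
# _rescale_layout shifts each axis to start at 0 and divides by the largest
# axis span, e.g. [[-2, 0], [2, 1]] -> [[0, 0], [1, 0.25]] for scale=1.
def _example_rescale_layout():
    import numpy as np
    pos = np.array([[-2.0, 0.0], [2.0, 1.0]])
    return _rescale_layout(pos, scale=1.0)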
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
try:
import scipy
except:
raise SkipTest("SciPy not available")
| [
"[email protected]"
] | |
7393ed5275df359c4798e683f9f52f70ea73ee36 | 5fd4707876cac0a4ca3b14af9a936301c45b5599 | /02_数据结构/fp_15_一个谜题.py | 12cd3f9686732f4027c53318216d150dbc7debc7 | [] | no_license | xuelang201201/FluentPython | 5b0d89bfc6ee1238ad77db9955ec7e8417b418b8 | 7cbedf7c780c2a9e0edac60484f2ad4c385e1dbd | refs/heads/master | 2022-04-26T21:49:16.923214 | 2020-04-27T01:27:50 | 2020-04-27T01:27:50 | 258,290,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | # 一个关于+=的谜题
t = (1, 2, [30, 40])
t[2] += [50, 60]
# Which of the following four outcomes will actually happen?
# a. t becomes (1, 2, [30, 40, 50, 60]).
# b. Because tuples do not support item assignment, a TypeError exception is raised.
# c. Neither of the above.
# d. Both a and b are correct.
# Run it in a console - the result nobody expects: t[2] is modified, but an exception is also raised
# t
# TypeError Traceback (most recent call last)
# <ipython-input-2-d877fb0e9d36> in <module>
# ----> 1 t[2] += [50, 60]
#
# TypeError: 'tuple' object does not support item assignment
# t
# (1, 2, [30, 40, 50, 60])
# So the answer is d
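# (Added note) Why both things happen: `t[2] += [50, 60]` first extends the
# list in place (list.__iadd__ mutates it), and only afterwards tries to
# rebind t[2], which fails because tuples are immutable - hence the mutation
# *and* the TypeError.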
| [
"[email protected]"
] | |
bdea5739deb6de4ea45ee5a8b9375074d1bd4a56 | c1e31f49a59beb6089328d09040f6f48d2e12cde | /lib/python2.7/exportfits.py | 8ac16bd8a03a5dfb2e656ce6285ff75d901e1e57 | [
"Python-2.0"
] | permissive | kernsuite-debian/casalite | 3d81761e0d8ae497f97ea242e98d4357618a7591 | b620981f14f4ba5b77f347f649cd2c16d498db04 | refs/heads/master | 2021-06-22T16:22:51.765703 | 2021-02-25T13:28:05 | 2021-02-25T13:28:05 | 80,822,139 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | #
# This file was generated using xslt from its XML file
#
# Copyright 2009, Associated Universities Inc., Washington DC
#
import sys
import os
from casac import *
import string
from taskinit import casalog
from taskinit import xmlpath
#from taskmanager import tm
import task_exportfits
def exportfits(imagename='', fitsimage='', velocity=False, optical=False, bitpix=-32, minpix=0, maxpix=-1, overwrite=False, dropstokes=False, stokeslast=True, history=True, dropdeg=False):
"""Convert a CASA image to a FITS file
FOR MORE INFORMATION, SEE THE TASK PAGES OF EXPORTFITS IN CASA DOCS:
https://casa.nrao.edu/casadocs/
"""
#
# The following is work around to avoid a bug with current python translation
#
mytmp = {}
mytmp['imagename'] = imagename
mytmp['fitsimage'] = fitsimage
mytmp['velocity'] = velocity
mytmp['optical'] = optical
mytmp['bitpix'] = bitpix
mytmp['minpix'] = minpix
mytmp['maxpix'] = maxpix
mytmp['overwrite'] = overwrite
mytmp['dropstokes'] = dropstokes
mytmp['stokeslast'] = stokeslast
mytmp['history'] = history
mytmp['dropdeg'] = dropdeg
pathname='file://' + xmlpath( ) + '/'
trec = casac.utils().torecord(pathname+'exportfits.xml')
casalog.origin('exportfits')
if trec.has_key('exportfits') and casac.utils().verify(mytmp, trec['exportfits']) :
result = task_exportfits.exportfits(imagename, fitsimage, velocity, optical, bitpix, minpix, maxpix, overwrite, dropstokes, stokeslast, history, dropdeg)
else :
result = False
return result
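# --- Added illustrative sketch (not part of the original task wrapper) ---
# The file names below are hypothetical placeholders.
def _example_exportfits():
    # Convert a CASA image on disk to a FITS file, overwriting any existing output.
    return exportfits(imagename="my_image.im", fitsimage="my_image.fits", overwrite=True)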
| [
"[email protected]"
] | |
03751bac302d1bfbd2bf027224f83efe634a666c | 9b01f7d430f7ee87217618cfa4567f42635e8923 | /22-06-2017/cloudformation/nginx-demo-1/ansible/.env/lib/python2.7/site-packages/ansible/executor/task_executor.py | 21a29052d1f8809f341b51f3230f658a4224f5cd | [] | no_license | awsusergroupsantiago/demos | ccb045545d2a407a39d865cf19800d2b6d284b8f | e7f0dc8d9a4e8f2547c33a5a294fd76bf3ac9c9c | refs/heads/master | 2022-04-30T23:43:30.646556 | 2020-08-08T01:35:40 | 2020-08-08T01:35:40 | 95,129,959 | 2 | 0 | null | 2022-03-29T21:54:09 | 2017-06-22T15:29:25 | Python | UTF-8 | Python | false | false | 36,398 | py | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import subprocess
import sys
import time
import traceback
from ansible.compat.six import iteritems, string_types, binary_type
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure
from ansible.executor.task_result import TaskResult
from ansible.module_utils._text import to_bytes, to_text
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.template import Templar
from ansible.utils.encrypt import key_for_hostname
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.ssh_functions import check_for_controlpersist
from ansible.vars.unsafe_proxy import UnsafeProxy, wrap_var
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['TaskExecutor']
class TaskExecutor:
'''
This is the main worker class for the executor pipeline, which
handles loading an action plugin to actually dispatch the task to
a given host. This class roughly corresponds to the old Runner()
class.
'''
# Modules that we optimize by squashing loop items into a single call to
# the module
SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS)
def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj, rslt_q):
self._host = host
self._task = task
self._job_vars = job_vars
self._play_context = play_context
self._new_stdin = new_stdin
self._loader = loader
self._shared_loader_obj = shared_loader_obj
self._connection = None
self._rslt_q = rslt_q
self._loop_eval_error = None
self._task.squash()
def run(self):
'''
The main executor entrypoint, where we determine if the specified
task requires looping and either runs the task with self._run_loop()
or self._execute(). After that, the returned results are parsed and
returned as a dict.
'''
display.debug("in run()")
try:
try:
items = self._get_loop_items()
except AnsibleUndefinedVariable as e:
# save the error raised here for use later
items = None
self._loop_eval_error = e
if items is not None:
if len(items) > 0:
item_results = self._run_loop(items)
# loop through the item results, and remember the changed/failed
# result flags based on any item there.
changed = False
failed = False
for item in item_results:
if 'changed' in item and item['changed']:
changed = True
if 'failed' in item and item['failed']:
failed = True
# create the overall result item, and set the changed/failed
# flags there to reflect the overall result of the loop
res = dict(results=item_results)
if changed:
res['changed'] = True
if failed:
res['failed'] = True
res['msg'] = 'One or more items failed'
else:
res['msg'] = 'All items completed'
else:
res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
else:
display.debug("calling self._execute()")
res = self._execute()
display.debug("_execute() done")
# make sure changed is set in the result, if it's not present
if 'changed' not in res:
res['changed'] = False
def _clean_res(res, errors='surrogate_or_strict'):
if isinstance(res, UnsafeProxy):
return res._obj
elif isinstance(res, binary_type):
return to_text(res, errors=errors)
elif isinstance(res, dict):
for k in res:
try:
res[k] = _clean_res(res[k], errors=errors)
except UnicodeError:
if k == 'diff':
# If this is a diff, substitute a replacement character if the value
# is undecodable as utf8. (Fix #21804)
display.warning("We were unable to decode all characters, replaced some in an effort to return as much as possible")
res[k] = _clean_res(res[k], errors='surrogate_then_replace')
else:
raise
elif isinstance(res, list):
for idx,item in enumerate(res):
res[idx] = _clean_res(item, errors=errors)
return res
display.debug("dumping result to json")
res = _clean_res(res)
display.debug("done dumping result, returning")
return res
except AnsibleError as e:
return dict(failed=True, msg=to_text(e, nonstring='simplerepr'))
except Exception as e:
return dict(failed=True, msg='Unexpected failure during module execution.', exception=to_text(traceback.format_exc()), stdout='')
finally:
try:
self._connection.close()
except AttributeError:
pass
except Exception as e:
display.debug(u"error closing connection: %s" % to_text(e))
def _get_loop_items(self):
'''
Loads a lookup plugin to handle the with_* portion of a task (if specified),
and returns the items result.
'''
# save the play context variables to a temporary dictionary,
# so that we can modify the job vars without doing a full copy
# and later restore them to avoid modifying things too early
play_context_vars = dict()
self._play_context.update_vars(play_context_vars)
old_vars = dict()
for k in play_context_vars:
if k in self._job_vars:
old_vars[k] = self._job_vars[k]
self._job_vars[k] = play_context_vars[k]
# get search path for this task to pass to lookup plugins
self._job_vars['ansible_search_path'] = self._task.get_search_path()
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
items = None
if self._task.loop:
if self._task.loop in self._shared_loader_obj.lookup_loader:
if self._task.loop == 'first_found':
# first_found loops are special. If the item is undefined then we want to fall through to the next value rather than failing.
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=False, convert_bare=False)
loop_terms = [t for t in loop_terms if not templar._contains_vars(t)]
else:
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=False)
# get lookup
mylookup = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar)
# give lookup task 'context' for subdir (mostly needed for first_found)
for subdir in ['template', 'var', 'file']: # TODO: move this to constants?
if subdir in self._task.action:
break
setattr(mylookup,'_subdir', subdir + 's')
# run lookup
items = mylookup.run(terms=loop_terms, variables=self._job_vars, wantlist=True)
else:
raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop)
# now we restore any old job variables that may have been modified,
# and delete them if they were in the play context vars but not in
# the old variables dictionary
for k in play_context_vars:
if k in old_vars:
self._job_vars[k] = old_vars[k]
else:
del self._job_vars[k]
if items:
from ansible.vars.unsafe_proxy import UnsafeProxy
for idx, item in enumerate(items):
if item is not None and not isinstance(item, UnsafeProxy):
items[idx] = UnsafeProxy(item)
# ensure basedir is always in (dwim already searches here but we need to display it)
if self._loader.get_basedir() not in self._job_vars['ansible_search_path']:
self._job_vars['ansible_search_path'].append(self._loader.get_basedir())
return items
def _run_loop(self, items):
'''
Runs the task with the loop items specified and collates the result
into an array named 'results' which is inserted into the final result
along with the item for which the loop ran.
'''
results = []
# make copies of the job vars and task so we can add the item to
# the variables and re-validate the task with the item variable
#task_vars = self._job_vars.copy()
task_vars = self._job_vars
loop_var = 'item'
label = None
loop_pause = 0
if self._task.loop_control:
# the value may be 'None', so we still need to default it back to 'item'
loop_var = self._task.loop_control.loop_var or 'item'
label = self._task.loop_control.label or ('{{' + loop_var + '}}')
loop_pause = self._task.loop_control.pause or 0
if loop_var in task_vars:
display.warning(u"The loop variable '%s' is already in use. "
u"You should set the `loop_var` value in the `loop_control` option for the task"
u" to something else to avoid variable collisions and unexpected behavior." % loop_var)
ran_once = False
items = self._squash_items(items, loop_var, task_vars)
for item in items:
task_vars[loop_var] = item
# pause between loop iterations
if loop_pause and ran_once:
time.sleep(loop_pause)
else:
ran_once = True
try:
tmp_task = self._task.copy(exclude_parent=True, exclude_tasks=True)
tmp_task._parent = self._task._parent
tmp_play_context = self._play_context.copy()
except AnsibleParserError as e:
results.append(dict(failed=True, msg=to_text(e)))
continue
# now we swap the internal task and play context with their copies,
# execute, and swap them back so we can do the next iteration cleanly
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
res = self._execute(variables=task_vars)
(self._task, tmp_task) = (tmp_task, self._task)
(self._play_context, tmp_play_context) = (tmp_play_context, self._play_context)
# now update the result with the item info, and append the result
# to the list of results
res[loop_var] = item
res['_ansible_item_result'] = True
if label is not None:
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
res['_ansible_item_label'] = templar.template(label)
self._rslt_q.put(TaskResult(self._host.name, self._task._uuid, res), block=False)
results.append(res)
del task_vars[loop_var]
return results
def _squash_items(self, items, loop_var, variables):
'''
Squash items down to a comma-separated list for certain modules which support it
(typically package management modules).
'''
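        # (Added note) Example of the optimization this enables: a task such as
        #     yum: name={{ item }}
        #     with_items: [httpd, vim]
        # is collapsed into a single module invocation with name=[httpd, vim],
        # so the package manager runs once instead of once per item.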
name = None
try:
# _task.action could contain templatable strings (via action: and
# local_action:) Template it before comparing. If we don't end up
# optimizing it here, the templatable string might use template vars
# that aren't available until later (it could even use vars from the
# with_items loop) so don't make the templated string permanent yet.
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
task_action = self._task.action
if templar._contains_vars(task_action):
task_action = templar.template(task_action, fail_on_undefined=False)
if len(items) > 0 and task_action in self.SQUASH_ACTIONS:
if all(isinstance(o, string_types) for o in items):
final_items = []
for allowed in ['name', 'pkg', 'package']:
name = self._task.args.pop(allowed, None)
if name is not None:
break
# This gets the information to check whether the name field
# contains a template that we can squash for
template_no_item = template_with_item = None
if name:
if templar._contains_vars(name):
variables[loop_var] = '\0$'
template_no_item = templar.template(name, variables, cache=False)
variables[loop_var] = '\0@'
template_with_item = templar.template(name, variables, cache=False)
del variables[loop_var]
# Check if the user is doing some operation that doesn't take
# name/pkg or the name/pkg field doesn't have any variables
# and thus the items can't be squashed
if template_no_item != template_with_item:
for item in items:
variables[loop_var] = item
if self._task.evaluate_conditional(templar, variables):
new_item = templar.template(name, cache=False)
final_items.append(new_item)
self._task.args['name'] = final_items
# Wrap this in a list so that the calling function loop
# executes exactly once
return [final_items]
else:
# Restore the name parameter
self._task.args['name'] = name
#elif:
# Right now we only optimize single entries. In the future we
# could optimize more types:
# * lists can be squashed together
# * dicts could squash entries that match in all cases except the
# name or pkg field.
except:
# Squashing is an optimization. If it fails for any reason,
# simply use the unoptimized list of items.
# Restore the name parameter
if name is not None:
self._task.args['name'] = name
return items
def _execute(self, variables=None):
'''
The primary workhorse of the executor system, this runs the task
on the specified host (which may be the delegated_to host) and handles
the retry/until and block rescue/always execution
'''
if variables is None:
variables = self._job_vars
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
context_validation_error = None
try:
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
self._play_context.post_validate(templar=templar)
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not self._play_context.remote_addr:
self._play_context.remote_addr = self._host.address
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist.
self._play_context.update_vars(variables)
except AnsibleError as e:
# save the error, which we'll raise later if we don't end up
# skipping this task during the conditional evaluation step
context_validation_error = e
# Evaluate the conditional (if any) for this task, which we do before running
# the final task post-validation. We do this before the post validation due to
# the fact that the conditional may specify that the task be skipped due to a
# variable not being present which would otherwise cause validation to fail
try:
if not self._task.evaluate_conditional(templar, variables):
display.debug("when evaluation failed, skipping this task")
return dict(changed=False, skipped=True, skip_reason='Conditional check failed', _ansible_no_log=self._play_context.no_log)
# since we're not skipping, if there was a loop evaluation error
# raised earlier we need to raise it now to halt the execution of
# this task
if self._loop_eval_error is not None:
raise self._loop_eval_error
except AnsibleError:
            # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
if self._task.action not in ['include', 'include_role']:
raise
# if we ran into an error while setting up the PlayContext, raise it now
if context_validation_error is not None:
raise context_validation_error
# if this task is a TaskInclude, we just return now with a success code so the
# main thread can expand the task list for the given host
if self._task.action == 'include':
include_variables = self._task.args.copy()
include_file = include_variables.pop('_raw_params', None)
if not include_file:
return dict(failed=True, msg="No include file was specified to the include")
include_file = templar.template(include_file)
return dict(include=include_file, include_variables=include_variables)
# if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
elif self._task.action == 'include_role':
include_variables = self._task.args.copy()
return dict(include_role=self._task, include_variables=include_variables)
# Now we do final validation on the task, which sets all fields to their final values.
self._task.post_validate(templar=templar)
if '_variable_params' in self._task.args:
variable_params = self._task.args.pop('_variable_params')
if isinstance(variable_params, dict):
display.deprecated("Using variables for task params is unsafe, especially if the variables come from an external source like facts")
variable_params.update(self._task.args)
self._task.args = variable_params
# get the connection and the handler for this execution
if not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr:
self._connection = self._get_connection(variables=variables, templar=templar)
hostvars = variables.get('hostvars', None)
if hostvars:
try:
target_hostvars = hostvars.raw_get(self._host.name)
except:
# FIXME: this should catch the j2undefined error here
# specifically instead of all exceptions
target_hostvars = dict()
else:
target_hostvars = dict()
self._connection.set_host_overrides(host=self._host, hostvars=target_hostvars)
else:
# if connection is reused, its _play_context is no longer valid and needs
# to be replaced with the one templated above, in case other data changed
self._connection._play_context = self._play_context
self._handler = self._get_action_handler(connection=self._connection, templar=templar)
# And filter out any fields which were set to default(omit), and got the omit token value
omit_token = variables.get('omit')
if omit_token is not None:
self._task.args = dict((i[0], i[1]) for i in iteritems(self._task.args) if i[1] != omit_token)
# Read some values from the task, so that we can modify them if need be
if self._task.until:
retries = self._task.retries
if retries is None:
retries = 3
elif retries <= 0:
retries = 1
else:
retries += 1
else:
retries = 1
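        # (Added note) Net effect of the block above: with `until` set, an
        # unspecified `retries` gives 3 total attempts, a non-positive value
        # gives a single attempt, and a positive value N gives N+1 attempts
        # (the original run plus N retries); without `until` the task runs once.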
delay = self._task.delay
if delay < 0:
delay = 1
# make a copy of the job vars here, in case we need to update them
# with the registered variable value later on when testing conditions
vars_copy = variables.copy()
display.debug("starting attempt loop")
result = None
for attempt in range(1, retries + 1):
display.debug("running the handler")
try:
result = self._handler.run(task_vars=variables)
except AnsibleConnectionFailure as e:
return dict(unreachable=True, msg=to_text(e))
display.debug("handler run complete")
# preserve no log
result["_ansible_no_log"] = self._play_context.no_log
# update the local copy of vars with the registered value, if specified,
# or any facts which may have been generated by the module execution
if self._task.register:
vars_copy[self._task.register] = wrap_var(result.copy())
if self._task.async > 0:
if self._task.poll > 0 and not result.get('skipped'):
result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
# ensure no log is preserved
result["_ansible_no_log"] = self._play_context.no_log
# helper methods for use below in evaluating changed/failed_when
def _evaluate_changed_when_result(result):
if self._task.changed_when is not None and self._task.changed_when:
cond = Conditional(loader=self._loader)
cond.when = self._task.changed_when
result['changed'] = cond.evaluate_conditional(templar, vars_copy)
def _evaluate_failed_when_result(result):
if self._task.failed_when:
cond = Conditional(loader=self._loader)
cond.when = self._task.failed_when
failed_when_result = cond.evaluate_conditional(templar, vars_copy)
result['failed_when_result'] = result['failed'] = failed_when_result
else:
failed_when_result = False
return failed_when_result
if 'ansible_facts' in result:
vars_copy.update(result['ansible_facts'])
# set the failed property if the result has a non-zero rc. This will be
# overridden below if the failed_when property is set
if result.get('rc', 0) != 0:
result['failed'] = True
# if we didn't skip this task, use the helpers to evaluate the changed/
# failed_when properties
if 'skipped' not in result:
_evaluate_changed_when_result(result)
_evaluate_failed_when_result(result)
if retries > 1:
cond = Conditional(loader=self._loader)
cond.when = self._task.until
result['attempts'] = attempt
if cond.evaluate_conditional(templar, vars_copy):
break
else:
# no conditional check, or it failed, so sleep for the specified time
if attempt < retries:
result['_ansible_retry'] = True
result['retries'] = retries
display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
self._rslt_q.put(TaskResult(self._host.name, self._task._uuid, result), block=False)
time.sleep(delay)
else:
if retries > 1:
# we ran out of attempts, so mark the result as failed
result['attempts'] = retries - 1
result['failed'] = True
# do the final update of the local variables here, for both registered
# values and any facts which may have been created
if self._task.register:
variables[self._task.register] = wrap_var(result)
if 'ansible_facts' in result:
variables.update(result['ansible_facts'])
# save the notification target in the result, if it was specified, as
# this task may be running in a loop in which case the notification
# may be item-specific, ie. "notify: service {{item}}"
if self._task.notify is not None:
result['_ansible_notify'] = self._task.notify
# add the delegated vars to the result, so we can reference them
# on the results side without having to do any further templating
# FIXME: we only want a limited set of variables here, so this is currently
# hardcoded but should be possibly fixed if we want more or if
# there is another source of truth we can use
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict()).copy()
if len(delegated_vars) > 0:
result["_ansible_delegated_vars"] = dict()
for k in ('ansible_host', ):
result["_ansible_delegated_vars"][k] = delegated_vars.get(k)
# and return
display.debug("attempt loop complete, returning result")
return result
def _poll_async_result(self, result, templar, task_vars=None):
'''
Polls for the specified JID to be complete
'''
if task_vars is None:
task_vars = self._job_vars
async_jid = result.get('ansible_job_id')
if async_jid is None:
return dict(failed=True, msg="No job id was returned by the async task")
# Create a new psuedo-task to run the async_status module, and run
# that (with a sleep for "poll" seconds between each retry) until the
# async time limit is exceeded.
async_task = Task().load(dict(action='async_status jid=%s' % async_jid))
# Because this is an async task, the action handler is async. However,
# we need the 'normal' action handler for the status check, so get it
# now via the action_loader
normal_handler = self._shared_loader_obj.action_loader.get(
'normal',
task=async_task,
connection=self._connection,
play_context=self._play_context,
loader=self._loader,
templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
time_left = self._task.async
while time_left > 0:
time.sleep(self._task.poll)
try:
async_result = normal_handler.run(task_vars=task_vars)
# We do not bail out of the loop in cases where the failure
# is associated with a parsing error. The async_runner can
# have issues which result in a half-written/unparseable result
# file on disk, which manifests to the user as a timeout happening
# before it's time to timeout.
if int(async_result.get('finished', 0)) == 1 or ('failed' in async_result and async_result.get('_ansible_parsed', False)) or 'skipped' in async_result:
break
except Exception as e:
# Connections can raise exceptions during polling (eg, network bounce, reboot); these should be non-fatal.
# On an exception, call the connection's reset method if it has one (eg, drop/recreate WinRM connection; some reused connections are in a broken state)
display.vvvv("Exception during async poll, retrying... (%s)" % to_text(e))
display.debug("Async poll exception was:\n%s" % to_text(traceback.format_exc()))
try:
normal_handler._connection._reset()
except AttributeError:
pass
time_left -= self._task.poll
if int(async_result.get('finished', 0)) != 1:
if async_result.get('_ansible_parsed'):
return dict(failed=True, msg="async task did not complete within the requested time")
else:
return dict(failed=True, msg="async task produced unparseable results", async_result=async_result)
else:
return async_result
def _get_connection(self, variables, templar):
'''
Reads the connection property for the host, and returns the
correct connection object from the list of connection plugins
'''
if self._task.delegate_to is not None:
# since we're delegating, we don't want to use interpreter values
# which would have been set for the original target host
for i in list(variables.keys()):
if isinstance(i, string_types) and i.startswith('ansible_') and i.endswith('_interpreter'):
del variables[i]
# now replace the interpreter values with those that may have come
# from the delegated-to host
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(self._task.delegate_to, dict())
if isinstance(delegated_vars, dict):
for i in delegated_vars:
if isinstance(i, string_types) and i.startswith("ansible_") and i.endswith("_interpreter"):
variables[i] = delegated_vars[i]
conn_type = self._play_context.connection
if conn_type == 'smart':
conn_type = 'ssh'
if sys.platform.startswith('darwin') and self._play_context.password:
# due to a current bug in sshpass on OSX, which can trigger
# a kernel panic even for non-privileged users, we revert to
                # paramiko on that OS when an SSH password is specified
conn_type = "paramiko"
else:
# see if SSH can support ControlPersist if not use paramiko
if not check_for_controlpersist(self._play_context.ssh_executable):
conn_type = "paramiko"
connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin)
if not connection:
raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
if self._play_context.accelerate:
# accelerate is deprecated as of 2.1...
display.deprecated('Accelerated mode is deprecated. Consider using SSH with ControlPersist and pipelining enabled instead')
# launch the accelerated daemon here
ssh_connection = connection
handler = self._shared_loader_obj.action_loader.get(
'normal',
task=self._task,
connection=ssh_connection,
play_context=self._play_context,
loader=self._loader,
templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
key = key_for_hostname(self._play_context.remote_addr)
accelerate_args = dict(
password=base64.b64encode(key.__str__()),
port=self._play_context.accelerate_port,
minutes=C.ACCELERATE_DAEMON_TIMEOUT,
ipv6=self._play_context.accelerate_ipv6,
debug=self._play_context.verbosity,
)
connection = self._shared_loader_obj.connection_loader.get('accelerate', self._play_context, self._new_stdin)
if not connection:
raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
try:
connection._connect()
except AnsibleConnectionFailure:
display.debug('connection failed, fallback to accelerate')
res = handler._execute_module(module_name='accelerate', module_args=accelerate_args, task_vars=variables, delete_remote_tmp=False)
display.debug(res)
connection._connect()
return connection
def _get_action_handler(self, connection, templar):
'''
        Returns the correct action plugin to handle the requested task action
'''
if self._task.action in self._shared_loader_obj.action_loader:
if self._task.async != 0:
raise AnsibleError("async mode is not supported with the %s module" % self._task.action)
handler_name = self._task.action
elif self._task.async == 0:
handler_name = 'normal'
else:
handler_name = 'async'
handler = self._shared_loader_obj.action_loader.get(
handler_name,
task=self._task,
connection=connection,
play_context=self._play_context,
loader=self._loader,
templar=templar,
shared_loader_obj=self._shared_loader_obj,
)
if not handler:
raise AnsibleError("the handler '%s' was not found" % handler_name)
return handler
| [
"[email protected]"
] | |
568bc42695dfdf14190d884be35c1c4afa43689f | d1ac66f9a935fd5515a16a1cc8d4dae0104ea0fe | /src/check_structures.py | 3c3fea7183213683e377187b6fdf992c82b42c34 | [
"MIT"
] | permissive | chemspacelab/meltingpoint | 8b1b1f5a7e6f45fee82c2e8d55db2df29c6ae0bc | e3d8eb61fcb5fa5c9c2a1a03852216e4e625a9c9 | refs/heads/master | 2020-08-30T06:08:17.739191 | 2020-04-17T11:18:32 | 2020-04-17T11:18:32 | 218,285,755 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py |
import qml
from chemhelp import cheminfo
import numpy as np
from rdkit import Chem
import sys
args = sys.argv[1:]
filename = args[0]
molobjs = cheminfo.read_sdffile(filename)
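# Flag structures whose minimum interatomic distance has collapsed (< 0.01) and
# rebuild them with a conformational search from the SMILES string.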
for i, molobj in enumerate(molobjs):
# stat = cheminfo.molobj_optimize(molobj)
# print(stat)
dist = Chem.rdmolops.Get3DDistanceMatrix(molobj)
np.fill_diagonal(dist, 10.0)
min_dist = np.min(dist)
if min_dist < 0.01:
print(i, min_dist)
smi = cheminfo.molobj_to_smiles(molobj)
molobj = cheminfo.conformationalsearch(smi)
dist = Chem.rdmolops.Get3DDistanceMatrix(molobj)
np.fill_diagonal(dist, 10.0)
min_dist = np.min(dist)
print(smi)
print(min_dist)
# atoms, coord = cheminfo.molobj_to_xyz(molobj)
# atoms = list(atoms)
# many_atoms = [atoms]
# mbtypes = qml.representations.get_slatm_mbtypes(many_atoms)
# rep = qml.representations.generate_slatm(coord, atoms, mbtypes)
# print(cheminfo.molobj_to_smiles(molobj))
# print(rep.mean())
| [
"[email protected]"
] | |
dae39314521542665f7fe4ce4c6605824fa4d40c | 728e57a80995d7be98d46295b780d0b433c9e62a | /src/rewriter/rewriter.gyp | 1566c15345faf46401995133dcfc423cc15a2523 | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"GPL-1.0-or-later"
] | permissive | SNQ-2001/Mozc-for-iOS | 7936bfd9ff024faacfd2d96af3ec15a2000378a1 | 45b0856ed8a22d5fa6b4471548389cbde4abcf10 | refs/heads/master | 2023-03-17T22:19:15.843107 | 2014-10-04T05:48:29 | 2014-10-04T05:48:42 | 574,371,060 | 0 | 0 | Apache-2.0 | 2022-12-05T06:48:07 | 2022-12-05T06:48:06 | null | UTF-8 | Python | false | false | 4,492 | gyp | # Copyright 2010-2014, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'variables': {
'relative_dir': 'rewriter',
'gen_out_dir': '<(SHARED_INTERMEDIATE_DIR)/<(relative_dir)',
},
'targets': [
{
'target_name': 'rewriter',
'type': 'static_library',
'sources': [
'<(gen_out_dir)/embedded_collocation_data.h',
'<(gen_out_dir)/embedded_collocation_suppression_data.h',
'<(gen_out_dir)/emoji_rewriter_data.h',
'<(gen_out_dir)/emoticon_rewriter_data.h',
'<(gen_out_dir)/reading_correction_data.h',
'<(gen_out_dir)/single_kanji_rewriter_data.h',
'<(gen_out_dir)/symbol_rewriter_data.h',
'<(gen_out_dir)/usage_rewriter_data.h',
'calculator_rewriter.cc',
'collocation_rewriter.cc',
'collocation_util.cc',
'correction_rewriter.cc',
'command_rewriter.cc',
'date_rewriter.cc',
'dice_rewriter.cc',
'dictionary_generator.cc',
'embedded_dictionary.cc',
'emoji_rewriter.cc',
'emoticon_rewriter.cc',
'english_variants_rewriter.cc',
'focus_candidate_rewriter.cc',
'fortune_rewriter.cc',
'language_aware_rewriter.cc',
'normalization_rewriter.cc',
'number_compound_util.cc',
'number_rewriter.cc',
'remove_redundant_candidate_rewriter.cc',
'rewriter.cc',
'single_kanji_rewriter.cc',
'symbol_rewriter.cc',
'transliteration_rewriter.cc',
'unicode_rewriter.cc',
'usage_rewriter.cc',
'user_boundary_history_rewriter.cc',
'user_dictionary_rewriter.cc',
'user_segment_history_rewriter.cc',
'variants_rewriter.cc',
'version_rewriter.cc',
'zipcode_rewriter.cc',
],
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:config_file_stream',
'../composer/composer.gyp:composer',
'../config/config.gyp:character_form_manager',
'../config/config.gyp:config_handler',
'../config/config.gyp:config_protocol',
'../converter/converter_base.gyp:conversion_request',
'../converter/converter_base.gyp:immutable_converter',
'../data_manager/data_manager.gyp:user_pos_manager',
'../dictionary/dictionary.gyp:dictionary',
'../dictionary/dictionary_base.gyp:pos_matcher',
'../session/session_base.gyp:session_protocol',
'../storage/storage.gyp:storage',
'../usage_stats/usage_stats_base.gyp:usage_stats',
'calculator/calculator.gyp:calculator',
'rewriter_base.gyp:gen_rewriter_files#host',
],
'xcode_settings' : {
'SDKROOT': 'iphoneos',
'IPHONEOS_DEPLOYMENT_TARGET': '7.0',
'ARCHS': '$(ARCHS_UNIVERSAL_IPHONE_OS)',
},
'conditions':[
['target_platform=="Android"', {
'sources!': [
'<(gen_out_dir)/usage_rewriter_data.h',
'usage_rewriter.cc',
],
}],
]
},
],
}
| [
"[email protected]"
] | |
86203cbdc8524bc438ae6337aa0c9192f9d21c19 | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories_2to3/152687/plankton-classification-master/code/maxout/train.py | 1a1c2151cd0734ec1b1463ad74ed84d26eb51fef | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,293 | py | import os
import numpy as np
from data import load_images
from realtime_augment import RealtimeAugment
from pylearn2.datasets import preprocessing
from pylearn2.training_algorithms import sgd, learning_rule
from pylearn2.termination_criteria import EpochCounter
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix, DefaultViewConverter
from pylearn2.train import Train
from pylearn2.train_extensions import best_params
from pylearn2.termination_criteria import MonitorBased
from pylearn2.costs.mlp.dropout import Dropout
from pylearn2.models.maxout import MaxoutConvC01B, Maxout
from pylearn2.models import mlp
from pylearn2.space import Conv2DSpace
from pylearn2.utils import serial
from theano import tensor as T
from theano import function
from sklearn.utils import shuffle
from sklearn.cross_validation import StratifiedKFold
#optionally set to False to make predictions on a saved model
retrain = True
#nn params
img_dim = 48
central_window_shape = img_dim
max_epochs = 50
learn_rate = .025
batch_size = 128
momentum_start = .5
momentum_end = .9
momentum_saturate = max_epochs
decay_factor = .025 * learn_rate
decay_saturate = max_epochs
view_converter_dim = 1
axes = ['b',0,1,'c']
view_converter = DefaultViewConverter(shape=[img_dim, img_dim, view_converter_dim], axes=axes)
#image augment params
scale_diff = .2
translation = 9.
center_shape = (img_dim-2, img_dim-2)
preprocess = [preprocessing.GlobalContrastNormalization(sqrt_bias=10.,use_std=True),
preprocessing.LeCunLCN([img_dim, img_dim], batch_size=5000)]
#number of random test augmentations to predict
test_examples = 2
#convolutional layers
l1= MaxoutConvC01B(layer_name='l1',
tied_b=1,
num_channels=32, num_pieces=2, pad=0,
kernel_shape=[4,4], pool_shape=[2,2], pool_stride=[2,2],
max_kernel_norm= 1.9365, irange=.025)
l2= MaxoutConvC01B(layer_name='l2',
tied_b=1,
num_channels=64, num_pieces=2, pad=3,
kernel_shape=[4,4], pool_shape=[2,2], pool_stride=[2,2],
max_kernel_norm= 1.9365, irange=.025)
l3 = MaxoutConvC01B(layer_name='l3',
tied_b=1,
num_channels=128, num_pieces=2, pad=3,
kernel_shape=[3,3], pool_shape=[2,2], pool_stride=[2,2],
max_kernel_norm= 1.9365, irange=.025)
l4 = MaxoutConvC01B(layer_name='l4',
tied_b=1,
num_channels=128, num_pieces=2, pad=3,
kernel_shape=[3,3], pool_shape=[2,2], pool_stride=[2,2],
max_kernel_norm= 1.9365, irange=.025)
l5 = MaxoutConvC01B(layer_name='l5',
tied_b=1,
num_channels=256, num_pieces=2, pad=2,
kernel_shape=[3,3], pool_shape=[2,2], pool_stride=[2,2],
max_kernel_norm= 1.9365, irange=.025)
l6 = MaxoutConvC01B(layer_name='l6',
tied_b=1,
num_channels=256, num_pieces=2, pad=2,
kernel_shape=[3,3], pool_shape=[2,2], pool_stride=[2,2],
max_kernel_norm= 1.9365, irange=.025)
#dense layers
l7 = Maxout(layer_name='l7', num_units=1024, num_pieces=2, irange=.025)
l8 = Maxout(layer_name='l8', num_units=2048, num_pieces=2, irange=.025)
output_layer = mlp.Softmax(layer_name='y', n_classes=121, irange=.01)
layers = [l1,l2,l3,l4,l5, l6,l7, l8, output_layer]
images = []
y = []
file_names = []
dimensions = []
train_labels = [x for x in os.listdir("train") if os.path.isdir("{0}{1}{2}".format("train", os.sep, x))]
train_directories = ["{0}{1}{2}".format("train", os.sep, x) for x in train_labels]
train_labels, train_directories = list(zip(*sorted(zip(train_labels, train_directories), key=lambda x: x[0])))
for idx, folder in enumerate(train_directories):
for f_name_dir in os.walk(folder):
dir_images, fnames, dims = load_images(f_name_dir, img_dim=img_dim)
images = images + dir_images
y = y + [idx for x in dir_images]
dimensions = dimensions + dims
file_names = file_names + fnames
def to_one_hot(l):
out = np.zeros((len(l), len(set(l))))
for idx, label in enumerate(l):
out[idx, label] = 1
return out
y = to_one_hot(y)
def predict(model, X_test):
model.set_batch_size(batch_size)
m = X_test.X.shape[0]
extra = batch_size - m % batch_size
if extra > 0:
X_test.X = np.concatenate([X_test.X, np.zeros((extra, X_test.X.shape[1]), dtype=X_test.X.dtype)], axis=0)
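        # The zero rows appended here only make the batch loop line up; the corresponding
        # predictions are dropped again below via p = p[:m].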
X_m = model.get_input_space().make_theano_batch()
Y = model.fprop(X_m)
f = function([X_m], Y, allow_input_downcast=True)
p = []
    for i in range(X_test.X.shape[0] // batch_size):
if i % 100 == 0:
print("predicting batch {0} of {1}".format(i, X_test.X.shape[0] / batch_size))
x_arg = X_test.X[i*batch_size:(i+1)*batch_size,:]
x_arg = X_test.get_topological_view(x_arg)
p.append(f(x_arg.astype(X_m.dtype)))
p = np.concatenate(p)
p = p[:m]
return p
images, y, file_names, dimensions = shuffle(images, y, file_names, dimensions, random_state=7)
folds = 10
fold = 0
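# Build stratified folds on the integer class labels recovered from the one-hot matrix.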
kfold = StratifiedKFold([np.argmax(y[i]) for i in range(y.shape[0])], n_folds=folds)
for train_index, test_index in kfold:
save_path = 'valid_best_fold%d.pkl' % fold
print(save_path)
images_train = images[train_index]
y_train = y[train_index]
images_train, y_train = shuffle(images_train, y_train, random_state=7)
X_train = DenseDesignMatrix(X=images_train, y=y_train,view_converter=view_converter)
images_test = images[test_index]
y_test = y[test_index]
X_test = DenseDesignMatrix(X=images_test, y=y_test,view_converter=view_converter)
if retrain:
print("training on", X_train.X.shape, 'testing on', X_test.X.shape)
trainer = sgd.SGD(learning_rate=learn_rate, batch_size=batch_size,
learning_rule=learning_rule.Momentum(momentum_start),
cost=Dropout(
input_include_probs={'l1':1., 'l2':1., 'l3':1., 'l4':1., 'l5':1., 'l6':1.},
input_scales={'l1':1., 'l2':1., 'l3':1., 'l4':1., 'l5':1., 'l6':1.}
),
termination_criterion=EpochCounter(max_epochs=max_epochs),
monitoring_dataset={'train':X_train, 'valid':X_test},
)
input_space = Conv2DSpace(shape=(central_window_shape, central_window_shape),
axes = axes,
num_channels = 1)
ann = mlp.MLP(layers, input_space=input_space)
velocity = learning_rule.MomentumAdjustor(final_momentum=momentum_end,
start=1,
saturate=momentum_saturate)
watcher = best_params.MonitorBasedSaveBest(channel_name='valid_y_nll',
save_path=save_path)
decay = sgd.LinearDecayOverEpoch(start=1, saturate=decay_saturate, decay_factor=decay_factor)
ra = RealtimeAugment(window_shape=[img_dim, img_dim], randomize=[X_train, X_test],
scale_diff=scale_diff, translation=translation, center_shape=center_shape, center=[X_train, X_test],
preprocess=preprocess)
train = Train(dataset=X_train, model=ann, algorithm=trainer,
extensions=[watcher, velocity, decay, ra])
train.main_loop()
print("using model", save_path)
model = serial.load(save_path)
print("loading test set")
for f_name_dir in os.walk("test"):
images_test, fnames, dims_test = load_images(f_name_dir, img_dim=img_dim)
X_test = None
p_test = np.zeros((len(images_test),121), dtype=np.float32)
for example in range(test_examples):
print("creating test augmentation %d" % example)
X_train = DenseDesignMatrix(X=images_train, y=y_train,view_converter=view_converter)
X_test_ = DenseDesignMatrix(X=np.array(images_test), y=np.array((len(images_test),)), view_converter=view_converter)
ra = RealtimeAugment(window_shape=[img_dim, img_dim], randomize=[X_train, X_test_],
scale_diff=scale_diff, translation=translation, center_shape=center_shape, center=[X_train, X_test_],
preprocess=preprocess)
ra.setup(None,None,None)
preds = predict(model, X_test_)
p_test += preds
p_test /= test_examples
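    # Average the model outputs over the randomly augmented copies of the test set
    # (simple test-time augmentation).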
print("writing sub to file")
with open('sub.csv', 'w') as sub:
sub.write("image," + ",".join(train_labels) + "\n")
for idx, fname in enumerate(fnames):
p_row = p_test[idx]
sub.write("{0},{1}\n".format(fname, ",".join([str(x) for x in p_row])))
quit() | [
"[email protected]"
] | |
0cc9f793bbbacb2d71d1f077caac1470878e96ee | badb121a8c72debc539e7b9caf17b5c4cd875897 | /setup.py | d788bb0448c2f1d68b5d93911c2598c8b73aeadc | [
"MIT"
] | permissive | shlpu/DeepNeuro | 8721e1f83b30031a422e6cf112c31879edfa0feb | 6b239942589d1b05e2384019c442508f5d02beb3 | refs/heads/master | 2020-03-16T07:12:25.128910 | 2018-04-10T22:32:29 | 2018-04-10T22:32:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,506 | py | """DeepNeuro: A deep learning python package for neuroimaging data.
Created by the Quantitative Tumor Imaging Lab at the Martinos Center
(Harvard-MIT Program in Health Sciences and Technology / Massachusetts General Hospital).
"""
DOCLINES = __doc__.split("\n")
import sys
from setuptools import setup, find_packages
from codecs import open
from os import path
if sys.version_info[:2] < (2, 7):
raise RuntimeError("Python version 2.7 or greater required.")
setup(
name = 'deepneuro',
# packages = ['qtim_tools'], # this must be the same as the name above
version = '0.1.1',
description = DOCLINES[0],
packages = find_packages(),
entry_points = {
"console_scripts": ['segment_gbm = deepneuro.pipelines.Segment_GBM.cli:main',
'skull_strip = deepneuro.pipelines.Skull_Stripping.cli:main'],
},
author = 'Andrew Beers',
author_email = '[email protected]',
url = 'https://github.com/QTIM-Lab/DeepNeuro', # use the URL to the github repo
download_url = 'https://github.com/QTIM-Lab/DeepNeuro/tarball/0.1.1',
keywords = ['neuroimaging', 'neuroncology', 'neural networks', 'neuroscience', 'neurology', 'deep learning', 'fmri', 'pet', 'mri', 'dce', 'dsc', 'dti', 'machine learning', 'computer vision', 'learning', 'keras', 'theano', 'tensorflow', 'nfiti', 'nrrd', 'dicom'],
install_requires=['keras', 'pydicom', 'pynrrd', 'nibabel', 'numpy', 'scipy', 'scikit-image==0.12.3'],
classifiers = [],
) | [
"[email protected]"
] | |
54b5f9999727867f92c0a86ef38bb2f8502bf5aa | 56fc8fe58ec8d576ec857f19a8adc43b49e19125 | /DjangoDrf/DjangoDrf/urls.py | c75f1d5a486221814c1b64622ac12621ad0426fc | [] | no_license | Qpigzhu/Drf | 53ae3dfd7d2715ea49bbfca02ada1a9239cb25a2 | e4faa165a81abe8e641b992b6f86cc46cb01ac16 | refs/heads/master | 2022-12-13T16:30:33.868771 | 2018-12-12T02:34:11 | 2018-12-12T02:34:11 | 161,421,986 | 0 | 0 | null | 2022-12-08T01:20:24 | 2018-12-12T02:32:20 | JavaScript | UTF-8 | Python | false | false | 2,253 | py | """DjangoDrf URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include,re_path
from django.views.static import serve
import xadmin
from rest_framework.documentation import include_docs_urls
from rest_framework.routers import DefaultRouter
from rest_framework_jwt.views import obtain_jwt_token
from .settings import MEDIA_ROOT
from goods.views import GoodsList,CategoryViewset
from users.views import SmsCodeView,UserView
# Register viewsets with the DRF router
router = DefaultRouter()
router.register('goods',GoodsList,base_name="GoodsList")
# Configure the Category URLs
router.register('categorys', CategoryViewset, base_name="categories")
# Send SMS verification codes
router.register("code",SmsCodeView,base_name="sms_code")
# Users
router.register("users",UserView,base_name="users")
urlpatterns = [
# path('admin/', admin.site.urls),
    # rest_framework URLs, handy for testing
path('api-auth/', include('rest_framework.urls')),
    # xadmin admin site URLs
path('xadmin/', xadmin.site.urls),
    # Rich-text editor (UEditor) URLs
path('ueditor/', include('DjangoUeditor.urls')),
    # URL for serving uploaded images: use Django's built-in serve view and point it at the configured MEDIA_ROOT
re_path('media/(?P<path>.*)', serve, {"document_root": MEDIA_ROOT}),
    # rest_framework auto-generated docs; note that with version 1.11 the pattern must not end with a $
path('docs/',include_docs_urls(title='mtianyan生鲜超市文档')),
    # Goods list
# path('goods/',GoodsList.as_view(),name="食品列表"),
    # URLs registered through the router
path('', include(router.urls)),
    # Log in via JWT
path('login/',obtain_jwt_token),
]
| [
"[email protected]"
] | |
4fdd0d8c6ae5710146751a450372bd9c6f5b06d7 | 1934761958bbb6082beebe887af36d7579d73fd5 | /sandbox/test_concatenation.py | e25fa9c7ed94080b29d6fbe34cc88e58924e3439 | [
"MIT"
] | permissive | sjoerdapp/tacoma | 92d16e0beb93a3fc0dd0a745bccc35e050fa5dbe | 3c63d51e2b9b021f95a6945716f50b557dd41d52 | refs/heads/master | 2020-04-10T14:01:22.713198 | 2018-12-07T17:10:09 | 2018-12-07T17:10:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | import tacoma as tc
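# Build two small temporal networks in each representation (edge_lists and edge_changes),
# concatenate three of them with tc.concatenate, and print the merged objects.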
print("===== edge_lists => edge_lists =====")
L = tc.edge_lists()
L.N = 4
L.t = [0.0,1.0,2.0]
L.tmax = 3.0
L.edges = [
[
(0,1)
],
[
(1,2), (0,2)
],
[
(0,1)
],
]
L2 = tc.edge_lists()
L2.N = 4
L2.t = [0.0,1.0,2.0]
L2.tmax = 3.0
L2.edges = [
[
(3,1)
],
[
(3,2), (0,2)
],
]
new = tc.concatenate([L,L2,L])
print(new.N)
print(new.t)
print(new.tmax)
print(new.edges)
print("===== edge_changes => edge_changes =====")
C = tc.edge_changes()
C.N = 4
C.edges_initial = [ (0,1) ]
C.t0 = 1.0
C.tmax = 3.0
C.t = [ 2.0, ]
C.edges_in = [
[
(1,2), (0,2)
],
]
C.edges_out = [
[
(0,1)
],
]
C2 = tc.edge_changes()
C2.N = 4
C2.edges_initial = [ (3,1) ]
C2.t0 = 1.0
C2.tmax = 3.0
C2.t = [ 2.0, ]
C2.edges_in = [
[
(3,2), (0,2)
],
]
C2.edges_out = [
[
(3,1)
],
]
new = tc.concatenate([C,C2,C])
print(new.N)
print(new.t0)
print(new.t)
print(new.tmax)
print(new.edges_initial)
print(new.edges_in)
print(new.edges_out)
| [
"[email protected]"
] | |
75b3a9895a158f61870ce7f6948ee8c615166ebf | ad1fc1783487a70b3b10e9d3927cd864d15a6056 | /pytablewriter/style/__init__.py | 01aa4c10be5158f44fcc371276ce1231fe78cce7 | [
"MIT"
] | permissive | Diwahars/pytablewriter | ea1d53669d7f0507c35332cb296fc9c0015473d0 | 6275405d75cb091c9e225e278b0d1230736fb9e8 | refs/heads/master | 2020-04-16T09:41:39.070414 | 2019-01-09T13:38:03 | 2019-01-09T13:38:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | # encoding: utf-8
from __future__ import absolute_import
from ._font import FontSize, FontWeight
from ._style import Align, Style, ThousandSeparator
from ._styler import (
HtmlStyler,
LatexStyler,
NullStyler,
TextStyler,
MarkdownStyler,
ReStructuredTextStyler,
)
from dataproperty import Format
| [
"[email protected]"
] | |
0f9324881116da5ccc6d95e209e3b1eca52c64cb | d0bdf444c71b724ecfd59b5bc6850962c56494cb | /labs/07-resampling_and_the_bootstrap/tests/q0_2.py | cb757a625b8c22fcb31df3b7f2d4ca35b9ab74d6 | [] | no_license | ucsd-ets/dsc10-su20-public | 10e3d0ff452b337f222baee330fe60d1465b0071 | 38787e6cc3e6210b4cc8a46350e5120845971c9f | refs/heads/master | 2022-12-13T23:28:20.512649 | 2020-09-03T19:28:06 | 2020-09-03T19:28:06 | 275,905,339 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | test = {
'hidden': False,
'name': '0.2',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> p_50 == 76
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| [
"[email protected]"
] | |
911268cc7033fdb9e4f5dfa358b0e1e352f93e23 | ae39044997354b7270c6f35957bdd5efdcfbd2ee | /21.类.py/carse.py | c25056e3efde01e2d06ef62660cd80b401ed2c88 | [] | no_license | text007/learngit | a2a7d8c872f17103a388f77370dcd07d6eb477c9 | 6f3429ecab51f738a99b2ec6637cd21603f48ec4 | refs/heads/master | 2020-06-18T13:18:34.563100 | 2019-10-08T13:08:09 | 2019-10-08T13:08:09 | 156,345,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py |
'''A simple class example'''
# Import another class
from cars import Car
class Battery(): # Define a class
    def __init__(self, battery_size = 70): # Define a method with one parameter; the default value is used when none is passed
        self.battery_size = battery_size # Initialize the attribute
    def describe_battery(self): # Define a method
        print(str(self.battery_size)) # Print the battery size
    def get_range(self): # Define a method
        if self.battery_size == 70: # when battery_size is 70
            range = 240
        elif self.battery_size == 85: # when battery_size is 85
            range = 270
        message = 'T ' + str(range)
        message += ' m'
        print(message) # Print the range message
class ElectricCar(Car): # Define a subclass; the parentheses must contain the parent class name
    def __init__(self, make, model, year): # Accept the information needed to create a parent-class instance
        # Initialize the attributes of the parent class
        # super(): special method that links the subclass to its parent class
        super().__init__(make, model, year)
        self.battery = Battery() # The subclass has this attribute; the parent class does not
| [
"[email protected]"
] | |
4ed203fe5bd61e1e611d29326dfa8157d106a3bd | 292726345fae67a78771477e164441a716e0c22b | /setup.py | 90234c02a27b4334d60238f950aa900379015c21 | [] | no_license | anirban89/mypackage | d9bf8c7b0d2cc9dbd4ac4b5493cd650bf390f8a7 | 7b38e6db6c9e60cf2edb2b34ebe649ec3b7f0737 | refs/heads/master | 2021-01-13T02:26:17.878874 | 2014-10-02T00:29:34 | 2014-10-02T00:29:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from setuptools import setup
setup(
name = 'mypackage',
version = 0.1,
packages = ['mypackage'],
install_requires = ['numpy']
)
| [
"[email protected]"
] | |
07382111d4dd14b487f8ddc2f8632c66c9034b55 | 60d2c390736f5dce1cd0c9d4249a0ab95bdae802 | /worker/vtmis/vtmis/vtmis.py | 6c886d68c3dbbe2f79a09e4295661bcb96a4eb5f | [
"Apache-2.0"
] | permissive | tsmolka/stoq-plugins-public | d996b0be051ce0bac453af7380e7cbfecc03ff93 | a8d3351fe55fc72891c395d6767188746bf381cf | refs/heads/master | 2020-12-28T22:22:15.077514 | 2016-07-13T17:57:43 | 2016-07-13T17:57:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,148 | py | # Copyright 2014-2015 PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Overview
========
Interact with VTMIS public and private API
"""
import argparse
from queue import Queue
from threading import Thread
from datetime import timedelta
from datetime import datetime
from stoq.args import StoqArgs
from stoq.plugins import StoqWorkerPlugin
class VtmisScan(StoqWorkerPlugin):
def __init__(self):
super().__init__()
def activate(self, stoq):
self.stoq = stoq
# VTMIS API calls are not consistent, so let's map them out so the
# code can be as simple as possible. The primary key below will
        # be appended to the root VTMIS API URI and the underscores ("_")
# replaced with a "/". As an example, the key "ip-address_report"
# will be translated as:
# https://www.virustotal.com/vtapi/v2/ip-address/report/
self.api_calls = {'file_report': {'key': 'resource', 'allinfo': True, 'method': 'get', 'private': False},
'file_behaviour': {'key': 'hash', 'allinfo': True, 'method': 'get', 'private': True},
'file_network-traffic': {'key': 'hash', 'allinfo': False, 'method': 'get', 'private': True},
'file_feed': {'key': 'package', 'allinfo': False, 'method': 'get', 'private': True},
'file_download': {'key': 'hash', 'allinfo': False, 'method': 'get', 'private': True},
'file_scan': {'key': False, 'allinfo': False, 'method': 'post', 'private': False},
'file_rescan': {'key': 'resource', 'allinfo': False, 'method': 'post', 'private': False},
'file_search': {'key': 'query', 'allinfo': False, 'method': 'get', 'private': True},
'file_clusters': {'key': 'date', 'allinfo': False, 'method': 'get', 'private': True},
'url_report': {'key': 'resource', 'allinfo': True, 'method': 'get', 'private': False},
'url_scan': {'key': 'url', 'allinfo': False, 'method': 'post', 'private': False},
'url_feed': {'key': 'package', 'allinfo': False, 'method': 'get', 'private': True},
'ip-address_report': {'key': 'ip', 'allinfo': False, 'method': 'get', 'private': False},
'domain_report': {'key': 'domain', 'allinfo': False, 'method': 'get', 'private': False},
'comments_get': {'key': 'resource', 'allinfo': False, 'method': 'get', 'private': True}
}
parser = argparse.ArgumentParser()
parser = StoqArgs(parser)
worker_opts = parser.add_argument_group("Plugin Options")
worker_opts.add_argument("-a", "--apikey",
dest='apikey',
help="VTMIS API Key")
worker_opts.add_argument("-r", "--resource",
dest='api_resource',
default=False,
help="VTMIS API Resource to interact with")
worker_opts.add_argument("-q", "--query",
dest='query_value',
default=False,
help="Value to query using the specified API resource")
worker_opts.add_argument("-l", "--list",
dest='list_resources',
default=False,
action='store_true',
help="List all VTMIS API resources available")
worker_opts.add_argument("-s", "--alerts",
dest='do_alerts',
default=False,
action='store_true',
help="Check for alerts via the API")
worker_opts.add_argument("-d", "--download",
dest='download_samples',
default=self.download_samples,
action='store_true',
help="Download samples from alerts and file feed")
worker_opts.add_argument("--download-path",
dest='download_path',
default=False,
help="Directory to save download samples, if supported")
worker_opts.add_argument("-c", "--feed-connector",
dest='feed_connector',
help="Connector to utilize to save original JSON feed content")
worker_opts.add_argument("-f", "--save-feed",
dest='feed_save',
default=self.feed_save,
action='store_true',
help="Save original JSON feed content")
worker_opts.add_argument("-p", "--feed-path",
dest='feed_path',
help="Directory where the feed content is saved, if supported")
worker_opts.add_argument("-m", "--max-threads",
dest='max_threads',
help="Max number of threads when processing feeds")
options = parser.parse_args(self.stoq.argv[2:])
super().activate(options=options)
if self.list_resources:
print("VTMIS API Resources Available:")
for key, value in self.api_calls.items():
print("\t- {}".format(key))
print("\nUsage: stoq-cli.py vtmis -r file_search -q 7896b9b34bdbedbe7bdc6d446ecb09d5")
print(" OR stoq-cli.py vtmis -r domain_report -q www.google.com")
exit(0)
return True
def scan(self, payload, **kwargs):
"""
Interact with public and private VTMIS API
:param **kwargs resource: VTMIS API resource to query
:param **kwargs query: Query VTMIS for a specific item
:returns: Results from specified API call
        :rtype: dict
"""
super().scan()
results = None
resource = kwargs.get('resource', self.api_resource)
query = kwargs.get('query', self.query_value)
if not query:
if not resource:
resource = "file_report"
query = kwargs.get('sha1', None)
if resource == "alerts" or self.do_alerts:
results = self.alerts()
elif resource.endswith("_feed"):
results = []
for date in self.generate_dates(query):
results.append(self.call_api(resource, date, payload))
else:
results = self.call_api(resource, query, payload)
return results
def call_api(self, resource, query=None, payload=None):
# make sense of the API resource provided
if resource in self.api_calls:
api = self.api_calls[resource]
# Replace any _ with / so we can build a valid URL
uri = resource.replace("_", "/")
url = "{}/{}".format(self.api_url, uri)
else:
self.stoq.log.warn("Invalid API resource:{}".format(resource))
return None
# Start building the parameters of our API call
params = {'apikey': self.apikey}
# Determine what key is required, if any
if api['key']:
if query:
params[api['key']] = query
# Some API calls provide additional context, if using the private API
if api['allinfo']:
params['allinfo'] = 1
# Determine whether this API call requires a POST or GET, and whether
        # we are uploading a file or not.
if api['method'] == 'get':
response = self.stoq.get_file(url, params=params)
elif api['method'] == 'post':
if payload:
uuid = self.stoq.get_uuid
files = {'file': (uuid, payload)}
response = self.stoq.post_file(url, files=files, params=params)
else:
response = self.stoq.post_file(url, params=params)
if resource == 'file_download':
return self.save_download(response)
elif resource.endswith("_feed"):
self.process_feed(response, resource, query)
return True
try:
return self.stoq.loads(response)
except:
return None
def alerts(self):
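        # Fetch the current hunting notifications, optionally download each new sample,
        # then delete the processed alert IDs from the remote feed.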
processed_hashes = []
ids = []
results = []
url = "{}{}{}&output=json".format(self.alerts_url,
self.alerts_uri,
self.apikey)
response = self.stoq.get_file(source=url)
alerts = self.stoq.loads(response)
# Ensure we have results, otherwise just return None
try:
alert_list = alerts['notifications']
except TypeError:
return None
for alert in alert_list:
if alert['sha1'] not in processed_hashes:
# Check to see if we need to download the file, if so, do it.
if self.download_samples:
self.call_api('file_download', query=alert['sha1'])
# Keep track of the hashes we've processed so we don't handle
# dupes
processed_hashes.append(alert['sha1'])
results.append(alert)
# Track the IDs so we can delete them when done
ids.append(alert['id'])
# Delete the alert from the feed so we don't handle it again
self.delete_alert(ids)
return results
def delete_alert(self, ids):
# Split the IDs into lists of 100, the maximum allowed per the API
delete_ids = (ids[pos:pos + 100] for pos in range(0, len(ids), 100))
# Construct the URL
url = "{}{}{}".format(self.alerts_url,
self.delete_alerts_uri,
self.apikey)
# Iterate over the lists and post the content to delete the alerts
for delete_id in delete_ids:
self.stoq.post_file(url=url, data=str(delete_id))
def save_download(self, payload, filename=None, feed=False):
if payload and self.archive_connector and not feed:
if self.download_path:
path = self.download_path
archive = False
else:
path = None
archive = True
self.connectors[self.archive_connector].save(payload,
archive=archive,
binary=True,
path=path,
filename=filename)
elif payload and self.feed_connector and feed:
self.load_connector(self.feed_connector)
self.connectors[self.feed_connector].save(payload,
archive=False,
binary=True,
path=self.feed_path,
filename=filename)
else:
self.stoq.log.error("No connector or payload defined. Unable to save payload.")
def generate_dates(self, query):
"""
Generate dates that are valid for VTMIS feeds.
"""
current_time = datetime.now()
if query.endswith("h"):
max_time = int(query[:-1]) + 1
for i in range(1, max_time):
delta = current_time - timedelta(hours=i)
yield delta.strftime("%Y%m%dT%H")
elif query.endswith("m"):
# VT recommends pulling no sooner than 5 minutes to allow for
# processing on their side. Let's take that into consideration
# when the user makes a call and automatically add 5 minutes.
max_time = int(query[:-1]) + 5
for i in range(5, max_time):
delta = current_time - timedelta(minutes=i)
yield delta.strftime("%Y%m%dT%H%M")
else:
yield query
def process_feed(self, payload, resource, query):
# Set saveresults to False as we don't return anything of use
# when handling feeds. All of the results are saved outside of the
# normal workflow.
self.saveresults = False
# Generate the filename
index = "vtmis_{}".format(resource)
filename = "{}-{}.tar.bz2".format(resource, query)
queue = Queue()
max_threads = int(self.max_threads)
for i in range(max_threads):
proc = Thread(target=self._save_feed, args=(queue, index, resource))
proc.setDaemon(True)
proc.start()
# Do we want to save the raw JSON feed that is initially downloaded?
if self.feed_save:
self.save_download(payload, filename=filename, feed=True)
self.load_extractor("decompress")
tar_files = self.extractors['decompress'].extract(payload)
for tar_file in tar_files:
raw_content = self.extractors['decompress'].extract(tar_file[1])
for content in raw_content:
lines = content[1].decode().split("\n")
compressed_filename = content[0]['filename']
self.stoq.log.info("Processing {} items from {}".format(len(lines), compressed_filename))
for line in lines:
line = self.stoq.loads(line)
queue.put(line)
queue.join()
def _save_feed(self, queue, index, resource):
while True:
try:
result = queue.get()
# Check to see if we should download each sample file
if self.download_samples and resource == 'file_feed':
file_link = result['link']
file_payload = self.stoq.get_file(file_link)
self.save_download(file_payload)
self.connectors[self.output_connector].save(result, index=index)
except Exception as err:
self.stoq.log.error("Unable to process VTMIS feed: {}".format(str(err)))
queue.put(result)
queue.task_done()
| [
"[email protected]"
] | |
f705ee4bc444b70f5af055fe8b3972fe83329f2c | 8a6c18088c50bc782df58e176663114d91ffc47c | /src/teams/migrations/0048_auto_20180814_1950.py | c59f9f8cb4135dcc034d5f69b80569efbd596119 | [
"BSD-3-Clause"
] | permissive | flummer/bornhack-website | 14cc55f34b85740d32567d6a3934e865f2549381 | c40f225f0993a6edd25dc608de1f6467f7d8e5a1 | refs/heads/master | 2020-04-29T13:23:44.167064 | 2019-05-12T15:30:01 | 2019-05-12T15:30:01 | 176,167,685 | 0 | 0 | BSD-3-Clause | 2019-03-17T22:19:27 | 2019-03-17T22:19:27 | null | UTF-8 | Python | false | false | 359 | py | # Generated by Django 2.1 on 2018-08-14 17:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('teams', '0047_taskcomment'),
]
operations = [
migrations.RenameField(
model_name='taskcomment',
old_name='content',
new_name='comment',
),
]
| [
"[email protected]"
] | |
07151db73c571183b2efe81f155b7ee8f4d0d5aa | 9b8e2992a38f591032997b5ced290fe1acc3ad94 | /untitled1.py | 5168631138eec406cbc548e97cca8ab1abb1401a | [] | no_license | girishdhegde/aps-2020 | c694443c10d0d572c8022dad5a6ce735462aaa51 | fb43d8817ba16ff78f93a8257409d77dbc82ced8 | refs/heads/master | 2021-08-08T04:49:18.876187 | 2021-01-02T04:46:20 | 2021-01-02T04:46:20 | 236,218,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | import cv2
import numpy as np
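# Helpers for browsing a video by motion: dense() scores the 200 frames around the current
# position by the magnitude of the Farneback optical flow and jumps to the strongest one;
# left() and right() step to the next-strongest frame on either side of it.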
def dense():
global cap,currentframe
if denseflag==1:
x=currentframe
x1=x-100
y1=x+100
count=x1
cap.set(1,x1)
ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)
while(count<y1):
ret, frame2 = cap.read()
next1 = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs,next1, None, 0.5, 3, 15, 3, 5, 1.2, 0)
a=flow[0]
a=np.sum(np.square(a))
b=flow[1]
b=np.sum(np.square(b))
z=np.sqrt(a+b)
data.append([count,z])
print(count)
#cv2.imshow('frame1',frame1)
#k = cv2.waitKey(30) & 0xff
#if k == 27:
# break
prvs = next1
count+=1
List = [data[f][1] for f in range(len(data))]
high=List.index(max(List))
print(high)
cap.set(1,data[high][0])
currentframe = data[high][0]
ret, frame1 = cap.read()
cv2.destroyAllWindows()
cv2.imshow('frame',frame1)
else :
print("Check mark optical flow")
temp=[]
data=[]
def left():
global currentframe,high,List,temp,cap
if denseflag==1:
if(high!=0):
temp=[List[f] for f in range(0,high)]
high=temp.index(max(temp))
high=List.index(temp[high])
print(data[high][0])
cap.set(1,data[high][0])
currentframe = data[high][0]
ret, frame1 = cap.read()
cv2.destroyAllWindows()
cv2.imshow('frame',frame1)
else:
print("Go right")
else :
print("Check mark optical flow")
def right():
global high,List,cap,currentframe
if denseflag==1:
if(high!=199):
temp=[List[f] for f in range(high+1,200)]
high=temp.index(max(temp))
high=List.index(temp[high])
print(data[high][0])
cap.set(1,data[high][0])
currentframe = data[high][0]
ret, frame1 = cap.read()
cv2.destroyAllWindows()
cv2.imshow('frame',frame1)
else:
print("Go left")
else :
print("Check mark optical flow")
| [
"[email protected]"
] | |
5294c57910e749afa0feb0dc04adc4fd5fdc14aa | a7685d315e6616cc2b6d43587bb19ead4324fb2a | /cci_salesman/wizard/extract_participations.py | 11c7dc952971d632537da32cf14d109bdf728a5d | [] | no_license | philmervdm/modules_cci_odoo8 | 472ea68de409e876722413afdd873d6a7827744e | 603144219a86e805f7603cfafc0fb05a78166eef | refs/heads/master | 2021-01-09T20:02:58.326569 | 2017-05-06T15:45:03 | 2017-05-06T15:45:03 | 60,332,279 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,601 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import pooler
import datetime
def _club_active_participations(self, cr, uid, data, context):
club_id = data['id']
# cr.execute('SELECT p.id FROM cci_club_participation as p, cci_club_participation_state as s WHERE p.group_id = %s AND ( p.date_out is null OR p.date_out > %s ) #AND p.state_id = s.id AND s.current', (club_id, datetime.date.today() ))
# res = cr.fetchall()
# part_ids = [x[0] for x in res]
# value = {
# 'domain': [('id', 'in', part_ids)],
# 'name': 'Active Participations',
# 'view_type': 'form',
# 'view_mode': 'tree,form',
# 'res_model': 'cci_club.participation',
# 'context': {},
# 'type': 'ir.actions.act_window'
# }
# THE FOLLOWING WAY IS MORE DYNAMIC
value = {
'domain': [('group_id', '=', club_id),('state_id.current','=',True),'|',('date_out','=',False),('date_out','>',datetime.date.today().strftime('%Y-%m-%d') )],
'name': 'Active Participations',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'cci_club.participation',
'context': {},
'type': 'ir.actions.act_window'
}
return value
class wizard_club_active_participations(wizard.interface):
states = {
'init': {
'actions': [],
'result': {
'type': 'action',
'action': _club_active_participations,
'state': 'end'
}
},
}
wizard_club_active_participations("wizard_cci_club_active_participations")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
37aad6a8d126f7a2f34d49e6e6fd4bddd5f08cb1 | 7f529df5381361874d51c8cb7d8678e088dbe71d | /aea/protocols/default/__init__.py | 52e51b51e36ef3b50d6e1ccddf33b0782f9c9c80 | [
"Apache-2.0"
] | permissive | cyenyxe/agents-aea | 914546708ce3e2e913ce1bb48bc8928289738c9a | c2aec9127028ae13def3f69fbc80d35400de1565 | refs/heads/master | 2021-01-07T05:36:27.879856 | 2020-02-07T19:28:01 | 2020-02-07T19:28:01 | 241,594,907 | 0 | 0 | Apache-2.0 | 2020-03-05T14:53:54 | 2020-02-19T10:35:49 | null | UTF-8 | Python | false | false | 872 | py | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the support resources for the default protocol."""
| [
"[email protected]"
] | |
125494d60a6fda1d0624163876564440784178ed | f8666599b83d34c861651861cc7db5b3c434fc87 | /plotly/validators/carpet/baxis/tickformatstop/_name.py | 8ff0b7a47aa7b5fed8b166c536845b675ff38361 | [
"MIT"
] | permissive | mode/plotly.py | 8b66806e88c9f1820d478bab726f0bea81884432 | c5a9ac386a40df2816e6c13264dadf14299401e4 | refs/heads/master | 2022-08-26T00:07:35.376636 | 2018-09-26T19:08:54 | 2018-09-26T19:19:31 | 60,372,968 | 1 | 1 | MIT | 2019-11-13T23:03:22 | 2016-06-03T19:34:55 | Python | UTF-8 | Python | false | false | 492 | py | import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='name',
parent_name='carpet.baxis.tickformatstop',
**kwargs
):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'style'),
**kwargs
)
| [
"[email protected]"
] | |
d15d67d65624be46de1b72401f49d991cdb8c86e | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-projectman/huaweicloudsdkprojectman/v4/model/batch_delete_iterations_v4_request.py | 49a9950000cd6af7e35e6cdf50daf4cd5933de11 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,126 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class BatchDeleteIterationsV4Request:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'project_id': 'str',
'body': 'BatchDeleteIterationsV4RequestBody'
}
attribute_map = {
'project_id': 'project_id',
'body': 'body'
}
def __init__(self, project_id=None, body=None):
"""BatchDeleteIterationsV4Request
The model defined in huaweicloud sdk
        :param project_id: 32-character ID of the DevCloud project
:type project_id: str
:param body: Body of the BatchDeleteIterationsV4Request
:type body: :class:`huaweicloudsdkprojectman.v4.BatchDeleteIterationsV4RequestBody`
"""
self._project_id = None
self._body = None
self.discriminator = None
self.project_id = project_id
if body is not None:
self.body = body
@property
def project_id(self):
"""Gets the project_id of this BatchDeleteIterationsV4Request.
        32-character ID of the DevCloud project
:return: The project_id of this BatchDeleteIterationsV4Request.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this BatchDeleteIterationsV4Request.
        32-character ID of the DevCloud project
:param project_id: The project_id of this BatchDeleteIterationsV4Request.
:type project_id: str
"""
self._project_id = project_id
@property
def body(self):
"""Gets the body of this BatchDeleteIterationsV4Request.
:return: The body of this BatchDeleteIterationsV4Request.
:rtype: :class:`huaweicloudsdkprojectman.v4.BatchDeleteIterationsV4RequestBody`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this BatchDeleteIterationsV4Request.
:param body: The body of this BatchDeleteIterationsV4Request.
:type body: :class:`huaweicloudsdkprojectman.v4.BatchDeleteIterationsV4RequestBody`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
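        # Recursively serialize nested models, lists and dicts; attributes named in
        # sensitive_list are masked as "****".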
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchDeleteIterationsV4Request):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
740028a049c72a4eb04c08359edfac9f378d6525 | c0340c511cff5b40b4681c4d3238d807624c0323 | /models/corpus_reader/corpusIterator.py | 9ae01c2a3c9b76ef214d4727d895cd74c0b05141 | [] | no_license | m-hahn/grammar-optim | 5fa7ade47d2ad91f517c887ee2c65af24059069d | 07a1a80692a504bcafc8120a21c4dc9066b495ee | refs/heads/master | 2022-08-30T06:54:42.749264 | 2022-08-05T12:09:28 | 2022-08-05T12:09:28 | 156,456,167 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,334 | py | import os
import random
import sys
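# Column names of the CoNLL-U format; 'index' and 'head' are cast to int when a sentence is processed.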
header = ["index", "word", "lemma", "posUni", "posFine", "morph", "head", "dep", "_", "_"]
def readUDCorpus(language, partition):
basePaths = ["/u/scr/mhahn/grammar-optim_ADDITIONAL/corpora/"]
files = []
while len(files) == 0:
if len(basePaths) == 0:
print("No files found")
raise IOError
basePath = basePaths[0]
del basePaths[0]
files = os.listdir(basePath)
files = list(filter(lambda x:x.startswith("UD_"+language), files))
data = []
for name in files:
if "Sign" in name:
print("Skipping "+name)
continue
assert ("Sign" not in name)
if "Chinese-CFL" in name:
print("Skipping "+name)
continue
suffix = name[len("UD_"+language):]
subDirectory =basePath+"/"+name
subDirFiles = os.listdir(subDirectory)
partitionHere = partition
candidates = list(filter(lambda x:"-ud-"+partitionHere+"." in x and x.endswith(".conllu"), subDirFiles))
if len(candidates) == 0:
print("Did not find "+partitionHere+" file in "+subDirectory)
continue
if len(candidates) == 2:
candidates = list(filter(lambda x:"merged" in x, candidates))
assert len(candidates) == 1, candidates
try:
dataPath = subDirectory+"/"+candidates[0]
with open(dataPath, "r") as inFile:
newData = inFile.read().strip().split("\n\n")
assert len(newData) > 1
data = data + newData
except IOError:
print("Did not find "+dataPath)
assert len(data) > 0, (language, partition, files)
print >> sys.stderr, "Read "+str(len(data))+ " sentences from "+str(len(files))+" "+partition+" datasets."
return data
class CorpusIterator():
def __init__(self, language, partition="train", storeMorph=False, splitLemmas=False, shuffleData=True, shuffleDataSeed=None, splitWords=False):
assert not splitLemmas
self.splitLemmas = splitLemmas
self.splitWords = splitWords
assert not self.splitWords
self.storeMorph = storeMorph
data = readUDCorpus(language, partition)
if shuffleData:
if shuffleDataSeed is None:
random.shuffle(data)
else:
random.Random(shuffleDataSeed).shuffle(data)
self.data = data
self.partition = partition
self.language = language
assert len(data) > 0, (language, partition)
def permute(self):
random.shuffle(self.data)
def length(self):
return len(self.data)
def processSentence(self, sentence):
sentence = list(map(lambda x:x.split("\t"), sentence.split("\n")))
result = []
for i in range(len(sentence)):
# print sentence[i]
if sentence[i][0].startswith("#"):
continue
if "-" in sentence[i][0]: # if it is NUM-NUM
continue
if "." in sentence[i][0]:
continue
sentence[i] = dict([(y, sentence[i][x]) for x, y in enumerate(header)])
sentence[i]["head"] = int(sentence[i]["head"])
sentence[i]["index"] = int(sentence[i]["index"])
sentence[i]["word"] = sentence[i]["word"].lower()
if self.splitLemmas:
sentence[i]["lemmas"] = sentence[i]["lemma"].split("+")
if self.storeMorph:
sentence[i]["morph"] = sentence[i]["morph"].split("|")
if self.splitWords:
sentence[i]["words"] = sentence[i]["word"].split("_")
sentence[i]["dep"] = sentence[i]["dep"].lower()
if self.language == "LDC2012T05" and sentence[i]["dep"] == "hed":
sentence[i]["dep"] = "root"
if self.language == "LDC2012T05" and sentence[i]["dep"] == "wp":
sentence[i]["dep"] = "punct"
result.append(sentence[i])
# print sentence[i]
return result
def getSentence(self, index):
result = self.processSentence(self.data[index])
return result
def iterator(self, rejectShortSentences = False):
for sentence in self.data:
if len(sentence) < 3 and rejectShortSentences:
continue
yield self.processSentence(sentence)
| [
"[email protected]"
] | |
27c7fe3ed3a9f243315dd8256f5390ab76485e06 | e81576012330e6a6024d14f3e241f88ca34b73cd | /python_code/vnev/Lib/site-packages/jdcloud_sdk/services/mps/apis/GetNotificationRequest.py | 49630052bdeca358968876ff0e2634d9dd20ad88 | [
"MIT"
] | permissive | Ureimu/weather-robot | eba6a84147755aa83c941a306bac1a7c4e95e23e | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | refs/heads/master | 2021-01-15T07:23:42.274413 | 2020-03-23T02:30:19 | 2020-03-23T02:30:19 | 242,912,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class GetNotificationRequest(JDCloudRequest):
"""
获取媒体处理通知
"""
def __init__(self, parameters, header=None, version="v1"):
super(GetNotificationRequest, self).__init__(
'/regions/{regionId}/notification', 'GET', header, version)
self.parameters = parameters
class GetNotificationParameters(object):
def __init__(self, regionId, ):
"""
:param regionId: region id
"""
self.regionId = regionId
| [
"[email protected]"
] | |
578ab4564ac917e59f31823eb1d6cfb9f28fc608 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_21885.py | 8917a8ab7563dc2340287d482527305078ed27d8 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | # Making a call to a Chef server from Windows PE
> perl Configure VC-WIN32 no-asm no-shared
> ms\do_ms
> nmake -f ms\ntdll.mak
| [
"[email protected]"
] | |
4fcebeee0e3e255674d7dfda68f199803b482d48 | c7e9ec5ce6627f6f68bab1b86a27a4516595154d | /maintenance/06migratedeletes.py | c9d6b4ad448306f6ddbcd4e2633be4ec2113805c | [] | no_license | michaelcrubenstein/consentrecords | 7b79e82c9ad4b5efcfbb44a50ff1d4cadf7180e2 | 992fe78c68d1d5c083f9e2cc0e3e9aa24363b93d | refs/heads/master | 2021-01-23T19:28:13.807809 | 2018-07-03T16:10:34 | 2018-07-03T16:10:34 | 41,223,029 | 1 | 1 | null | 2018-07-03T16:10:35 | 2015-08-22T20:21:26 | JavaScript | UTF-8 | Python | false | false | 1,765 | py | # Migrate translation objects to translation types.
import datetime
import django
import tzlocal
import getpass
import sys
from django.db import transaction
from django.contrib.auth import authenticate
from django.db.models import F
from django.db.models import Count
from consentrecords.models import TransactionState, Terms, Instance, Value, DeletedValue, DeletedInstance
from consentrecords.models import UserInfo, NameList
from consentrecords.models import AccessRecord
from consentrecords import pathparser
if __name__ == "__main__":
django.setup()
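    # Local UTC offset in minutes, negated (minutes west of UTC), passed to TransactionState below.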
timezoneoffset = -int(tzlocal.get_localzone().utcoffset(datetime.datetime.now()).total_seconds()/60)
if len(sys.argv) > 1:
username = sys.argv[1]
else:
username = input('Email Address: ')
password = getpass.getpass("Password: ")
user = authenticate(username=username, password=password)
if not user:
raise ValueError("user was not authenticated")
with transaction.atomic():
transactionState = TransactionState(user, timezoneoffset)
Terms.initialize(transactionState)
i = Instance.objects.filter(deletedinstance__isnull=False).count()
j = Value.objects.filter(deletedvalue__isnull=False).count()
for x in Instance.objects.filter(deletedinstance__isnull=False):
x.deleteTransaction = x.deletedinstance.transaction
x.save()
for x in Value.objects.filter(deletedvalue__isnull=False):
x.deleteTransaction = x.deletedvalue.transaction
x.save()
print("migrate %s instances" % i)
print("migrate %s values" % j)
input('Confirm transaction: ')
print("Complete.")
| [
"[email protected]"
] | |
0c35e06c4e5be85693d075e16977c37d18936c4b | 14aab11a9bd38acaaf3ed959ce736a3e1f1e3bad | /contrast/4/p4/mininet/delay.py | dc4aa0dd140d190cd83feac0ad77c10c8e299be3 | [] | no_license | chenyuchuting0912/SwitchML | 4eae7d3a3f40c93156ebf039e34df67df430c286 | d24ee879b3feadf308b4fdf52d090d0d21d1ee80 | refs/heads/master | 2020-06-03T17:41:13.993330 | 2020-01-09T02:39:47 | 2020-01-09T02:39:47 | 191,668,879 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | #!/usr/bin/python
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSController
from mininet.node import CPULimitedHost, Host, Node
from mininet.node import OVSKernelSwitch, UserSwitch
from mininet.node import IVSSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel, info
from mininet.link import TCLink, Intf
from subprocess import call

def myNetwork():

    net = Mininet( topo=None,
                   build=False,
                   ipBase='10.0.0.0/8')

    info( '*** Adding controller\n' )

    info( '*** Add switches\n')
    s1 = net.addSwitch('s1', cls=OVSKernelSwitch)

    info( '*** Add hosts\n')
    h2 = net.addHost('h2', cls=Host, ip='10.0.0.2', defaultRoute=None)
    h3 = net.addHost('h3', cls=Host, ip='10.0.0.3', defaultRoute=None)
    h1 = net.addHost('h1', cls=Host, ip='10.0.0.1', defaultRoute=None)

    info( '*** Add links\n')
    h3s1 = {'bw':10,'delay':'5ms','loss':0,'max_queue_size':1000}
    net.addLink(h3, s1, cls=TCLink , **h3s1)
    h1s1 = {'bw':10,'delay':'5ms','loss':0,'max_queue_size':1000}
    net.addLink(h1, s1, cls=TCLink , **h1s1)
    h2s1 = {'bw':10,'delay':'5ms','loss':0,'max_queue_size':1000}
    net.addLink(h2, s1, cls=TCLink , **h2s1)

    info( '*** Starting network\n')
    net.build()

    info( '*** Starting controllers\n')
    for controller in net.controllers:
        controller.start()

    info( '*** Starting switches\n')
    net.get('s1').start([])

    info( '*** Post configure switches and hosts\n')

    CLI(net)
    net.stop()


if __name__ == '__main__':
    setLogLevel( 'info' )
    myNetwork()
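
# Mininet needs root privileges to create the virtual switches and hosts, so this topology is
# typically launched with something like `sudo python delay.py`. The Mininet CLI then opens for
# interactive testing (for example `pingall` or `iperf`), and leaving the CLI tears the network down.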
| [
"[email protected]"
] | |
045862281c288c88f92463538e884c0427ee8453 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/choropleth/_reversescale.py | 8af580fb6748504b61ef7f5ef3b1ef25f5da02e4 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 431 | py | import _plotly_utils.basevalidators


class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):

    def __init__(
        self, plotly_name='reversescale', parent_name='choropleth', **kwargs
    ):
        super(ReversescaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type='calc',
            role='style',
            **kwargs
        )
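
# A minimal sketch of how plotly's figure validation consumes this class (assuming the
# BaseValidator.validate_coerce API; this call is illustrative and not part of the generated file):
#
#     ReversescaleValidator().validate_coerce(True)   # returns True
#     ReversescaleValidator().validate_coerce('x')    # raises ValueError for a non-boolean value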
| [
"[email protected]"
] | |
1f84f12505ff59602d122001d005785a8f5bce8d | adbedf9626c52748aa048f2b17c18d25262b4d56 | /robot_framework_baseline_comparator/BaselineComparator/html_baseline.py | 34eefcebeb0891f7888d2198c466c78c88472e5c | [] | no_license | sanjitroy1992/robot_framework_custom_libraries | 3ef91ea6d4705215f86c83d276d67ce7c5af673a | e5fde8f428a4d46d5cacb2c5369f9c59529f5c91 | refs/heads/master | 2022-11-06T09:11:02.148601 | 2020-06-29T09:35:46 | 2020-06-29T09:35:46 | 274,330,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,893 | py |
from Libraries.Common.BaselineComparator.HTMLTBodyComparator import HTMLBodyComparator
from Libraries.Common.BaselineComparator.HTMLTBodyComparator import HTMLFooterComparator
from Libraries.Common.BaselineComparator.HTMLTBodyComparator import HTMLHeaderComparator
from Libraries.Utilities import ReportBuilder
from itertools import zip_longest
import lxml.html

class HTMLBaseline(object):

    def __init__(self):
        self.success = True
        self.error_msgs = set()

    @staticmethod
    def convert_csv_to_html(header, body):
        table = lxml.html.fromstring('<table>')
        header_lxml = HTMLBaseline._list_to_tbody(header)
        body_lxml = HTMLBaseline._list_to_tbody(body)
        table.append(header_lxml)
        table.append(body_lxml)
        return lxml.html.tostring(table, encoding='unicode')

    @staticmethod
    def _list_to_tbody(list_of_rows):
        tbody = lxml.html.fromstring('<tbody>')
        for row in list_of_rows:
            tr = lxml.html.fromstring('<tr>')
            for cell_text in row:
                td = lxml.html.fromstring('<td>')
                if cell_text is None:
                    td.text = "missed_or_extra_cell"
                else:
                    td.text = cell_text.strip()
                tr.append(td)
            tbody.append(tr)
        return tbody

    def _open_file(self, baseline_file):
        with open(baseline_file, "rb") as f:
            return f.read()

    def compare_html_pivot_baseline_to_application(self, baseline, app):
        self.success = True
        self.error_msgs = set()
        report = ReportBuilder()
        tbody, thead = self._prepare_table_for_pivot(report)
        app, baseline = self._prepere_lxml_baseline_and_app(app, baseline)
        self._compare_pivot_upper_part(app, baseline, report, thead)
        self._compare_pivot_lower_part(app, baseline, report, tbody)
        return self.success, self.error_msgs

    def _compare_pivot_lower_part(self, app, baseline, report, tbody):
        for row_left, row_right in zip_longest(self._compare_pivot_part(app, baseline, report, 3),
                                               self._compare_pivot_part(app, baseline, report, 4)):
            self._merge_pivot_parts(report, row_left, row_right, tbody)

    def _merge_pivot_parts(self, report, row_left, row_right, tbody):
        if row_left is None:
            row_left = report.create_row()
            td = report.create_td()
            row_left.append(td)
        right_tds = row_right.getchildren()
        for td in right_tds:
            row_left.append(td)
        tbody.append(row_left)

    def _compare_pivot_upper_part(self, app, baseline, report, thead):
        for row_left, row_right in zip_longest(self._compare_pivot_part(app, baseline, report, 1),
                                               self._compare_pivot_part(app, baseline, report, 2)):
            self._merge_pivot_parts(report, row_left, row_right, thead)

    def _prepere_lxml_baseline_and_app(self, app, baseline):
        baseline = lxml.html.fromstring(baseline)
        app = lxml.html.fromstring(app)
        for tr in baseline.xpath(r".//tr[not(.//td[text()])]"):
            tr.drop_tree()
        for tr in app.xpath(r".//tr[not(.//td[text()])]"):
            tr.drop_tree()
        return app, baseline

    def _prepare_table_for_pivot(self, report):
        table = report.create_table()
        thead = report.create_thead()
        tbody = report.create_tbody()
        table.append(thead)
        table.append(tbody)
        return tbody, thead

    def _compare_pivot_part(self, app, baseline, report, index):
        baseline_lxml = baseline.xpath('.//td[@id="no-boarder{}"]'.format(index))[0]
        application_lxml = app.xpath('.//td[@id="no-boarder{}"]'.format(index))[0]
        body_comparator = HTMLBodyComparator(baseline_lxml, application_lxml)
        comparison = list(body_comparator.icompare())
        self.success = self.success and body_comparator.success
        self.error_msgs.update(body_comparator.error_msgs)
        return comparison

    def compare_html_baseline_to_app(self, baseline, app, skip_columns_names=None, sort_column_names=None,
                                     key_column_names=None, missing_columns=None):
        table_comparison = self._compare_table_to_baseline(baseline, app, skip_columns_names,
                                                           sort_column_names, key_column_names, missing_columns)
        print("*HTML* {}".format(table_comparison))
        return self.success, self.messages

    def _compare_table_to_baseline(self, baseline, app, skip_columns_names=None, sort_column_names=None,
                                   key_column_names=None, missing_columns=[]):
        report = ReportBuilder()
        table = report.create_table()
        app, baseline = self._prepere_lxml_baseline_and_app(app, baseline)
        baseline_header = [i.text_content() for i in
                           baseline.xpath(".//tbody")[0].xpath(".//tr")[0].xpath(".//td[not(@id)]")]
        if missing_columns:
            missing_columns_index = [baseline_header.index(i) for i in missing_columns]
        else:
            missing_columns_index = None
        header_comparator = HTMLHeaderComparator(self._get_tbody_by_index(baseline, 0),
                                                 self._get_tbody_by_index(app, 0),
                                                 skip_columns_names,
                                                 key_column_names
                                                 )
        body_comparator = HTMLBodyComparator(self._get_tbody_by_index(baseline, 1),
                                             self._get_tbody_by_index(app, 1),
                                             header_comparator.get_columns_indexes(skip_columns_names),
                                             header_comparator.get_columns_indexes(sort_column_names),
                                             header_comparator.get_columns_indexes(key_column_names),
                                             baseline_header,
                                             missing_columns_index
                                             )
        footer_comparator = HTMLFooterComparator(self._get_tbody_by_index(baseline, 2),
                                                 self._get_tbody_by_index(app, 2),
                                                 header_comparator.get_columns_indexes(skip_columns_names)
                                                 )
        header_comparison = header_comparator.compare()
        body_comparison, unique_key_found = body_comparator.compare()
        if unique_key_found:
            header_comparator = HTMLHeaderComparator(self._get_tbody_by_index(baseline, 0),
                                                     self._get_tbody_by_index(app, 0),
                                                     skip_columns_names,
                                                     key_column_names=unique_key_found
                                                     )
            header_comparison = header_comparator.compare()
        footer_comparison = footer_comparator.compare()
        table.append(header_comparison)
        table.append(body_comparison)
        table.append(footer_comparison)
        self.success = all([header_comparator.success, body_comparator.success, footer_comparator.success])
        self.messages = set()
        self.messages.update(header_comparator.error_msgs, body_comparator.error_msgs, footer_comparator.error_msgs)
        return report

    @staticmethod
    def _get_tbody_by_index(app, index):
        try:
            app_part = app.xpath(".//tbody")[index]
        except IndexError:
            app_part = lxml.html.fromstring("<p></p>")
        return app_part
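
# Illustrative call (the HTML strings and the key column name below are hypothetical; the
# HTMLHeader/Body/FooterComparator classes imported above do the actual cell-by-cell comparison):
#
#     success, messages = HTMLBaseline().compare_html_baseline_to_app(baseline_html, app_html,
#                                                                     key_column_names=['Trade Id'])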
| [
"[email protected]"
] |