Dataset schema (one row per source file; ⌀ marks null values):

| column | dtype | observed range / distinct values |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, ⌀ |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀ |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀ |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 to 1 |
| author_id | string | length 1 to 132 |
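A minimal sketch of how records with this schema could be inspected (illustrative only: the dataset name "your-org/your-dataset" is a placeholder, and the snippet assumes the rows are exposed through the Hugging Face `datasets` library):

```python
from itertools import islice
from datasets import load_dataset

# Stream the split so the large "content" column is not downloaded up front.
ds = load_dataset("your-org/your-dataset", split="train", streaming=True)

for row in islice(ds, 1):
    # Lightweight metadata columns, then just the first line of the file body.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"].splitlines()[0])
```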
fcd760062115725444cb9e272a9278e304d34a66 | 83924510277a85ab7719598877f1cc56741854d3 | /qatrack/notifications/service_log_scheduling/tasks.py | 1fe758d5a5cd4ff71c7b2b8867ad29e161950829 | [
"MIT"
] | permissive | tguiot/qatrackplus | 3cc07ed6320c9f92a2d848e3429c89f2f0051712 | c587cb2ddbfbc116a3ce5124537b2160af09d8e1 | refs/heads/master | 2023-06-01T01:30:53.619027 | 2021-07-02T10:35:52 | 2021-07-02T10:35:52 | 381,009,929 | 0 | 0 | NOASSERTION | 2021-07-02T10:35:53 | 2021-06-28T11:39:21 | null | UTF-8 | Python | false | false | 2,897 | py | import logging
from django.conf import settings
from django.utils import timezone
from django_q.models import Schedule
from django_q.tasks import schedule
from qatrack.notifications.models import ServiceEventSchedulingNotice
from qatrack.qatrack_core.email import send_email_to_users
from qatrack.qatrack_core.tasks import run_periodic_scheduler
logger = logging.getLogger('django-q')
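# Module flow (descriptive comment added for readability): run_scheduling_notices() is intended
# to run periodically; run_periodic_scheduler() walks the due ServiceEventSchedulingNotice
# objects and hands each one to schedule_service_event_scheduling_notice(), which registers a
# one-off django-q Schedule whose task, send_scheduling_notice(), emails the notice recipients.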
def run_scheduling_notices():
run_periodic_scheduler(
ServiceEventSchedulingNotice,
"run_service_log_scheduling_notices",
schedule_service_event_scheduling_notice,
time_field="time",
recurrence_field="recurrences",
)
def schedule_service_event_scheduling_notice(notice, send_time):
logger.info("Service Event Scheduling notification %s for %s" % (notice.pk, send_time))
name = "Send notification %d %s" % (notice.pk, send_time.isoformat())
schedule(
"qatrack.notifications.service_log_scheduling.tasks.send_scheduling_notice",
notice.id,
name,
name=name,
schedule_type=Schedule.ONCE,
repeats=1,
next_run=send_time,
task_name=name,
)
def send_scheduling_notice(notice_id, task_name=""):
notice = ServiceEventSchedulingNotice.objects.filter(id=notice_id).first()
if notice:
if not notice.send_required():
logger.info("Send of ServiceEventSchedulingNotice %s requested, but no Service Event Schedules to notify about" % notice_id) # noqa: E501
return
recipients = notice.recipients.recipient_emails()
if not recipients:
logger.info("Send of ServiceEventSchedulingNotice %s requested, but no recipients" % notice_id)
return
else:
logger.info("Send of ServiceEventSchedulingNotice %s requested, but no such ServiceEventSchedulingNotice exists" % notice_id) # noqa: E501
return
try:
send_email_to_users(
recipients,
"service_log_scheduling/email.html",
context={'notice': notice},
subject_template="service_log_scheduling/subject.txt",
text_template="service_log_scheduling/email.txt",
)
logger.info("Sent ServiceEventSchedulingNotice %s at %s" % (notice_id, timezone.now()))
try:
Schedule.objects.get(name=task_name).delete()
except: # noqa: E722 # pragma: nocover
logger.exception("Unable to delete Schedule.name = %s after successful send" % task_name)
except: # noqa: E722 # pragma: nocover
logger.exception(
"Error sending email for ServiceEventSchedulingNotice %s at %s." % (notice_id, timezone.now())
)
fail_silently = getattr(settings, "EMAIL_FAIL_SILENTLY", True)
if not fail_silently:
raise
finally:
notice.last_sent = timezone.now()
notice.save()
| [ "[email protected]" ] | |
c5c2760d2fc07bd602141fc4bf0326bacc3903b6 | 1b2369715f47c9276f3dd458541d0b62cf5ba237 | /core/templator.py | e520946dd891c205f610bf01a342298ab7428e33 | [] | no_license | Virucek/gb_framework | 5a68cdf4f09867db3704ec589e937ddbe68b27f0 | 50893554c80583243ed301ab52e4bc46875ad241 | refs/heads/main | 2023-02-13T14:01:57.808400 | 2021-01-04T22:20:07 | 2021-01-04T22:20:07 | 319,729,864 | 0 | 0 | null | 2021-01-04T22:20:20 | 2020-12-08T18:44:10 | Python | UTF-8 | Python | false | false | 630 | py | import os
from jinja2 import Template, Environment, FileSystemLoader
def render(template_name, folder='templates', **kwargs):
# file = os.path.join('templates', template_name)
# with open(file, encoding='utf-8') as file:
# template = Template(file.read())
env = Environment()
env.loader = FileSystemLoader(folder)
template = env.get_template(template_name)
    if 'context' in kwargs:  # If a 'context' keyword argument was passed, it is used for rendering.
return template.render(**kwargs['context'])
return template.render(**kwargs)
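# Illustrative usage (added sketch, not part of the original module; the template name and
# context values are made up):
#     page = render('index.html', title='Home')
#     page = render('index.html', context={'title': 'Home'})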
| [ "[email protected]" ] | |
e2d5e9208c5d6b345a80365e423c67dd11e07d48 | 4f510470b3093ab2c60f929221af82c79b121ca7 | /ML/SCIENCE/day07/bi.py | c23037e4121ee3dd657696790e8fe62a76e9e0f4 | [] | no_license | q737645224/python3 | ce98926c701214f0fc7da964af45ba0baf8edacf | 4bfabe3f4bf5ba4133a16102c51bf079d500e4eb | refs/heads/master | 2020-03-30T07:11:17.202996 | 2018-10-30T06:14:51 | 2018-10-30T06:14:51 | 150,921,088 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as mp
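# Simulation implemented below: each of 10000 rounds draws the number of heads in 9 fair coin
# flips; starting from 1000 chips, the player gains one chip when at least 5 heads come up and
# loses one chip otherwise, and the resulting chip history is plotted.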
outcomes = np.random.binomial(9, 0.5, 10000)
chips = [1000]
for outcome in outcomes:
if outcome >= 5:
chips.append(chips[-1] + 1)
else:
chips.append(chips[-1] - 1)
chips = np.array(chips)
mp.figure('Binomial', facecolor='lightgray')
mp.title('Binomial', fontsize=20)
mp.xlabel('Round', fontsize=14)
mp.ylabel('Chip', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
# Choose the line color based on the final chip count
if chips[-1] < chips[0]:
color = 'limegreen'
elif chips[-1] > chips[0]:
color = 'orangered'
else:
color = 'dodgerblue'
mp.plot(chips, c=color, label='Chip')
mp.legend()
mp.show()
| [ "[email protected]" ] | |
1126c2887c7bf5192117f4eaef69b0120a23243f | 29b1b15e4fef90717ff7bf8b13ab9a23cdc17c51 | /postsproject/testapp/views.py | cc8ee2c2ba5629d911722b4ea30e6bdf9aaf35a5 | [] | no_license | deepawalekedar319/DjangoProjects | 93fe59812593a1e1b8f542c8c5b1642bc95f6da4 | 1780b703a3022ea17dc188ad98b0f17bb14fa12f | refs/heads/main | 2023-09-03T04:48:21.201822 | 2021-11-08T05:28:00 | 2021-11-08T05:28:00 | 425,706,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | from django.shortcuts import render , get_object_or_404
from testapp.models import Posts
def post_view(request):
post_list = Posts.objects.all()
return render(request,'testapp/posts_list.html',{'post_list':post_list})
def post_detail_view(request, year, month, day, post):
    post = get_object_or_404(Posts, slug=post, status='published', publish__year=year, publish__month=month, publish__day=day)
    return render(request, 'testapp/post_detail.html', {'post': post})
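# Illustrative URL wiring for these views (added sketch; the route patterns and names below are
# assumptions, not part of this repository):
#     from django.urls import path
#     from testapp import views
#
#     urlpatterns = [
#         path('', views.post_view, name='post_list'),
#         path('<int:year>/<int:month>/<int:day>/<slug:post>/',
#              views.post_detail_view, name='post_detail'),
#     ]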
| [ "[email protected]" ] | |
06f63f7eb1f9eb33e61b437d376faf87f4639603 | a4191cc76c1d733c58bbb6692a75b0885bb74e13 | /Control-Planning-master/Planning/planning_tools/carla_global_path.py | 35d4fa8e7156ab76d6e1b702c5d87efd4b43ee7e | [] | no_license | js7850/sonjunseong | 69173d67e34ce2085d2e0617fbefa02cbc6676b5 | 0d8bb7c87fac07634abd4b002f1111108b42e939 | refs/heads/main | 2023-07-16T15:28:00.459349 | 2021-08-28T13:08:19 | 2021-08-28T13:08:19 | 400,545,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,383 | py | import rospy
from sensor_msgs.msg import JointState
from geometry_msgs.msg import PoseStamped
import csv
import matplotlib
import threading
import math
rospy.init_node("carla_glodal_path")
class publisher:
def __init__(self):
self.x=0
self.y=0
self.nodes = []
self.node_x = []
self.node_y = []
self.path_x = []
self.path_y = []
self.hz = rospy.Rate(20)
self.get_global_path()
self.path_pub = rospy.Publisher("/hdmap/path", JointState, queue_size = 1)
self.utm_sub = rospy.Subscriber("utm_fix", PoseStamped, self.utm_CallBack)
self.id = 0
self.len = 30
Th1 = threading.Thread(target=self.publishing)
Th1.daemon = True
Th1.start()
rospy.spin()
def utm_CallBack(self, data):
self.x = data.pose.position.x
self.y = data.pose.position.y
def get_global_path(self):
# name =raw_input("input carla path :")
name = "E6_path1.csv"
with open(name, "r") as f:
reader = csv.reader(f, delimiter = ',')
for rows in reader:
x = float(rows[0])
y = float(rows[1])
self.nodes.append([x,y])
def publishing(self):
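        # Descriptive comment added for readability: block until the first UTM fix arrives, then
        # on every iteration find the first path node within 5 m of the current position and
        # publish the following self.len (30) nodes, offset by k=2, as a JointState message
        # (x values in position[], y values in velocity[]) at roughly 20 Hz.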
while self.x==0 or self.y==0:
rospy.loginfo("Waiting for start")
while not rospy.is_shutdown():
id = 0
msg = JointState()
for i in self.nodes:
#print( math.sqrt((i[0]-self.x)**2+(i[1] - self.y)**2), i, self.x, self.y)
if math.sqrt((i[0]-self.x)**2+(i[1] - self.y)**2) < 5:
self.id = id
break
id+=1
else:
rospy.loginfo("Path is gone")
continue
k=2
for i in range(self.id+k, self.id + self.len+k):
x = 0
y = 0
try:
x = self.nodes[i][0]
y = self.nodes[i][1]
except:
rospy.loginfo("# WARNING: Path end")
break
msg.position.append(x)
msg.velocity.append(y)
rospy.loginfo("publishing {}".format(self.id))
rospy.sleep(0.05)
self.path_pub.publish(msg)
if __name__ == "__main__":
a = publisher()
| [ "[email protected]" ] | |
c139c36375ea4de3c9eed6ff3bcc1abc1ea29fd7 | e1ef59f60ecc011305e50d12f3fa480937b61e34 | /Problem Solving/Implementations/Utopian Tree.py | 8c3cddf1addfff4bddbb325756151b9b8708273f | [] | no_license | prashanthr11/HackerRank | 7ef3c32c3b697f54880fcd5a607245d313b12e05 | 2a01cb28f2f1a8ef616026a126d95bc9e76dd903 | refs/heads/master | 2021-07-08T21:17:13.273389 | 2020-12-27T19:00:25 | 2020-12-27T19:00:25 | 222,383,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the utopianTree function below.
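# Growth rule implemented below: starting from height 1, even-numbered cycles (0, 2, ...) double
# the height and odd-numbered cycles add 1; the height after n cycles is returned.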
def utopianTree(n):
height = 1
for i in range(n):
if i % 2:
height += 1
else:
height *= 2
return height
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
result = utopianTree(n)
fptr.write(str(result) + '\n')
fptr.close()
| [ "[email protected]" ] | |
868926954c42f7c803e11a7d7309f3de7949bd8e | f2b31c29d30959ab484187ed5754552a644c0256 | /setup.py | 847e732192b6e4fa4a48c36cf0cb2976f239929d | [
"Apache-2.0"
] | permissive | pkimber/old_moderate | d385dd679909eaf3249204f39626a7f711465cc9 | 761233f9a9c660026f5197d0b5812bf5db28afbe | refs/heads/master | 2016-09-05T15:34:53.960675 | 2014-01-07T14:34:53 | 2014-01-07T14:34:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | import os
from distutils.core import setup
def read_file_into_string(filename):
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
try:
return open(filepath).read()
except IOError:
return ''
def get_readme():
for name in ('README', 'README.rst', 'README.md'):
if os.path.exists(name):
return read_file_into_string(name)
return ''
setup(
name='pkimber-moderate',
packages=['moderate', 'moderate.tests', 'moderate.management', 'moderate.management.commands'],
version='0.0.06',
description='Moderate',
author='Patrick Kimber',
author_email='[email protected]',
url='[email protected]:pkimber/moderate.git',
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Office/Business :: Scheduling',
],
long_description=get_readme(),
)
| [ "[email protected]" ] | |
911812232fc6d726abaf7d55d5532bc31478dddd | a560269290749e10466b1a29584f06a2b8385a47 | /Notebooks/py/andyyang/titanic-survival-project/titanic-survival-project.py | 7c134a9a6ff8bbf40ffc7c24c0e56fa07c3b4c8e | [] | no_license | nischalshrestha/automatic_wat_discovery | c71befad1aa358ae876d5494a67b0f4aa1266f23 | 982e700d8e4698a501afffd6c3a2f35346c34f95 | refs/heads/master | 2022-04-07T12:40:24.376871 | 2020-03-15T22:27:39 | 2020-03-15T22:27:39 | 208,379,586 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,700 | py | #!/usr/bin/env python
# coding: utf-8
# ## Introduction
# The Data set I analyzed is **Titanic Data**.
# First we need to understand the data, and then ask questions.
# The description of this csv file gives the definitions of the variables below.
#
# VARIABLE DESCRIPTIONS:
# survival Survival
# (0 = No; 1 = Yes)
# pclass Passenger Class
# (1 = 1st; 2 = 2nd; 3 = 3rd)
# name Name
# sex Sex
# age Age
# sibsp Number of Siblings/Spouses Aboard
# parch Number of Parents/Children Aboard
# ticket Ticket Number
# fare Passenger Fare
# cabin Cabin
# embarked Port of Embarkation
# (C = Cherbourg; Q = Queenstown; S = Southampton)
#
# ## Ask Question
# Then we can ask questions.
# Kaggle suggests some groups of people were more likely to survive, like **children, women, and the upper class.**
# So I will ask, **do these factors really relate to the survival rate?**
# **Add: Do different sexes in the same class have different survival rates?**
# **Or does the same sex have different survival rates in different classes?**
#
# Furthermore, when I searched the structure and cabin locations of the Titanic online,
# I found the cabin factor may also be connected to the survival rate;
# for example, some cabins are far from the boat deck and house crowds of people.
# Therefore, I will ask, ~~**Will people living in different cabins have different survival rates?**~~
# **Revise: What is the connection between fare and survival rate?**
#
# Let's wrangle data.
# In[ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'pylab inline')
# Get a glimpse of data
titanic_df = pd.read_csv('../input/train.csv')
titanic_df.head()
# In[ ]:
# Check the information of our data
titanic_df.info()
# ## Revision Question
# As we see, unfortunately, there is too little data about the cabins,
# and some rows even list several cabin names.
# We need to change the question, or find a way to work around this.
#
# At first, I try to solve it.
# As different class people will live in the different area and different room. Like [there](http://www.dummies.com/education/history/titanic-facts-the-layout-of-the-ship/) said.
# And for different class, the ticket price is also [differernt](http://www.dummies.com/education/history/suites-and-cabins-for-passengers-on-the-titanic/), like 3-8 pounds for 3rd class and 12 pounds for 2nd class.
# So, I come up with an idea. Can we guess their room from their ticket price?
# However, when I searched for the rooms corresponding to different classes,
# I found that for some floors, like D, E, and F, it is hard to determine which class lived there.
# But for 1st class, they mainly live from A to E, 2nd class D to F, and 3rd class F to G.
#
# Therefore, people with different fare will live in different area.
# I change my Question to **What is connection between fare and survive rate?**
#
# Also, we only have 714 age values here, so we need to consider a way to deal with the missing ones.
# For example, we can fill them with random numbers in a particular range, or we can try some other ways.
# In[ ]:
# At first fill in the missing age data
# Get average and std to calculate the limits of the random numbers
# Get NAN number to determine how many data need to generate
fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,4))
# Plot original age data
titanic_df['Age'].dropna().hist(bins=70, ax=axis1, ls='solid', lw=0.2, ec='black')
average_age = titanic_df.Age.mean()
std_age = titanic_df.Age.std()
nan_age_number = titanic_df.Age.isnull().sum()
# Generate
rand_age = np.random.randint(average_age - std_age, average_age + std_age,
size = nan_age_number)
# Fill in
titanic_df.loc[np.isnan(titanic_df['Age']), 'Age'] = rand_age
# Plot result
titanic_df['Age'].hist(bins=70, ax=axis2, ls='solid', lw=0.2, ec='black')
axis1.set_title('Before Fill In')
axis1.set_xlabel('Age')
axis1.set_ylabel('People Number')
axis2.set_title('After Fill In')
axis2.set_xlabel('Age')
axis2.set_ylabel('People Number')
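# In[ ]:
# An alternative imputation, shown for comparison only (added sketch, not part of the original
# analysis): fill missing ages with the median age of each passenger class. It re-reads the raw
# csv and does not modify titanic_df, whose missing ages were already filled above.
raw_df = pd.read_csv('../input/train.csv')
median_filled_age = raw_df['Age'].fillna(raw_df.groupby('Pclass')['Age'].transform('median'))
median_filled_age.hist(bins=70)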
# In[ ]:
# At first drop data it seems useless for this analysis
# they are ID, name, ticket number, embark place, cabin, SibSp, and Parch
titanic_df = titanic_df.drop(['PassengerId','Name','Ticket','Embarked','Cabin','SibSp','Parch'],axis = 1)
titanic_df.head()
# In[ ]:
# At first let's analyse from sex and age view
# Divide children from male and female type
titanic_df.loc[titanic_df['Age'] <= 16, 'Sex'] = 'child'
titanic_df = titanic_df.drop(['Age'],axis=1)
titanic_df.head()
# In[ ]:
# Give more descriptive labels for Survived and Pclass
titanic_df['Survival'] = titanic_df.Survived.map({0:'Died',1:'Survived'})
titanic_df['Class'] = titanic_df.Pclass.map({1:'1st Class',2:'2nd Class',3:'3rd Class'})
# Child and not child
titanic_df['Child'] = titanic_df.Sex.map({'child':'Is Child','female':'Not Child','male':'Not Child'})
titanic_df.head()
# In[ ]:
# Draw pictures to see more clearly of the relations
# about sex and age factor
sns.factorplot(data=titanic_df,x='Sex',y='Survived',kind="violin",size=4,aspect=3)
plt.yticks([0,1], ['Died', 'Survived'])
# Plot basic information about sex and age
fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,5))
sns.countplot(data=titanic_df, x='Sex',ax=axis1)
sns.countplot(data=titanic_df,x='Survived',hue='Sex',order=[0,1],ax=axis2)
plt.xticks([0,1], ['Died', 'Survived'])
fig, (axis3,axis4) = plt.subplots(1,2,figsize=(15,5))
# Group data by sex and whether child
sex_survi_groups = titanic_df[['Sex','Survived']].groupby(['Sex'],as_index=True)
#Divide into three groups
men_group = sex_survi_groups.get_group('male')
women_group = sex_survi_groups.get_group('female')
children_group = sex_survi_groups.get_group('child')
# Plot survive rate between different sex
sns.barplot(data=titanic_df[['Sex','Survived']],x='Sex',y='Survived',order=['male','female'],ax=axis3)
axis3.set_ylabel("Survival Rate")
# Draw Child and Non-Child plot
sns.barplot(data=titanic_df[['Child', 'Survived']],x='Child',y='Survived',order=['Is Child','Not Child'],ax=axis4)
axis4.set_ylabel("Survival Rate")
axis3.set_title('Survive rate compare by Sex')
axis4.set_title('Survive rate compare by whether child')
# In[ ]:
# Statistic Hypothesis Test
# Chi-Square Test for Independence
# State the hypothesis: H0: Gender and survival rate are independent
from scipy.stats import chi2_contingency
men_women_group = pd.concat([men_group, women_group])
gender_pivot = pd.pivot_table(data=men_women_group[['Survived','Sex']],index='Survived',columns=['Sex'],
aggfunc=len)
chi2, p_value, dof, expected = chi2_contingency(gender_pivot)
print("Results of Chi-Squared test on Sex to Survival.")
print("Chi-Square Score = %s"%str(chi2))
print("Pvalue = %s\n"%str(p_value))
# In[ ]:
# Test for child and non-child
child_pivot = pd.pivot_table(data=titanic_df[['Survived','Child']],index='Survived',columns=['Child'],
aggfunc=len)
chi2, p_value, dof, expected = chi2_contingency(child_pivot)
print("Results of Chi-Squared test on Child to Survival.")
print("Chi-Square Score = %s"%str(chi2))
print("Pvalue = %s\n"%str(p_value))
# ## Analyze above test about Sex and Children
# We can see that for men and women the Chi-Square score is a large number, and the p-value is very small, even far smaller than 0.01.
# Therefore we can confidently reject our null hypothesis and say **gender and survival rate are related**.
#
# For child versus non-child, the result is not as significant as for sex,
# but the p-value is still small enough to say with 99% confidence that **being a child or not relates to the survival rate**.
# In[ ]:
# Then let's analyze class factor
sns.factorplot(data=titanic_df,x='Class',y='Survived',kind="violin", order=['1st Class','2nd Class','3rd Class'],size=4,aspect=3)
plt.yticks([0,1],['Died','Survived'])
# Group by class and take mean
class_survi_prec = titanic_df[['Class','Survived']].groupby(['Class'],as_index=False).mean()
# Compare number and survived rate between three classes
fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,5))
sns.countplot(data=titanic_df, x='Class',order=['1st Class','2nd Class','3rd Class'],ax=axis1)
sns.barplot(data=class_survi_prec,x='Class',y='Survived', order=['1st Class','2nd Class','3rd Class'],ax=axis2)
axis2.set_ylabel('Survival Rate')
# In[ ]:
# Statistic Hypothesis Test:
# H0: Class and Survival rate are independent
class_pivot = pd.pivot_table(data=titanic_df[['Survived','Class']],index='Survived',columns=['Class'],
aggfunc=len)
chi2, p_value, dof, expected = chi2_contingency(class_pivot)
print("Results of Chi-Squared test on Class to Survival.")
print("Chi-Square Score = %s"%str(chi2))
print("Pvalue = %s\n"%str(p_value))
# ## Analyze above test about Pclass
# At first we can see from the graphs that there are actually some differences between the three classes.
# 1st class has the highest survival rate, 2nd class follows, and then 3rd class.
# Especially, 3rd class is very different from the upper two classes.
# **3rd class has much lower survive rate than other classes.**
#
# To confirm this observation, we carry on some tests.
# At first carry on Chi-Square Test on these three classes, we have a high Chi-Square score and a very low p-value.
# So we can confidently reject its H0, and say **Class actually relate to survive rates**.
#
# We can conclude that **Class actually affect survive rate, particularly between upper two classes and Class 3**.
# In[ ]:
# Last let's analyze fare factor
# Tried to plot on a logarithmic x-axis as a comment suggested, but it does not look so good
# fig = titanic_df['Fare'].plot(kind='hist', figsize=(15,3), bins=100, logx=True,
# ls='solid', lw=1, ec='black')
fig = titanic_df['Fare'].plot(kind='hist', figsize=(15,3), bins=100, ls='solid', lw=0.5, ec='black')
ax = fig.axes
ax.set_xlabel('Fare')
ax.set_ylabel('People Number')
ax.set_title('People Distribution with Fare')
# We clear out people have very high fare
normal_people = titanic_df[['Fare','Survived']][titanic_df['Fare']<200]
fare_survi_group = normal_people[['Fare','Survived']].groupby(['Survived'],as_index=False)
# Survive condition for people with normal fare
figure(2)
sns.factorplot(data=normal_people,x='Survived',y='Fare',aspect=2)
plt.xticks([0,1],['Died','Survived'])
# In[ ]:
# Statistical test: the variable is continuous, so we choose a t-test
# H0: People survived and not survived have same fare, mean(survive_fare)=mean(non_survive_fare)
from scipy.stats import ttest_ind
ttest_ind(fare_survi_group.get_group(0)['Fare'],fare_survi_group.get_group(1)['Fare'])
# In[ ]:
# Obviously We can guess fare is related to passenger class
# from scatter Plot we can see only first class have very high fare
titanic_df.plot.scatter(x='Pclass',y='Fare')
plt.xticks([1,2,3],['1st Class','2nd Class','3rd Class'])
# We calculate their correlation to confirm
titanic_df[['Fare', 'Pclass']].corr(method='spearman')
# ## Analyze about fare
# At first, we can find there are some people with very high fare, and we clear them out for a fair analysis.
# Then from bar chart, we can find people survived have higher mean fare than people not survived.
#
# We can do a t-test to confirm this.
# From the t-test, the p-value is so small that we can confidently say people who survived and people who did not have different fares.
# And more, **people who survived paid higher fares than people who did not.**
#
# However, normally only a man with high class can afford a high fare.
# So when we calculate the correlation between Fare and Pclass, there are actually some relation between them.
# **People with lower class have lower fare**.
#
# One of the sad facts is that if we look at 2nd Class and 3rd Class passengers, the fares they paid show not much difference. But as we will see in the later part, their survival rates show a very big difference, especially for females and children. That is because of this kind of class separation.
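# In[ ]:
# A quick numeric check of the claim above (added sketch): median fare paid in each class.
titanic_df.groupby('Class')['Fare'].median()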
# In[ ]:
# To explore more details
# let's see the sex distribution in different classes
figure(figsize=(8,5))
sns.countplot(data=titanic_df,x='Class',hue='Sex',order=['1st Class','2nd Class','3rd Class'])
# In[ ]:
# From the above we can see class 3 has a large percentage of men,
# so we can guess the low survival rate of men is mainly caused by class 3 men;
# the gap in survival rate between the sexes in the higher classes may not be very distinct
# Draw chart of different classes's survive rate detail
class_sex_group = titanic_df[['Sex','Class','Survived']].groupby(['Sex','Class'],as_index=False)
class_sex_survive_prec = class_sex_group.mean()
figure(figsize=(8,5))
fig = sns.barplot(data=class_sex_survive_prec, x='Sex',y='Survived',hue='Class', order=['male','female','child'])
fig.axes.set_ylabel('Survival Rate')
# In[ ]:
# Between class1 and class2 women they have similar survive rates
# Chi-Square test
# H0 = For Class1 and Class2 female, the survive rate and class is independent
female_class1_class2 = titanic_df[(titanic_df['Sex']=='female') & ((titanic_df['Class']=='1st Class') | (titanic_df['Class']=='2nd Class') )]
class_pivot = pd.pivot_table(data=female_class1_class2[['Survived','Class']],index='Survived',columns=['Class'],
aggfunc=len)
chi2, p_value, dof, expected = chi2_contingency(class_pivot)
print("Results of Chi-Squared test on Class to Survival on upper two classes female.")
print("Chi-Square Score = %s"%str(chi2))
print("Pvalue = %s\n"%str(p_value))
# In[ ]:
# Also between class1 and class2 child they have much similar survive rates
# Do test
child_class1_class2 = titanic_df[(titanic_df['Sex']=='child') & ((titanic_df['Class']=='1st Class') | (titanic_df['Class']=='2nd Class') )]
class_pivot = pd.pivot_table(data=child_class1_class2[['Survived','Class']],index='Survived',columns=['Class'],
aggfunc=len)
chi2, p_value, dof, expected = chi2_contingency(class_pivot)
print("Results of Chi-Squared test on Class to Survival on upper two classes child.")
print("Chi-Square Score = %s"%str(chi2))
print("Pvalue = %s\n"%str(p_value))
# In[ ]:
# And class 2 and class 3 males also have similar survival rates
male_class2_class3 = titanic_df[(titanic_df['Sex']=='male') & ((titanic_df['Class']=='3rd Class') | (titanic_df['Class']=='2nd Class') )]
class_pivot = pd.pivot_table(data=male_class2_class3[['Survived','Class']],index='Survived',columns=['Class'],
aggfunc=len)
print("Results of Chi-Squared test on Class to Survival on lower two classes male.")
print("Chi-Square Score = %s"%str(chi2))
print("Pvalue = %s\n"%str(p_value))
# ## Analysis of class and sex detail
# From the chart, we can see **women actually have a higher survival rate than men, even within each class.**
# And **1st class men have a higher survival rate, while 3rd class children and women have a lower survival rate.**
#
# However, when we test class 1 female and class 2 female, class 1 child and class 2 child, as well as class 2 male and class 3 male,
# we cannot reject the null hypotheses at a high significance level.
# So we can conclude that even though, overall, a higher class has a higher survival rate,
# **for women and children, class 1 and class 2 show not much difference;**
# **for males, class 2 and class 3 show not much difference.**
# ## Summary
# ### First Question: Sex and Age factors with Survive rate
# From this violin chart, we can see clearly the survived distribution of male, female, and child.
# 
# We can see the opposite condition of male and female, **most of the males died and most of the females lived**.
# For children, it is nearly half and half.
# We can look at more details at bar tables.
# 
# It exactly shows female have higher survival rate than male and children have higher survival rate than non-children.
#
# We can also use statistical hypothesis test confirm this.
# We use Chi-Square-test, get Chi-Square Score and pvalue. These actually reject null hypothesis, and confirm our hypothesis in a high significance.
#
# ### Second Question: Class factor with Survive rate
# As above, we show violin plot first.
# We can see most of the 1st class survived, most of 3rd class died, and nearly half of 2nd class survived.
# 
# Then show people number of the three classes and respective survive rate.
# 
# It seems higher class have higher survive rate, and 3rd class has most of the people, but lowest survive rate.
#
# Using Chi-Square-test analyse three class, It shows **Class actually relate to survive rate, especially between class 3 and upper classes**.
#
# ### Third Question: Fare factor with Survive rate
# At first, show people distribution of different fares.
# 
# We can see there are some outliers who paid very high fares.
# To be fair, we clean them out, and plot the mean fare for the survived and non-survived groups.
# 
# It shows clearly **people survived with higher fare.**
# T-test also confirms our idea.
#
# ### Fourth Question: When going to each class and each sex, do the above conclusions still work?
# At first, plot bar chart for each sex and coordinate class.
# Some interesting things emerge out.
# 
# For males, it is no longer the case that a higher class has a higher survival rate:
# 2nd class men even have a lower survival rate than 3rd class men.
# For females and children, 1st class and 2nd class seem to have similar survival rates.
#
# To confirm our observation, carry on Chi-Square-test between 1st and 2nd class female and child, and 2nd and 3rd class men.
# At the 99% significance level, none of the three null hypotheses can be rejected.
# Therefore, the conclusions above do not hold in these cases.
#
# ## Conclusion
# 1. **Women have higher survival rate than men and children, no matter whole people or separate classes.**
# 2. **Children have higher survival rate than whole adult group.**
# 3. **From the whole view, higher class has higher survival rate.**
# 4. **Survived people have higher price of their fares.**
# 5. **If we look at details, some of above conclusions fail.**
# 1. **For 1st and 2nd classes, women and children have similar survival rate.**
# 2. **For 2nd and 3rd classes, men have similar survival rate.**
# ## Reference
#
# 1. [Kaggle: Titanic variable descriptions](https://www.kaggle.com/c/titanic/data)
# 2. [Wikipedia: Titanic sinking details](https://en.wikipedia.org/wiki/Sinking_of_the_RMS_Titanic)
# 3. [The design of Titanic](http://www.ultimatetitanic.com/design/#.WDl3Et8xA8o)
# 4. [Suites and cabins for passengers on the Titanic](http://www.dummies.com/education/history/suites-and-cabins-for-passengers-on-the-titanic/)
# 5. [Pandas documents](http://pandas.pydata.org/pandas-docs/stable/index.html)
# 6. [Room allocation in Titanic](http://www.dummies.com/education/history/titanic-facts-the-layout-of-the-ship/)
# 7. [Ticket price for different class](http://www.dummies.com/education/history/suites-and-cabins-for-passengers-on-the-titanic/)
# 8. [Advanced Functionality in Seabron](http://blog.insightdatalabs.com/advanced-functionality-in-seaborn/)
# 9. [Panda dataframe add colums](http://stackoverflow.com/questions/12555323/adding-new-column-to-existing-dataframe-in-python-pandas)
# 10. [Udacity Inferential Statistics](https://classroom.udacity.com/courses/ud201)
# 11. [Kaggle a journey through titanic notebook](https://www.kaggle.com/omarelgabry/titanic/a-journey-through-titanic/notebook)
# 12. [Markdown Cheatsheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)
# 13. [How to deal with SettingWithCopyWarning in Pandas?](http://stackoverflow.com/questions/20625582/how-to-deal-with-settingwithcopywarning-in-pandas)
# 14. [What statistical analysis should I use?](http://www.ats.ucla.edu/stat/stata/whatstat/whatstat.htm)
# 15. [Seaborn Documents](http://seaborn.pydata.org/index.html)
# 16. [Scipy stats](https://docs.scipy.org/doc/scipy/reference/stats.html)
# 17. [Matplotlib figure document](http://matplotlib.org/api/figure_api.html)
# 18. [Pandas dataframe add a field based on multiple if statements](http://stackoverflow.com/questions/21733893/pandas-dataframe-add-a-field-based-on-multiple-if-statements)
# 19. [Correlation(Pearson, Kendall, Spearman)](http://www.statisticssolutions.com/correlation-pearson-kendall-spearman/)
| [ "[email protected]" ] | |
ff1f8886eaa249084c6246e1aa3f939e2a40708b | 38a9e2780ac8b800c336207a5c0a621eb1277a53 | /tests/test_planners/test_planner_latin_hypercube.py | 6c1b96b0e2543493d9ea123d6a11489bec8a8f51 | [
"MIT"
] | permissive | priyansh-1902/olympus | 2454850413bb0562a1bfe20ab35fa7e770367323 | f57ad769918c0d5d805c439ab5ffbd180af698fa | refs/heads/main | 2023-06-21T05:58:49.118264 | 2021-08-07T22:19:41 | 2021-08-07T22:19:41 | 342,454,516 | 0 | 0 | MIT | 2021-08-07T22:19:41 | 2021-02-26T03:43:08 | Python | UTF-8 | Python | false | false | 1,148 | py | #!/usr/bin/env python
import pytest
from olympus import Observations, ParameterVector
from olympus.planners import LatinHypercube
# use parametrize to test multiple configurations of the planner
#@pytest.mark.parametrize("disp, eps, ftol, gtol, maxcor, maxfun, maxiter, maxls",
# [(None, 1e-8, 2.220446049250313e-9, 1e-5, 10, 15000, 15000, 20),
# (True, 1e-9, 2.220446049250313e-10, 1e-6, 15, 20000, 20000, 30)])
def test_planner_ask_tell(two_param_space):#, disp, eps, ftol, gtol, maxcor, maxfun, maxiter, maxls):
# planner = Lbfgs(disp=disp, eps=eps, ftol=ftol, gtol=gtol, maxcor=maxcor, maxfun=maxfun, maxiter=maxiter, maxls=maxls)
planner = LatinHypercube()
planner.set_param_space(param_space=two_param_space)
param = planner.ask()
value = ParameterVector().from_dict({'objective': 0.})
obs = Observations()
obs.add_observation(param, value)
planner.tell(observations=obs)
if __name__ == '__main__':
from olympus import Parameter, ParameterSpace
param_space = ParameterSpace()
param_space.add(Parameter(name='param_0'))
param_space.add(Parameter(name='param_1'))
test_planner_ask_tell(param_space)
| [ "[email protected]" ] | |
c06d4f9d071a700255fe8bf3776aadae2ec31041 | 29c3595a4e1f8de9382650610aee5a13e2a135f6 | /venv/Lib/site-packages/twisted/internet/defer.py | 254bc9e1dac9e254c90db12291cd31402cf64fe4 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | zoelesv/Smathchat | 1515fa56fbb0ad47e1859f6bf931b772446ea261 | 5cee0a8c4180a3108538b4e4ce945a18726595a6 | refs/heads/main | 2023-08-04T14:47:21.185149 | 2023-08-02T15:53:20 | 2023-08-02T15:53:20 | 364,627,392 | 9 | 1 | MIT | 2023-08-02T15:53:21 | 2021-05-05T15:42:47 | Python | UTF-8 | Python | false | false | 73,792 | py | # -*- test-case-name: twisted.test.test_defer -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support for results that aren't immediately available.
Maintainer: Glyph Lefkowitz
@var _NO_RESULT: The result used to represent the fact that there is no
result. B{Never ever ever use this as an actual result for a Deferred}. You
have been warned.
@var _CONTINUE: A marker left in L{Deferred.callback}s to indicate a Deferred
chain. Always accompanied by a Deferred instance in the args tuple pointing
at the Deferred which is chained to the Deferred which has this marker.
"""
import traceback
import types
import warnings
from asyncio import iscoroutine
from functools import wraps
from sys import exc_info, implementation, version_info
from typing import Optional
import attr
from incremental import Version
# Twisted imports
from twisted.logger import Logger
from twisted.python import failure, lockfile
from twisted.python.compat import cmp, comparable
from twisted.python.deprecate import deprecated, warnAboutFunction
try:
from contextvars import copy_context as _copy_context
_contextvarsSupport = True
except ImportError:
_contextvarsSupport = False
class _NoContext:
@staticmethod
def run(f, *args, **kwargs):
return f(*args, **kwargs)
# typing ignored due to:
# https://github.com/python/typeshed/issues/4249
def _copy_context(): # type: ignore[misc]
return _NoContext
log = Logger()
class AlreadyCalledError(Exception):
pass
class CancelledError(Exception):
"""
This error is raised by default when a L{Deferred} is cancelled.
"""
class TimeoutError(Exception):
"""
This error is raised by default when a L{Deferred} times out.
"""
class NotACoroutineError(TypeError):
"""
This error is raised when a coroutine is expected and something else is
encountered.
"""
def logError(err):
"""
Log and return failure.
This method can be used as an errback that passes the failure on to the
next errback unmodified. Note that if this is the last errback, and the
deferred gets garbage collected after being this errback has been called,
the clean up code logs it again.
"""
log.failure(None, err)
return err
def succeed(result: object) -> "Deferred":
"""
Return a L{Deferred} that has already had C{.callback(result)} called.
This is useful when you're writing synchronous code to an
asynchronous interface: i.e., some code is calling you expecting a
L{Deferred} result, but you don't actually need to do anything
asynchronous. Just return C{defer.succeed(theResult)}.
See L{fail} for a version of this function that uses a failing
L{Deferred} rather than a successful one.
@param result: The result to give to the Deferred's 'callback'
method.
"""
d = Deferred()
d.callback(result)
return d
def fail(result: object = None) -> "Deferred":
"""
Return a L{Deferred} that has already had C{.errback(result)} called.
See L{succeed}'s docstring for rationale.
@param result: The same argument that L{Deferred.errback} takes.
@raise NoCurrentExceptionError: If C{result} is L{None} but there is no
current exception state.
"""
d = Deferred()
d.errback(result)
return d
def execute(callable, *args, **kw):
"""
Create a L{Deferred} from a callable and arguments.
Call the given function with the given arguments. Return a L{Deferred}
which has been fired with its callback as the result of that invocation
or its C{errback} with a L{Failure} for the exception thrown.
"""
try:
result = callable(*args, **kw)
except BaseException:
return fail()
else:
return succeed(result)
def maybeDeferred(f, *args, **kw):
"""
Invoke a function that may or may not return a L{Deferred}.
Call the given function with the given arguments. If the returned
object is a L{Deferred}, return it. If the returned object is a L{Failure},
wrap it with L{fail} and return it. Otherwise, wrap it in L{succeed} and
return it. If an exception is raised, convert it to a L{Failure}, wrap it
in L{fail}, and then return it.
@type f: Any callable
@param f: The callable to invoke
@param args: The arguments to pass to C{f}
@param kw: The keyword arguments to pass to C{f}
@rtype: L{Deferred}
@return: The result of the function call, wrapped in a L{Deferred} if
necessary.
"""
try:
result = f(*args, **kw)
except BaseException:
return fail(failure.Failure(captureVars=Deferred.debug))
if isinstance(result, Deferred):
return result
elif isinstance(result, failure.Failure):
return fail(result)
else:
return succeed(result)
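# Illustrative usage of maybeDeferred (comment added; parseGreeting is a made-up example
# function, not part of Twisted):
#
#     def parseGreeting(data):
#         return data.decode("utf-8")
#
#     d = maybeDeferred(parseGreeting, b"hello")
#     d.addCallback(print)
#
# The caller receives a Deferred whether parseGreeting returns normally or raises.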
@deprecated(
Version("Twisted", 17, 1, 0),
replacement="twisted.internet.defer.Deferred.addTimeout",
)
def timeout(deferred):
deferred.errback(failure.Failure(TimeoutError("Callback timed out")))
def passthru(arg):
return arg
def setDebugging(on):
"""
Enable or disable L{Deferred} debugging.
When debugging is on, the call stacks from creation and invocation are
recorded, and added to any L{AlreadyCalledError}s we raise.
"""
Deferred.debug = bool(on)
def getDebugging():
"""
Determine whether L{Deferred} debugging is enabled.
"""
return Deferred.debug
# See module docstring.
_NO_RESULT = object()
_CONTINUE = object()
class Deferred:
"""
This is a callback which will be put off until later.
Why do we want this? Well, in cases where a function in a threaded
program would block until it gets a result, for Twisted it should
not block. Instead, it should return a L{Deferred}.
This can be implemented for protocols that run over the network by
writing an asynchronous protocol for L{twisted.internet}. For methods
that come from outside packages that are not under our control, we use
threads (see for example L{twisted.enterprise.adbapi}).
For more information about Deferreds, see doc/core/howto/defer.html or
U{http://twistedmatrix.com/documents/current/core/howto/defer.html}
When creating a Deferred, you may provide a canceller function, which
will be called by d.cancel() to let you do any clean-up necessary if the
user decides not to wait for the deferred to complete.
@ivar called: A flag which is C{False} until either C{callback} or
C{errback} is called and afterwards always C{True}.
@type called: L{bool}
@ivar paused: A counter of how many unmatched C{pause} calls have been made
on this instance.
@type paused: L{int}
@ivar _suppressAlreadyCalled: A flag used by the cancellation mechanism
which is C{True} if the Deferred has no canceller and has been
cancelled, C{False} otherwise. If C{True}, it can be expected that
C{callback} or C{errback} will eventually be called and the result
should be silently discarded.
@type _suppressAlreadyCalled: L{bool}
@ivar _runningCallbacks: A flag which is C{True} while this instance is
executing its callback chain, used to stop recursive execution of
L{_runCallbacks}
@type _runningCallbacks: L{bool}
@ivar _chainedTo: If this L{Deferred} is waiting for the result of another
L{Deferred}, this is a reference to the other Deferred. Otherwise,
L{None}.
"""
called = False
paused = False
_debugInfo = None
_suppressAlreadyCalled = False
# Are we currently running a user-installed callback? Meant to prevent
# recursive running of callbacks when a reentrant call to add a callback is
# used.
_runningCallbacks = False
# Keep this class attribute for now, for compatibility with code that
# sets it directly.
debug = False
_chainedTo = None # type: Optional[Deferred]
def __init__(self, canceller=None):
"""
Initialize a L{Deferred}.
@param canceller: a callable used to stop the pending operation
scheduled by this L{Deferred} when L{Deferred.cancel} is invoked.
The canceller will be passed the deferred whose cancelation is
requested (i.e., self).
If a canceller is not given, or does not invoke its argument's
C{callback} or C{errback} method, L{Deferred.cancel} will
invoke L{Deferred.errback} with a L{CancelledError}.
Note that if a canceller is not given, C{callback} or
C{errback} may still be invoked exactly once, even though
defer.py will have already invoked C{errback}, as described
above. This allows clients of code which returns a L{Deferred}
to cancel it without requiring the L{Deferred} instantiator to
provide any specific implementation support for cancellation.
New in 10.1.
@type canceller: a 1-argument callable which takes a L{Deferred}. The
return result is ignored.
"""
self.callbacks = []
self._canceller = canceller
if self.debug:
self._debugInfo = DebugInfo()
self._debugInfo.creator = traceback.format_stack()[:-1]
def addCallbacks(
self,
callback,
errback=None,
callbackArgs=None,
callbackKeywords=None,
errbackArgs=None,
errbackKeywords=None,
):
"""
Add a pair of callbacks (success and error) to this L{Deferred}.
These will be executed when the 'master' callback is run.
@return: C{self}.
@rtype: a L{Deferred}
"""
assert callable(callback)
assert errback is None or callable(errback)
cbs = (
(callback, callbackArgs, callbackKeywords),
(errback or (passthru), errbackArgs, errbackKeywords),
)
self.callbacks.append(cbs)
if self.called:
self._runCallbacks()
return self
def addCallback(self, callback, *args, **kw):
"""
Convenience method for adding just a callback.
See L{addCallbacks}.
"""
return self.addCallbacks(callback, callbackArgs=args, callbackKeywords=kw)
def addErrback(self, errback, *args, **kw):
"""
Convenience method for adding just an errback.
See L{addCallbacks}.
"""
return self.addCallbacks(
passthru, errback, errbackArgs=args, errbackKeywords=kw
)
def addBoth(self, callback, *args, **kw):
"""
Convenience method for adding a single callable as both a callback
and an errback.
See L{addCallbacks}.
"""
return self.addCallbacks(
callback,
callback,
callbackArgs=args,
errbackArgs=args,
callbackKeywords=kw,
errbackKeywords=kw,
)
def addTimeout(self, timeout, clock, onTimeoutCancel=None):
"""
Time out this L{Deferred} by scheduling it to be cancelled after
C{timeout} seconds.
The timeout encompasses all the callbacks and errbacks added to this
L{defer.Deferred} before the call to L{addTimeout}, and none added
after the call.
If this L{Deferred} gets timed out, it errbacks with a L{TimeoutError},
unless a cancelable function was passed to its initialization or unless
a different C{onTimeoutCancel} callable is provided.
@param timeout: number of seconds to wait before timing out this
L{Deferred}
@type timeout: L{int}
@param clock: The object which will be used to schedule the timeout.
@type clock: L{twisted.internet.interfaces.IReactorTime}
@param onTimeoutCancel: A callable which is called immediately after
this L{Deferred} times out, and not if this L{Deferred} is
otherwise cancelled before the timeout. It takes an arbitrary
value, which is the value of this L{Deferred} at that exact point
in time (probably a L{CancelledError} L{Failure}), and the
C{timeout}. The default callable (if none is provided) will
translate a L{CancelledError} L{Failure} into a L{TimeoutError}.
@type onTimeoutCancel: L{callable}
@return: C{self}.
@rtype: a L{Deferred}
@since: 16.5
"""
timedOut = [False]
def timeItOut():
timedOut[0] = True
self.cancel()
delayedCall = clock.callLater(timeout, timeItOut)
def convertCancelled(value):
# if C{deferred} was timed out, call the translation function,
# if provdied, otherwise just use L{cancelledToTimedOutError}
if timedOut[0]:
toCall = onTimeoutCancel or _cancelledToTimedOutError
return toCall(value, timeout)
return value
self.addBoth(convertCancelled)
def cancelTimeout(result):
# stop the pending call to cancel the deferred if it's been fired
if delayedCall.active():
delayedCall.cancel()
return result
self.addBoth(cancelTimeout)
return self
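    # Illustrative usage of addTimeout (comment added; makeRequest is a placeholder assumed to
    # return a Deferred, and the snippet assumes the global reactor is used as the clock):
    #
    #     from twisted.internet import reactor
    #     d = makeRequest()
    #     d.addTimeout(30, reactor)
    #     d.addErrback(lambda f: f.trap(TimeoutError))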
def chainDeferred(self, d):
"""
Chain another L{Deferred} to this L{Deferred}.
This method adds callbacks to this L{Deferred} to call C{d}'s callback
or errback, as appropriate. It is merely a shorthand way of performing
the following::
self.addCallbacks(d.callback, d.errback)
When you chain a deferred d2 to another deferred d1 with
d1.chainDeferred(d2), you are making d2 participate in the callback
chain of d1. Thus any event that fires d1 will also fire d2.
However, the converse is B{not} true; if d2 is fired d1 will not be
affected.
Note that unlike the case where chaining is caused by a L{Deferred}
being returned from a callback, it is possible to cause the call
stack size limit to be exceeded by chaining many L{Deferred}s
together with C{chainDeferred}.
@return: C{self}.
@rtype: a L{Deferred}
"""
d._chainedTo = self
return self.addCallbacks(d.callback, d.errback)
def callback(self, result):
"""
Run all success callbacks that have been added to this L{Deferred}.
Each callback will have its result passed as the first argument to
the next; this way, the callbacks act as a 'processing chain'. If
the success-callback returns a L{Failure} or raises an L{Exception},
processing will continue on the *error* callback chain. If a
callback (or errback) returns another L{Deferred}, this L{Deferred}
will be chained to it (and further callbacks will not run until that
L{Deferred} has a result).
An instance of L{Deferred} may only have either L{callback} or
L{errback} called on it, and only once.
@param result: The object which will be passed to the first callback
added to this L{Deferred} (via L{addCallback}).
@raise AlreadyCalledError: If L{callback} or L{errback} has already been
called on this L{Deferred}.
"""
assert not isinstance(result, Deferred)
self._startRunCallbacks(result)
def errback(self, fail=None):
"""
Run all error callbacks that have been added to this L{Deferred}.
Each callback will have its result passed as the first
argument to the next; this way, the callbacks act as a
'processing chain'. Also, if the error-callback returns a non-Failure
or doesn't raise an L{Exception}, processing will continue on the
*success*-callback chain.
If the argument that's passed to me is not a L{failure.Failure} instance,
it will be embedded in one. If no argument is passed, a
L{failure.Failure} instance will be created based on the current
traceback stack.
Passing a string as `fail' is deprecated, and will be punished with
a warning message.
An instance of L{Deferred} may only have either L{callback} or
L{errback} called on it, and only once.
@param fail: The L{Failure} object which will be passed to the first
errback added to this L{Deferred} (via L{addErrback}).
Alternatively, a L{Exception} instance from which a L{Failure} will
be constructed (with no traceback) or L{None} to create a L{Failure}
instance from the current exception state (with a traceback).
@raise AlreadyCalledError: If L{callback} or L{errback} has already been
called on this L{Deferred}.
@raise NoCurrentExceptionError: If C{fail} is L{None} but there is
no current exception state.
"""
if fail is None:
fail = failure.Failure(captureVars=self.debug)
elif not isinstance(fail, failure.Failure):
fail = failure.Failure(fail)
self._startRunCallbacks(fail)
def pause(self):
"""
Stop processing on a L{Deferred} until L{unpause}() is called.
"""
self.paused = self.paused + 1
def unpause(self):
"""
Process all callbacks made since L{pause}() was called.
"""
self.paused = self.paused - 1
if self.paused:
return
if self.called:
self._runCallbacks()
def cancel(self):
"""
Cancel this L{Deferred}.
If the L{Deferred} has not yet had its C{errback} or C{callback} method
invoked, call the canceller function provided to the constructor. If
that function does not invoke C{callback} or C{errback}, or if no
canceller function was provided, errback with L{CancelledError}.
If this L{Deferred} is waiting on another L{Deferred}, forward the
cancellation to the other L{Deferred}.
"""
if not self.called:
canceller = self._canceller
if canceller:
canceller(self)
else:
# Arrange to eat the callback that will eventually be fired
# since there was no real canceller.
self._suppressAlreadyCalled = True
if not self.called:
# There was no canceller, or the canceller didn't call
# callback or errback.
self.errback(failure.Failure(CancelledError()))
elif isinstance(self.result, Deferred):
# Waiting for another deferred -- cancel it instead.
self.result.cancel()
def _startRunCallbacks(self, result):
if self.called:
if self._suppressAlreadyCalled:
self._suppressAlreadyCalled = False
return
if self.debug:
if self._debugInfo is None:
self._debugInfo = DebugInfo()
extra = "\n" + self._debugInfo._getDebugTracebacks()
raise AlreadyCalledError(extra)
raise AlreadyCalledError
if self.debug:
if self._debugInfo is None:
self._debugInfo = DebugInfo()
self._debugInfo.invoker = traceback.format_stack()[:-2]
self.called = True
self.result = result
self._runCallbacks()
def _continuation(self):
"""
Build a tuple of callback and errback with L{_CONTINUE}.
"""
return ((_CONTINUE, (self,), None), (_CONTINUE, (self,), None))
def _runCallbacks(self):
"""
Run the chain of callbacks once a result is available.
This consists of a simple loop over all of the callbacks, calling each
with the current result and making the current result equal to the
return value (or raised exception) of that call.
If L{_runningCallbacks} is true, this loop won't run at all, since
it is already running above us on the call stack. If C{self.paused} is
true, the loop also won't run, because that's what it means to be
paused.
The loop will terminate before processing all of the callbacks if a
L{Deferred} without a result is encountered.
If a L{Deferred} I{with} a result is encountered, that result is taken
and the loop proceeds.
@note: The implementation is complicated slightly by the fact that
chaining (associating two L{Deferred}s with each other such that one
will wait for the result of the other, as happens when a Deferred is
returned from a callback on another L{Deferred}) is supported
iteratively rather than recursively, to avoid running out of stack
frames when processing long chains.
"""
if self._runningCallbacks:
# Don't recursively run callbacks
return
# Keep track of all the Deferreds encountered while propagating results
# up a chain. The way a Deferred gets onto this stack is by having
# added its _continuation() to the callbacks list of a second Deferred
# and then that second Deferred being fired. ie, if ever had _chainedTo
# set to something other than None, you might end up on this stack.
chain = [self]
while chain:
current = chain[-1]
if current.paused:
# This Deferred isn't going to produce a result at all. All the
# Deferreds up the chain waiting on it will just have to...
# wait.
return
finished = True
current._chainedTo = None
while current.callbacks:
item = current.callbacks.pop(0)
callback, args, kw = item[isinstance(current.result, failure.Failure)]
args = args or ()
kw = kw or {}
# Avoid recursion if we can.
if callback is _CONTINUE:
# Give the waiting Deferred our current result and then
# forget about that result ourselves.
chainee = args[0]
chainee.result = current.result
current.result = None
# Making sure to update _debugInfo
if current._debugInfo is not None:
current._debugInfo.failResult = None
chainee.paused -= 1
chain.append(chainee)
# Delay cleaning this Deferred and popping it from the chain
# until after we've dealt with chainee.
finished = False
break
try:
current._runningCallbacks = True
try:
current.result = callback(current.result, *args, **kw)
if current.result is current:
warnAboutFunction(
callback,
"Callback returned the Deferred "
"it was attached to; this breaks the "
"callback chain and will raise an "
"exception in the future.",
)
finally:
current._runningCallbacks = False
except BaseException:
# Including full frame information in the Failure is quite
# expensive, so we avoid it unless self.debug is set.
current.result = failure.Failure(captureVars=self.debug)
else:
if isinstance(current.result, Deferred):
# The result is another Deferred. If it has a result,
# we can take it and keep going.
resultResult = getattr(current.result, "result", _NO_RESULT)
if (
resultResult is _NO_RESULT
or isinstance(resultResult, Deferred)
or current.result.paused
):
# Nope, it didn't. Pause and chain.
current.pause()
current._chainedTo = current.result
# Note: current.result has no result, so it's not
# running its callbacks right now. Therefore we can
# append to the callbacks list directly instead of
# using addCallbacks.
current.result.callbacks.append(current._continuation())
break
else:
# Yep, it did. Steal it.
current.result.result = None
# Make sure _debugInfo's failure state is updated.
if current.result._debugInfo is not None:
current.result._debugInfo.failResult = None
current.result = resultResult
if finished:
# As much of the callback chain - perhaps all of it - as can be
# processed right now has been. The current Deferred is waiting on
# another Deferred or for more callbacks. Before finishing with it,
# make sure its _debugInfo is in the proper state.
if isinstance(current.result, failure.Failure):
# Stash the Failure in the _debugInfo for unhandled error
# reporting.
current.result.cleanFailure()
if current._debugInfo is None:
current._debugInfo = DebugInfo()
current._debugInfo.failResult = current.result
else:
# Clear out any Failure in the _debugInfo, since the result
# is no longer a Failure.
if current._debugInfo is not None:
current._debugInfo.failResult = None
# This Deferred is done, pop it from the chain and move back up
# to the Deferred which supplied us with our result.
chain.pop()
def __str__(self) -> str:
"""
Return a string representation of this C{Deferred}.
"""
cname = self.__class__.__name__
result = getattr(self, "result", _NO_RESULT)
myID = id(self)
if self._chainedTo is not None:
result = " waiting on Deferred at 0x{:x}".format(id(self._chainedTo))
elif result is _NO_RESULT:
result = ""
else:
result = " current result: {!r}".format(result)
return "<{} at 0x{:x}{}>".format(cname, myID, result)
__repr__ = __str__
def __iter__(self):
return self
@failure._extraneous
def send(self, value=None):
if self.paused:
# If we're paused, we have no result to give
return self
result = getattr(self, "result", _NO_RESULT)
if result is _NO_RESULT:
return self
if isinstance(result, failure.Failure):
# Clear the failure on debugInfo so it doesn't raise "unhandled
# exception"
self._debugInfo.failResult = None
result.value.__failure__ = result
raise result.value
else:
raise StopIteration(result)
# For PEP-492 support (async/await)
__await__ = __iter__
__next__ = send
def asFuture(self, loop):
"""
Adapt this L{Deferred} into a L{asyncio.Future} which is bound to C{loop}.
@note: converting a L{Deferred} to an L{asyncio.Future} consumes both
its result and its errors, so this method implicitly converts
C{self} into a L{Deferred} firing with L{None}, regardless of what
its result previously would have been.
@since: Twisted 17.5.0
@param loop: The asyncio event loop to bind the L{asyncio.Future} to.
@type loop: L{asyncio.AbstractEventLoop} or similar
@return: A Future which will fire when the Deferred fires.
@rtype: L{asyncio.Future}
"""
try:
createFuture = loop.create_future
except AttributeError:
from asyncio import Future
def createFuture():
return Future(loop=loop)
future = createFuture()
def checkCancel(futureAgain):
if futureAgain.cancelled():
self.cancel()
def maybeFail(failure):
if not future.cancelled():
future.set_exception(failure.value)
def maybeSucceed(result):
if not future.cancelled():
future.set_result(result)
self.addCallbacks(maybeSucceed, maybeFail)
future.add_done_callback(checkCancel)
return future
@classmethod
def fromFuture(cls, future):
"""
Adapt an L{asyncio.Future} to a L{Deferred}.
@note: This creates a L{Deferred} from a L{asyncio.Future}, I{not} from
a C{coroutine}; in other words, you will need to call
L{asyncio.ensure_future},
L{asyncio.loop.create_task} or create an
L{asyncio.Task} yourself to get from a C{coroutine} to a
L{asyncio.Future} if what you have is an awaitable coroutine and
not a L{asyncio.Future}. (The length of this list of techniques is
exactly why we have left it to the caller!)
@since: Twisted 17.5.0
@param future: The Future to adapt.
@type future: L{asyncio.Future}
@return: A Deferred which will fire when the Future fires.
@rtype: L{Deferred}
"""
def adapt(result):
try:
extracted = result.result()
except BaseException:
extracted = failure.Failure()
adapt.actual.callback(extracted)
futureCancel = object()
def cancel(reself):
future.cancel()
reself.callback(futureCancel)
self = cls(cancel)
adapt.actual = self
def uncancel(result):
if result is futureCancel:
adapt.actual = Deferred()
return adapt.actual
return result
self.addCallback(uncancel)
future.add_done_callback(adapt)
return self
@classmethod
def fromCoroutine(cls, coro):
"""
Schedule the execution of a coroutine that awaits on L{Deferred}s,
wrapping it in a L{Deferred} that will fire on success/failure of the
coroutine.
Coroutine functions return a coroutine object, similar to how
generators work. This function turns that coroutine into a Deferred,
meaning that it can be used in regular Twisted code. For example::
import treq
from twisted.internet.defer import Deferred
from twisted.internet.task import react
async def crawl(pages):
results = {}
for page in pages:
results[page] = await treq.content(await treq.get(page))
return results
def main(reactor):
pages = [
"http://localhost:8080"
]
d = Deferred.fromCoroutine(crawl(pages))
d.addCallback(print)
return d
react(main)
@since: Twisted 21.2.0
@param coro: The coroutine object to schedule.
@type coro: A Python 3.5+ C{async def} coroutine or a Python 3.4+
C{yield from} coroutine.
@raise ValueError: If C{coro} is not a coroutine or generator.
@rtype: L{Deferred}
"""
if not iscoroutine(coro) and not isinstance(coro, types.GeneratorType):
raise NotACoroutineError("{!r} is not a coroutine".format(coro))
return _cancellableInlineCallbacks(coro)
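# Illustrative sketch of the asyncio bridging above; the helper name and the
# throwaway event loop are examples, not part of this module's API. Going the
# other way, Deferred.fromFuture() returns a Deferred that fires once the
# wrapped asyncio.Future does.
def _example_as_future():
    import asyncio
    loop = asyncio.new_event_loop()
    try:
        # succeed(42) has already fired, so the Future receives its result
        # right away and run_until_complete() simply returns 42.
        future = succeed(42).asFuture(loop)
        return loop.run_until_complete(future)
    finally:
        loop.close()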
def _cancelledToTimedOutError(value, timeout):
"""
A default translation function that translates L{Failure}s that are
L{CancelledError}s to L{TimeoutError}s.
@param value: Anything
@type value: Anything
@param timeout: The timeout
@type timeout: L{int}
@rtype: C{value}
@raise TimeoutError: If C{value} is a L{Failure} that is a L{CancelledError}.
@raise Exception: If C{value} is a L{Failure} that is not a L{CancelledError},
it is re-raised.
@since: 16.5
"""
if isinstance(value, failure.Failure):
value.trap(CancelledError)
raise TimeoutError(timeout, "Deferred")
return value
def ensureDeferred(coro):
"""
Schedule the execution of a coroutine that awaits/yields from L{Deferred}s,
wrapping it in a L{Deferred} that will fire on success/failure of the
coroutine. If a Deferred is passed to this function, it will be returned
    directly (mimicking C{asyncio}'s C{ensure_future} function).
See L{Deferred.fromCoroutine} for examples of coroutines.
@param coro: The coroutine object to schedule, or a L{Deferred}.
@type coro: A Python 3.5+ C{async def} C{coroutine}, a Python 3.4+
C{yield from} using L{types.GeneratorType}, or a L{Deferred}.
@rtype: L{Deferred}
"""
if isinstance(coro, Deferred):
return coro
else:
try:
return Deferred.fromCoroutine(coro)
except NotACoroutineError:
# It's not a coroutine. Raise an exception, but say that it's also
# not a Deferred so the error makes sense.
raise NotACoroutineError(
"{!r} is not a coroutine or a Deferred".format(coro)
)
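# Minimal sketch of driving a coroutine with ensureDeferred; the names are
# examples. This coroutine finishes synchronously because the awaited Deferred
# has already fired; in general a running reactor is needed.
def _example_ensure_deferred():
    async def double(d):
        value = await d
        return value * 2
    d = ensureDeferred(double(succeed(21)))
    d.addCallback(print)  # prints 42
    return d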
class DebugInfo:
"""
Deferred debug helper.
"""
failResult = None
def _getDebugTracebacks(self):
info = ""
if hasattr(self, "creator"):
info += " C: Deferred was created:\n C:"
info += "".join(self.creator).rstrip().replace("\n", "\n C:")
info += "\n"
if hasattr(self, "invoker"):
info += " I: First Invoker was:\n I:"
info += "".join(self.invoker).rstrip().replace("\n", "\n I:")
info += "\n"
return info
def __del__(self):
"""
Print tracebacks and die.
If the *last* (and I do mean *last*) callback leaves me in an error
state, print a traceback (if said errback is a L{Failure}).
"""
if self.failResult is not None:
# Note: this is two separate messages for compatibility with
# earlier tests; arguably it should be a single error message.
log.critical("Unhandled error in Deferred:", isError=True)
debugInfo = self._getDebugTracebacks()
if debugInfo:
format = "(debug: {debugInfo})"
else:
format = None
log.failure(format, self.failResult, debugInfo=debugInfo)
@comparable
class FirstError(Exception):
"""
First error to occur in a L{DeferredList} if C{fireOnOneErrback} is set.
@ivar subFailure: The L{Failure} that occurred.
@type subFailure: L{Failure}
@ivar index: The index of the L{Deferred} in the L{DeferredList} where
it happened.
@type index: L{int}
"""
def __init__(self, failure, index):
Exception.__init__(self, failure, index)
self.subFailure = failure
self.index = index
def __repr__(self) -> str:
"""
The I{repr} of L{FirstError} instances includes the repr of the
wrapped failure's exception and the index of the L{FirstError}.
"""
return "FirstError[#%d, %r]" % (self.index, self.subFailure.value)
def __str__(self) -> str:
"""
The I{str} of L{FirstError} instances includes the I{str} of the
entire wrapped failure (including its traceback and exception) and
the index of the L{FirstError}.
"""
return "FirstError[#%d, %s]" % (self.index, self.subFailure)
def __cmp__(self, other):
"""
Comparison between L{FirstError} and other L{FirstError} instances
is defined as the comparison of the index and sub-failure of each
instance. L{FirstError} instances don't compare equal to anything
that isn't a L{FirstError} instance.
@since: 8.2
"""
if isinstance(other, FirstError):
return cmp((self.index, self.subFailure), (other.index, other.subFailure))
return -1
class DeferredList(Deferred):
"""
L{DeferredList} is a tool for collecting the results of several Deferreds.
This tracks a list of L{Deferred}s for their results, and makes a single
callback when they have all completed. By default, the ultimate result is a
list of (success, result) tuples, 'success' being a boolean.
L{DeferredList} exposes the same API that L{Deferred} does, so callbacks and
errbacks can be added to it in the same way.
L{DeferredList} is implemented by adding callbacks and errbacks to each
L{Deferred} in the list passed to it. This means callbacks and errbacks
added to the Deferreds before they are passed to L{DeferredList} will change
the result that L{DeferredList} sees (i.e., L{DeferredList} is not special).
Callbacks and errbacks can also be added to the Deferreds after they are
passed to L{DeferredList} and L{DeferredList} may change the result that
they see.
See the documentation for the C{__init__} arguments for more information.
@ivar _deferredList: The L{list} of L{Deferred}s to track.
"""
fireOnOneCallback = False
fireOnOneErrback = False
def __init__(
self,
deferredList,
fireOnOneCallback=False,
fireOnOneErrback=False,
consumeErrors=False,
):
"""
Initialize a DeferredList.
@param deferredList: The list of deferreds to track.
@type deferredList: L{list} of L{Deferred}s
@param fireOnOneCallback: (keyword param) a flag indicating that this
L{DeferredList} will fire when the first L{Deferred} in
C{deferredList} fires with a non-failure result without waiting for
any of the other Deferreds. When this flag is set, the DeferredList
will fire with a two-tuple: the first element is the result of the
Deferred which fired; the second element is the index in
C{deferredList} of that Deferred.
@type fireOnOneCallback: L{bool}
@param fireOnOneErrback: (keyword param) a flag indicating that this
L{DeferredList} will fire when the first L{Deferred} in
C{deferredList} fires with a failure result without waiting for any
of the other Deferreds. When this flag is set, if a Deferred in the
list errbacks, the DeferredList will errback with a L{FirstError}
failure wrapping the failure of that Deferred.
@type fireOnOneErrback: L{bool}
@param consumeErrors: (keyword param) a flag indicating that failures in
any of the included L{Deferred}s should not be propagated to
errbacks added to the individual L{Deferred}s after this
L{DeferredList} is constructed. After constructing the
L{DeferredList}, any errors in the individual L{Deferred}s will be
converted to a callback result of L{None}. This is useful to
prevent spurious 'Unhandled error in Deferred' messages from being
logged. This does not prevent C{fireOnOneErrback} from working.
@type consumeErrors: L{bool}
"""
self._deferredList = list(deferredList)
self.resultList = [None] * len(self._deferredList)
Deferred.__init__(self)
if len(self._deferredList) == 0 and not fireOnOneCallback:
self.callback(self.resultList)
# These flags need to be set *before* attaching callbacks to the
# deferreds, because the callbacks use these flags, and will run
# synchronously if any of the deferreds are already fired.
self.fireOnOneCallback = fireOnOneCallback
self.fireOnOneErrback = fireOnOneErrback
self.consumeErrors = consumeErrors
self.finishedCount = 0
index = 0
for deferred in self._deferredList:
deferred.addCallbacks(
self._cbDeferred,
self._cbDeferred,
callbackArgs=(index, SUCCESS),
errbackArgs=(index, FAILURE),
)
index = index + 1
def _cbDeferred(self, result, index, succeeded):
"""
(internal) Callback for when one of my deferreds fires.
"""
self.resultList[index] = (succeeded, result)
self.finishedCount += 1
if not self.called:
if succeeded == SUCCESS and self.fireOnOneCallback:
self.callback((result, index))
elif succeeded == FAILURE and self.fireOnOneErrback:
self.errback(failure.Failure(FirstError(result, index)))
elif self.finishedCount == len(self.resultList):
self.callback(self.resultList)
if succeeded == FAILURE and self.consumeErrors:
result = None
return result
def cancel(self):
"""
Cancel this L{DeferredList}.
If the L{DeferredList} hasn't fired yet, cancel every L{Deferred} in
the list.
If the L{DeferredList} has fired, including the case where the
C{fireOnOneCallback}/C{fireOnOneErrback} flag is set and the
L{DeferredList} fires because one L{Deferred} in the list fires with a
non-failure/failure result, do nothing in the C{cancel} method.
"""
if not self.called:
for deferred in self._deferredList:
try:
deferred.cancel()
except BaseException:
log.failure("Exception raised from user supplied canceller")
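# Minimal sketch of collecting (success, result) pairs with DeferredList; the
# helper name is an example. consumeErrors=True keeps the individual failure
# from also being reported as an unhandled error.
def _example_deferred_list():
    first, second = Deferred(), Deferred()
    results = DeferredList([first, second], consumeErrors=True)
    results.addCallback(print)
    first.callback("one")
    second.errback(failure.Failure(RuntimeError("boom")))
    # print receives [(True, 'one'), (False, <Failure ...>)]
    return results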
def _parseDListResult(l, fireOnOneErrback=False):
if __debug__:
for success, value in l:
assert success
return [x[1] for x in l]
def gatherResults(deferredList, consumeErrors=False):
"""
Returns, via a L{Deferred}, a list with the results of the given
L{Deferred}s - in effect, a "join" of multiple deferred operations.
The returned L{Deferred} will fire when I{all} of the provided L{Deferred}s
have fired, or when any one of them has failed.
    The returned L{Deferred} can be cancelled by calling its C{cancel} method;
    doing so cancels all of the L{Deferred}s in the list.
This differs from L{DeferredList} in that you don't need to parse
the result for success/failure.
@type deferredList: L{list} of L{Deferred}s
@param consumeErrors: (keyword param) a flag, defaulting to False,
indicating that failures in any of the given L{Deferred}s should not be
propagated to errbacks added to the individual L{Deferred}s after this
L{gatherResults} invocation. Any such errors in the individual
L{Deferred}s will be converted to a callback result of L{None}. This
is useful to prevent spurious 'Unhandled error in Deferred' messages
from being logged. This parameter is available since 11.1.0.
@type consumeErrors: L{bool}
"""
d = DeferredList(deferredList, fireOnOneErrback=True, consumeErrors=consumeErrors)
d.addCallback(_parseDListResult)
return d
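# Minimal sketch of gatherResults with already-fired Deferreds; unlike
# DeferredList, the callback receives a plain list of results.
def _example_gather_results():
    d = gatherResults([succeed(1), succeed(2), succeed(3)])
    d.addCallback(print)  # prints [1, 2, 3]
    return d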
# Constants for use with DeferredList
SUCCESS = True
FAILURE = False
## deferredGenerator
class waitForDeferred:
"""
See L{deferredGenerator}.
"""
def __init__(self, d):
warnings.warn(
"twisted.internet.defer.waitForDeferred was deprecated in "
"Twisted 15.0.0; please use twisted.internet.defer.inlineCallbacks "
"instead",
DeprecationWarning,
stacklevel=2,
)
if not isinstance(d, Deferred):
raise TypeError(
"You must give waitForDeferred a Deferred. You gave it {!r}.".format(d)
)
self.d = d
def getResult(self):
if isinstance(self.result, failure.Failure):
self.result.raiseException()
return self.result
def _deferGenerator(g, deferred):
"""
See L{deferredGenerator}.
"""
result = None
# This function is complicated by the need to prevent unbounded recursion
# arising from repeatedly yielding immediately ready deferreds. This while
# loop and the waiting variable solve that by manually unfolding the
# recursion.
waiting = [True, None] # defgen is waiting for result? # result
while 1:
try:
result = next(g)
except StopIteration:
deferred.callback(result)
return deferred
except BaseException:
deferred.errback()
return deferred
# Deferred.callback(Deferred) raises an error; we catch this case
# early here and give a nicer error message to the user in case
# they yield a Deferred.
if isinstance(result, Deferred):
return fail(TypeError("Yield waitForDeferred(d), not d!"))
if isinstance(result, waitForDeferred):
# a waitForDeferred was yielded, get the result.
            # Pass result in so it doesn't get changed going around the loop
# This isn't a problem for waiting, as it's only reused if
# gotResult has already been executed.
def gotResult(r, result=result):
result.result = r
if waiting[0]:
waiting[0] = False
waiting[1] = r
else:
_deferGenerator(g, deferred)
result.d.addBoth(gotResult)
if waiting[0]:
# Haven't called back yet, set flag so that we get reinvoked
# and return from the loop
waiting[0] = False
return deferred
# Reset waiting to initial values for next loop
waiting[0] = True
waiting[1] = None
result = None
@deprecated(Version("Twisted", 15, 0, 0), "twisted.internet.defer.inlineCallbacks")
def deferredGenerator(f):
"""
L{deferredGenerator} and L{waitForDeferred} help you write
L{Deferred}-using code that looks like a regular sequential function.
Consider the use of L{inlineCallbacks} instead, which can accomplish
the same thing in a more concise manner.
There are two important functions involved: L{waitForDeferred}, and
L{deferredGenerator}. They are used together, like this::
@deferredGenerator
def thingummy():
thing = waitForDeferred(makeSomeRequestResultingInDeferred())
yield thing
thing = thing.getResult()
print(thing) #the result! hoorj!
L{waitForDeferred} returns something that you should immediately yield; when
your generator is resumed, calling C{thing.getResult()} will either give you
the result of the L{Deferred} if it was a success, or raise an exception if it
was a failure. Calling C{getResult} is B{absolutely mandatory}. If you do
not call it, I{your program will not work}.
L{deferredGenerator} takes one of these waitForDeferred-using generator
functions and converts it into a function that returns a L{Deferred}. The
result of the L{Deferred} will be the last value that your generator yielded
unless the last value is a L{waitForDeferred} instance, in which case the
result will be L{None}. If the function raises an unhandled exception, the
L{Deferred} will errback instead. Remember that C{return result} won't work;
use C{yield result; return} in place of that.
Note that not yielding anything from your generator will make the L{Deferred}
result in L{None}. Yielding a L{Deferred} from your generator is also an error
condition; always yield C{waitForDeferred(d)} instead.
The L{Deferred} returned from your deferred generator may also errback if your
generator raised an exception. For example::
@deferredGenerator
def thingummy():
thing = waitForDeferred(makeSomeRequestResultingInDeferred())
yield thing
thing = thing.getResult()
if thing == 'I love Twisted':
# will become the result of the Deferred
yield 'TWISTED IS GREAT!'
return
else:
# will trigger an errback
raise Exception('DESTROY ALL LIFE')
Put succinctly, these functions connect deferred-using code with this 'fake
blocking' style in both directions: L{waitForDeferred} converts from a
L{Deferred} to the 'blocking' style, and L{deferredGenerator} converts from the
'blocking' style to a L{Deferred}.
"""
@wraps(f)
def unwindGenerator(*args, **kwargs):
return _deferGenerator(f(*args, **kwargs), Deferred())
return unwindGenerator
## inlineCallbacks
class _DefGen_Return(BaseException):
def __init__(self, value):
self.value = value
def returnValue(val):
"""
Return val from a L{inlineCallbacks} generator.
Note: this is currently implemented by raising an exception
derived from L{BaseException}. You might want to change any
'except:' clauses to an 'except Exception:' clause so as not to
catch this exception.
Also: while this function currently will work when called from
within arbitrary functions called from within the generator, do
not rely upon this behavior.
"""
raise _DefGen_Return(val)
@attr.s
class _CancellationStatus:
"""
Cancellation status of an L{inlineCallbacks} invocation.
@ivar waitingOn: the L{Deferred} being waited upon (which
L{_inlineCallbacks} must fill out before returning)
@ivar deferred: the L{Deferred} to callback or errback when the generator
invocation has finished.
"""
deferred = attr.ib()
waitingOn = attr.ib(default=None)
@failure._extraneous
def _inlineCallbacks(result, g, status):
"""
Carry out the work of L{inlineCallbacks}.
Iterate the generator produced by an C{@}L{inlineCallbacks}-decorated
function, C{g}, C{send()}ing it the results of each value C{yield}ed by
that generator, until a L{Deferred} is yielded, at which point a callback
is added to that L{Deferred} to call this function again.
@param result: The last result seen by this generator. Note that this is
never a L{Deferred} - by the time this function is invoked, the
L{Deferred} has been called back and this will be a particular result
at a point in its callback chain.
@param g: a generator object returned by calling a function or method
decorated with C{@}L{inlineCallbacks}
@param status: a L{_CancellationStatus} tracking the current status of C{g}
"""
# This function is complicated by the need to prevent unbounded recursion
# arising from repeatedly yielding immediately ready deferreds. This while
# loop and the waiting variable solve that by manually unfolding the
# recursion.
waiting = [True, None] # waiting for result? # result
# Get the current contextvars Context object.
current_context = _copy_context()
while 1:
try:
# Send the last result back as the result of the yield expression.
isFailure = isinstance(result, failure.Failure)
if isFailure:
result = current_context.run(result.throwExceptionIntoGenerator, g)
else:
result = current_context.run(g.send, result)
except StopIteration as e:
# fell off the end, or "return" statement
status.deferred.callback(getattr(e, "value", None))
return
except _DefGen_Return as e:
# returnValue() was called; time to give a result to the original
# Deferred. First though, let's try to identify the potentially
# confusing situation which results when returnValue() is
# accidentally invoked from a different function, one that wasn't
# decorated with @inlineCallbacks.
# The traceback starts in this frame (the one for
# _inlineCallbacks); the next one down should be the application
# code.
appCodeTrace = exc_info()[2].tb_next
if version_info < (3, 7):
# The contextvars backport and our no-op shim add an extra frame.
appCodeTrace = appCodeTrace.tb_next
elif implementation.name == "pypy":
# PyPy as of 3.7 adds an extra frame.
appCodeTrace = appCodeTrace.tb_next
if isFailure:
# If we invoked this generator frame by throwing an exception
# into it, then throwExceptionIntoGenerator will consume an
# additional stack frame itself, so we need to skip that too.
appCodeTrace = appCodeTrace.tb_next
# Now that we've identified the frame being exited by the
# exception, let's figure out if returnValue was called from it
# directly. returnValue itself consumes a stack frame, so the
# application code will have a tb_next, but it will *not* have a
# second tb_next.
if appCodeTrace.tb_next.tb_next:
# If returnValue was invoked non-local to the frame which it is
# exiting, identify the frame that ultimately invoked
# returnValue so that we can warn the user, as this behavior is
# confusing.
ultimateTrace = appCodeTrace
while ultimateTrace.tb_next.tb_next:
ultimateTrace = ultimateTrace.tb_next
filename = ultimateTrace.tb_frame.f_code.co_filename
lineno = ultimateTrace.tb_lineno
warnings.warn_explicit(
"returnValue() in %r causing %r to exit: "
"returnValue should only be invoked by functions decorated "
"with inlineCallbacks"
% (
ultimateTrace.tb_frame.f_code.co_name,
appCodeTrace.tb_frame.f_code.co_name,
),
DeprecationWarning,
filename,
lineno,
)
status.deferred.callback(e.value)
return
except BaseException:
status.deferred.errback()
return
if isinstance(result, Deferred):
# a deferred was yielded, get the result.
def gotResult(r):
if waiting[0]:
waiting[0] = False
waiting[1] = r
else:
current_context.run(_inlineCallbacks, r, g, status)
result.addBoth(gotResult)
if waiting[0]:
# Haven't called back yet, set flag so that we get reinvoked
# and return from the loop
waiting[0] = False
status.waitingOn = result
return
result = waiting[1]
# Reset waiting to initial values for next loop. gotResult uses
# waiting, but this isn't a problem because gotResult is only
# executed once, and if it hasn't been executed yet, the return
# branch above would have been taken.
waiting[0] = True
waiting[1] = None
def _cancellableInlineCallbacks(g):
"""
Make an C{@}L{inlineCallbacks} cancellable.
@param g: a generator object returned by calling a function or method
decorated with C{@}L{inlineCallbacks}
@return: L{Deferred} for the C{@}L{inlineCallbacks} that is cancellable.
"""
def cancel(it):
it.callbacks, tmp = [], it.callbacks
it.addErrback(handleCancel)
it.callbacks.extend(tmp)
it.errback(_InternalInlineCallbacksCancelledError())
deferred = Deferred(cancel)
status = _CancellationStatus(deferred)
def handleCancel(result):
"""
Propagate the cancellation of an C{@}L{inlineCallbacks} to the
L{Deferred} it is waiting on.
@param result: An L{_InternalInlineCallbacksCancelledError} from
C{cancel()}.
@return: A new L{Deferred} that the C{@}L{inlineCallbacks} generator
can callback or errback through.
"""
result.trap(_InternalInlineCallbacksCancelledError)
status.deferred = Deferred(cancel)
# We would only end up here if the inlineCallback is waiting on
# another Deferred. It needs to be cancelled.
awaited = status.waitingOn
awaited.cancel()
return status.deferred
_inlineCallbacks(None, g, status)
return deferred
class _InternalInlineCallbacksCancelledError(Exception):
"""
A unique exception used only in L{_cancellableInlineCallbacks} to verify
that an L{inlineCallbacks} is being cancelled as expected.
"""
def inlineCallbacks(f):
"""
L{inlineCallbacks} helps you write L{Deferred}-using code that looks like a
regular sequential function. For example::
@inlineCallbacks
def thingummy():
thing = yield makeSomeRequestResultingInDeferred()
print(thing) # the result! hoorj!
When you call anything that results in a L{Deferred}, you can simply yield it;
your generator will automatically be resumed when the Deferred's result is
available. The generator will be sent the result of the L{Deferred} with the
'send' method on generators, or if the result was a failure, 'throw'.
Things that are not L{Deferred}s may also be yielded, and your generator
will be resumed with the same object sent back. This means C{yield}
performs an operation roughly equivalent to L{maybeDeferred}.
Your inlineCallbacks-enabled generator will return a L{Deferred} object, which
will result in the return value of the generator (or will fail with a
failure object if your generator raises an unhandled exception). Note that
you can't use C{return result} to return a value; use C{returnValue(result)}
instead. Falling off the end of the generator, or simply using C{return}
will cause the L{Deferred} to have a result of L{None}.
Be aware that L{returnValue} will not accept a L{Deferred} as a parameter.
If you believe the thing you'd like to return could be a L{Deferred}, do
this::
result = yield result
returnValue(result)
The L{Deferred} returned from your deferred generator may errback if your
generator raised an exception::
@inlineCallbacks
def thingummy():
thing = yield makeSomeRequestResultingInDeferred()
if thing == 'I love Twisted':
# will become the result of the Deferred
returnValue('TWISTED IS GREAT!')
else:
# will trigger an errback
raise Exception('DESTROY ALL LIFE')
It is possible to use the C{return} statement instead of L{returnValue}::
@inlineCallbacks
def loadData(url):
response = yield makeRequest(url)
return json.loads(response)
You can cancel the L{Deferred} returned from your L{inlineCallbacks}
generator before it is fired by your generator completing (either by
reaching its end, a C{return} statement, or by calling L{returnValue}).
A C{CancelledError} will be raised from the C{yield}ed L{Deferred} that
has been cancelled if that C{Deferred} does not otherwise suppress it.
"""
@wraps(f)
def unwindGenerator(*args, **kwargs):
try:
gen = f(*args, **kwargs)
except _DefGen_Return:
raise TypeError(
"inlineCallbacks requires %r to produce a generator; instead"
"caught returnValue being used in a non-generator" % (f,)
)
if not isinstance(gen, types.GeneratorType):
raise TypeError(
"inlineCallbacks requires %r to produce a generator; "
"instead got %r" % (f, gen)
)
return _cancellableInlineCallbacks(gen)
return unwindGenerator
## DeferredLock/DeferredQueue
class _ConcurrencyPrimitive:
def __init__(self):
self.waiting = []
def _releaseAndReturn(self, r):
self.release()
return r
def run(*args, **kwargs):
"""
Acquire, run, release.
This function takes a callable as its first argument and any
number of other positional and keyword arguments. When the
lock or semaphore is acquired, the callable will be invoked
with those arguments.
The callable may return a L{Deferred}; if it does, the lock or
semaphore won't be released until that L{Deferred} fires.
@return: L{Deferred} of function result.
"""
if len(args) < 2:
if not args:
raise TypeError("run() takes at least 2 arguments, none given.")
raise TypeError(
"%s.run() takes at least 2 arguments, 1 given"
% (args[0].__class__.__name__,)
)
self, f = args[:2]
args = args[2:]
def execute(ignoredResult):
d = maybeDeferred(f, *args, **kwargs)
d.addBoth(self._releaseAndReturn)
return d
d = self.acquire()
d.addCallback(execute)
return d
def __aenter__(self):
"""
We can be used as an asynchronous context manager.
"""
return self.acquire()
def __aexit__(self, exc_type, exc_val, exc_tb):
self.release()
# We return False to indicate that we have not consumed the
# exception, if any.
return succeed(False)
class DeferredLock(_ConcurrencyPrimitive):
"""
A lock for event driven systems.
    @ivar locked: C{True} when this Lock has been acquired, C{False} at all other
times. Do not change this value, but it is useful to examine for the
equivalent of a "non-blocking" acquisition.
"""
locked = False
def _cancelAcquire(self, d):
"""
Remove a deferred d from our waiting list, as the deferred has been
canceled.
Note: We do not need to wrap this in a try/except to catch d not
being in self.waiting because this canceller will not be called if
d has fired. release() pops a deferred out of self.waiting and
calls it, so the canceller will no longer be called.
@param d: The deferred that has been canceled.
"""
self.waiting.remove(d)
def acquire(self):
"""
Attempt to acquire the lock. Returns a L{Deferred} that fires on
lock acquisition with the L{DeferredLock} as the value. If the lock
is locked, then the Deferred is placed at the end of a waiting list.
@return: a L{Deferred} which fires on lock acquisition.
@rtype: a L{Deferred}
"""
d = Deferred(canceller=self._cancelAcquire)
if self.locked:
self.waiting.append(d)
else:
self.locked = True
d.callback(self)
return d
def release(self):
"""
Release the lock. If there is a waiting list, then the first
L{Deferred} in that waiting list will be called back.
        Should be called by whoever did the L{acquire}() when the shared
resource is free.
"""
assert self.locked, "Tried to release an unlocked lock"
self.locked = False
if self.waiting:
# someone is waiting to acquire lock
self.locked = True
d = self.waiting.pop(0)
d.callback(self)
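# Minimal sketch of serializing work with DeferredLock.run(); the function and
# tag names are examples. The lock is released when the wrapped callable (or
# the Deferred it returns) finishes, so the calls below run strictly in order.
def _example_deferred_lock():
    lock = DeferredLock()
    order = []
    def critical_section(tag):
        order.append(tag)
        return tag
    d1 = lock.run(critical_section, "first")
    d2 = lock.run(critical_section, "second")
    return d1, d2, order  # order == ["first", "second"]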
class DeferredSemaphore(_ConcurrencyPrimitive):
"""
A semaphore for event driven systems.
If you are looking into this as a means of limiting parallelism, you might
find L{twisted.internet.task.Cooperator} more useful.
@ivar limit: At most this many users may acquire this semaphore at
once.
@type limit: L{int}
@ivar tokens: The difference between C{limit} and the number of users
which have currently acquired this semaphore.
@type tokens: L{int}
"""
def __init__(self, tokens):
"""
@param tokens: initial value of L{tokens} and L{limit}
@type tokens: L{int}
"""
_ConcurrencyPrimitive.__init__(self)
if tokens < 1:
raise ValueError("DeferredSemaphore requires tokens >= 1")
self.tokens = tokens
self.limit = tokens
def _cancelAcquire(self, d):
"""
Remove a deferred d from our waiting list, as the deferred has been
canceled.
Note: We do not need to wrap this in a try/except to catch d not
being in self.waiting because this canceller will not be called if
d has fired. release() pops a deferred out of self.waiting and
calls it, so the canceller will no longer be called.
@param d: The deferred that has been canceled.
"""
self.waiting.remove(d)
def acquire(self):
"""
Attempt to acquire the token.
@return: a L{Deferred} which fires on token acquisition.
"""
assert (
self.tokens >= 0
), "Internal inconsistency?? tokens should never be negative"
d = Deferred(canceller=self._cancelAcquire)
if not self.tokens:
self.waiting.append(d)
else:
self.tokens = self.tokens - 1
d.callback(self)
return d
def release(self):
"""
Release the token.
Should be called by whoever did the L{acquire}() when the shared
resource is free.
"""
assert (
self.tokens < self.limit
), "Someone released me too many times: too many tokens!"
self.tokens = self.tokens + 1
if self.waiting:
# someone is waiting to acquire token
self.tokens = self.tokens - 1
d = self.waiting.pop(0)
d.callback(self)
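# Minimal sketch of DeferredSemaphore with a limit of two token holders; the
# variable names are examples. The third acquire() is queued until release().
def _example_deferred_semaphore():
    sem = DeferredSemaphore(2)
    held = [sem.acquire(), sem.acquire()]  # both fire immediately
    waiting = sem.acquire()                # queued; sem.tokens is now 0
    sem.release()                          # hands the freed token to `waiting`
    return held, waiting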
class QueueOverflow(Exception):
pass
class QueueUnderflow(Exception):
pass
class DeferredQueue:
"""
An event driven queue.
Objects may be added as usual to this queue. When an attempt is
made to retrieve an object when the queue is empty, a L{Deferred} is
returned which will fire when an object becomes available.
@ivar size: The maximum number of objects to allow into the queue
at a time. When an attempt to add a new object would exceed this
limit, L{QueueOverflow} is raised synchronously. L{None} for no limit.
@ivar backlog: The maximum number of L{Deferred} gets to allow at
one time. When an attempt is made to get an object which would
exceed this limit, L{QueueUnderflow} is raised synchronously. L{None}
for no limit.
"""
def __init__(self, size=None, backlog=None):
self.waiting = []
self.pending = []
self.size = size
self.backlog = backlog
def _cancelGet(self, d):
"""
Remove a deferred d from our waiting list, as the deferred has been
canceled.
Note: We do not need to wrap this in a try/except to catch d not
being in self.waiting because this canceller will not be called if
d has fired. put() pops a deferred out of self.waiting and calls
it, so the canceller will no longer be called.
@param d: The deferred that has been canceled.
"""
self.waiting.remove(d)
def put(self, obj):
"""
Add an object to this queue.
@raise QueueOverflow: Too many objects are in this queue.
"""
if self.waiting:
self.waiting.pop(0).callback(obj)
elif self.size is None or len(self.pending) < self.size:
self.pending.append(obj)
else:
raise QueueOverflow()
def get(self):
"""
Attempt to retrieve and remove an object from the queue.
@return: a L{Deferred} which fires with the next object available in
the queue.
@raise QueueUnderflow: Too many (more than C{backlog})
L{Deferred}s are already waiting for an object from this queue.
"""
if self.pending:
return succeed(self.pending.pop(0))
elif self.backlog is None or len(self.waiting) < self.backlog:
d = Deferred(canceller=self._cancelGet)
self.waiting.append(d)
return d
else:
raise QueueUnderflow()
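# Minimal sketch of DeferredQueue: a get() issued before any put() returns a
# Deferred that fires once an object arrives. The names are examples.
def _example_deferred_queue():
    queue = DeferredQueue()
    d = queue.get()
    d.addCallback(print)  # prints "hello" once put() runs below
    queue.put("hello")
    return queue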
class AlreadyTryingToLockError(Exception):
"""
Raised when L{DeferredFilesystemLock.deferUntilLocked} is called twice on a
single L{DeferredFilesystemLock}.
"""
class DeferredFilesystemLock(lockfile.FilesystemLock):
"""
A L{FilesystemLock} that allows for a L{Deferred} to be fired when the lock is
acquired.
@ivar _scheduler: The object in charge of scheduling retries. In this
implementation this is parameterized for testing.
@ivar _interval: The retry interval for an L{IReactorTime} based scheduler.
@ivar _tryLockCall: A L{DelayedCall} based on C{_interval} that will manage
the next retry for acquiring the lock.
@ivar _timeoutCall: A L{DelayedCall} based on C{deferUntilLocked}'s timeout
argument. This is in charge of timing out our attempt to acquire the
lock.
"""
_interval = 1
_tryLockCall = None
_timeoutCall = None
def __init__(self, name, scheduler=None):
"""
@param name: The name of the lock to acquire
@param scheduler: An object which provides L{IReactorTime}
"""
lockfile.FilesystemLock.__init__(self, name)
if scheduler is None:
from twisted.internet import reactor
scheduler = reactor
self._scheduler = scheduler
def deferUntilLocked(self, timeout=None):
"""
Wait until we acquire this lock. This method is not safe for
concurrent use.
@type timeout: L{float} or L{int}
@param timeout: the number of seconds after which to time out if the
lock has not been acquired.
@return: a L{Deferred} which will callback when the lock is acquired, or
errback with a L{TimeoutError} after timing out or an
L{AlreadyTryingToLockError} if the L{deferUntilLocked} has already
been called and not successfully locked the file.
"""
if self._tryLockCall is not None:
return fail(
AlreadyTryingToLockError(
"deferUntilLocked isn't safe for concurrent use."
)
)
def _cancelLock(reason):
"""
Cancel a L{DeferredFilesystemLock.deferUntilLocked} call.
@type reason: L{failure.Failure}
@param reason: The reason why the call is cancelled.
"""
self._tryLockCall.cancel()
self._tryLockCall = None
if self._timeoutCall is not None and self._timeoutCall.active():
self._timeoutCall.cancel()
self._timeoutCall = None
if self.lock():
d.callback(None)
else:
d.errback(reason)
d = Deferred(lambda deferred: _cancelLock(CancelledError()))
def _tryLock():
if self.lock():
if self._timeoutCall is not None:
self._timeoutCall.cancel()
self._timeoutCall = None
self._tryLockCall = None
d.callback(None)
else:
if timeout is not None and self._timeoutCall is None:
reason = failure.Failure(
TimeoutError(
"Timed out acquiring lock: %s after %fs"
% (self.name, timeout)
)
)
self._timeoutCall = self._scheduler.callLater(
timeout, _cancelLock, reason
)
self._tryLockCall = self._scheduler.callLater(self._interval, _tryLock)
_tryLock()
return d
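# Hedged sketch of DeferredFilesystemLock; assumes a running reactor and that
# the lock path below is writable. The names are examples only.
def _example_filesystem_lock():
    lock = DeferredFilesystemLock("example.lock")
    d = lock.deferUntilLocked(timeout=10)
    d.addCallback(lambda _: lock.unlock())
    return d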
__all__ = [
"Deferred",
"DeferredList",
"succeed",
"fail",
"FAILURE",
"SUCCESS",
"AlreadyCalledError",
"TimeoutError",
"gatherResults",
"maybeDeferred",
"ensureDeferred",
"waitForDeferred",
"deferredGenerator",
"inlineCallbacks",
"returnValue",
"DeferredLock",
"DeferredSemaphore",
"DeferredQueue",
"DeferredFilesystemLock",
"AlreadyTryingToLockError",
"CancelledError",
]
| [
"[email protected]"
] | |
5c70d23d8d54bf46d7d2e8547bf4ec59236ac4ab | fb9bfe18889cdcb1efad2544bec05d1551ec14f8 | /home-assistant/custom_components/hacs/repositories/theme.py | 0831694927efa2d466e194c26034207a18209395 | [
"MIT"
] | permissive | macbury/SmartHouse | b5cac3db82ad2350dc613a7fbb19584082ac29a0 | 796afdf7552c7798fc6a2a238537a36fa1073efe | refs/heads/master | 2022-12-25T10:30:47.115121 | 2022-07-10T15:03:00 | 2022-07-10T15:03:00 | 188,223,508 | 166 | 65 | MIT | 2022-12-10T15:46:43 | 2019-05-23T11:47:23 | Python | UTF-8 | Python | false | false | 3,631 | py | """Class for themes in HACS."""
from __future__ import annotations
from typing import TYPE_CHECKING
from ..enums import HacsCategory, HacsDispatchEvent
from ..exceptions import HacsException
from ..utils.decorator import concurrent
from .base import HacsRepository
if TYPE_CHECKING:
from ..base import HacsBase
class HacsThemeRepository(HacsRepository):
"""Themes in HACS."""
def __init__(self, hacs: HacsBase, full_name: str):
"""Initialize."""
super().__init__(hacs=hacs)
self.data.full_name = full_name
self.data.full_name_lower = full_name.lower()
self.data.category = HacsCategory.THEME
self.content.path.remote = "themes"
self.content.path.local = self.localpath
self.content.single = False
@property
def localpath(self):
"""Return localpath."""
return f"{self.hacs.core.config_path}/themes/{self.data.file_name.replace('.yaml', '')}"
async def async_post_installation(self):
"""Run post installation steps."""
try:
await self.hacs.hass.services.async_call("frontend", "reload_themes", {})
except BaseException: # lgtm [py/catch-base-exception] pylint: disable=broad-except
pass
async def validate_repository(self):
"""Validate."""
# Run common validation steps.
await self.common_validate()
# Custom step 1: Validate content.
compliant = False
for treefile in self.treefiles:
if treefile.startswith("themes/") and treefile.endswith(".yaml"):
compliant = True
break
if not compliant:
raise HacsException(
f"{self.string} Repository structure for {self.ref.replace('tags/','')} is not compliant"
)
if self.repository_manifest.content_in_root:
self.content.path.remote = ""
# Handle potential errors
if self.validate.errors:
for error in self.validate.errors:
if not self.hacs.status.startup:
self.logger.error("%s %s", self.string, error)
return self.validate.success
async def async_post_registration(self):
"""Registration."""
# Set name
self.update_filenames()
self.content.path.local = self.localpath
if self.hacs.system.action:
await self.hacs.validation.async_run_repository_checks(self)
@concurrent(concurrenttasks=10, backoff_time=5)
async def update_repository(self, ignore_issues=False, force=False):
"""Update."""
if not await self.common_update(ignore_issues, force) and not force:
return
# Get theme objects.
if self.repository_manifest.content_in_root:
self.content.path.remote = ""
# Update name
self.update_filenames()
self.content.path.local = self.localpath
# Signal entities to refresh
if self.data.installed:
self.hacs.async_dispatch(
HacsDispatchEvent.REPOSITORY,
{
"id": 1337,
"action": "update",
"repository": self.data.full_name,
"repository_id": self.data.id,
},
)
def update_filenames(self) -> None:
"""Get the filename to target."""
for treefile in self.tree:
if treefile.full_path.startswith(
self.content.path.remote
) and treefile.full_path.endswith(".yaml"):
self.data.file_name = treefile.filename
| [
"[email protected]"
] | |
bd2641b8e5b2b74521b6620cea1f61afcd186eae | 77077a391973d1f8c05647d08fc135facd04fc5e | /xlsxwriter/test/comparison/test_background02.py | 25a76b75104c00aaf83a899b79d12ccfe831151a | [
"BSD-2-Clause-Views"
] | permissive | DeltaEpsilon7787/XlsxWriter | 28fb1012eaa42ea0f82e063f28c0c548ca016c5e | 550b9c5bd678c861dcc9f6f4072b33a69566e065 | refs/heads/main | 2023-08-02T09:14:10.657395 | 2021-09-06T10:51:56 | 2021-09-06T10:51:56 | 384,948,081 | 0 | 0 | NOASSERTION | 2021-07-11T12:57:26 | 2021-07-11T12:57:25 | null | UTF-8 | Python | false | false | 1,300 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
from io import BytesIO
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('background02.xlsx')
def test_create_file(self):
"""Test the creation of an XlsxWriter file with a background image."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_background(self.image_dir + 'logo.jpg')
workbook.close()
self.assertExcelEqual()
def test_create_file_bytestream(self):
"""Test the creation of an XlsxWriter file with a background image."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
image_file = open(self.image_dir + 'logo.jpg', 'rb')
image_data = BytesIO(image_file.read())
image_file.close()
worksheet.set_background(image_data, is_byte_stream=True)
workbook.close()
self.assertExcelEqual()
| [
"[email protected]"
] | |
f3b5b32158007db75f97c4df7a3bdae34ab46ac3 | 37cfcdfa3b8f1499f5899d2dfa2a48504a690abd | /test/functional/mining_prioritisetransaction.py | fe0256f3a78b06dd52f4dbc84cd78a7169c450e4 | [
"MIT"
] | permissive | CJwon-98/Pyeongtaekcoin | 28acc53280be34b69c986198021724181eeb7d4d | 45a81933a98a7487f11e57e6e9315efe740a297e | refs/heads/master | 2023-08-17T11:18:24.401724 | 2021-10-14T04:32:55 | 2021-10-14T04:32:55 | 411,525,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,616 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Pyeongtaekcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the prioritisetransaction mining RPC."""
import time
from test_framework.messages import COIN, MAX_BLOCK_BASE_SIZE
from test_framework.test_framework import PyeongtaekcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
class PrioritiseTransactionTest(PyeongtaekcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-printpriority=1"], ["-printpriority=1"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Test `prioritisetransaction` required parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction)
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '')
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0)
# Test `prioritisetransaction` invalid extra parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0, 0, 0)
# Test `prioritisetransaction` invalid `txid`
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].prioritisetransaction, txid='foo', fee_delta=0)
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000')", self.nodes[0].prioritisetransaction, txid='Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000', fee_delta=0)
# Test `prioritisetransaction` invalid `dummy`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-1, "JSON value is not a number as expected", self.nodes[0].prioritisetransaction, txid, 'foo', 0)
assert_raises_rpc_error(-8, "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.", self.nodes[0].prioritisetransaction, txid, 1, 0)
# Test `prioritisetransaction` invalid `fee_delta`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].prioritisetransaction, txid=txid, fee_delta='foo')
self.txouts = gen_return_txouts()
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
utxo_count = 90
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
# Create 3 batches of transactions at 3 different fee rate levels
range_size = utxo_count // 3
for i in range(3):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
# more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
for i in range(3):
for j in txids[i]:
assert(j in mempool)
sizes[i] += mempool[j]['size']
assert(sizes[i] > MAX_BLOCK_BASE_SIZE) # Fail => raise utxo_count
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined
self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN))
self.nodes[0].generate(1)
mempool = self.nodes[0].getrawmempool()
self.log.info("Assert that prioritised transaction was mined")
assert(txids[0][0] not in mempool)
assert(txids[0][1] in mempool)
high_fee_tx = None
for x in txids[2]:
if x not in mempool:
high_fee_tx = x
# Something high-fee should have been mined!
assert(high_fee_tx is not None)
# Add a prioritisation before a tx is in the mempool (de-prioritising a
# high-fee transaction so that it's now low fee).
self.nodes[0].prioritisetransaction(txid=high_fee_tx, fee_delta=-int(2*base_fee*COIN))
# Add everything back to mempool
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Check to make sure our high fee rate tx is back in the mempool
mempool = self.nodes[0].getrawmempool()
assert(high_fee_tx in mempool)
# Now verify the modified-high feerate transaction isn't mined before
# the other high fee transactions. Keep mining until our mempool has
# decreased by all the high fee size that we calculated above.
while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
self.nodes[0].generate(1)
# High fee transaction should not have been mined, but other high fee rate
# transactions should have been.
mempool = self.nodes[0].getrawmempool()
self.log.info("Assert that de-prioritised transaction is still in mempool")
assert(high_fee_tx in mempool)
for x in txids[2]:
if (x != high_fee_tx):
assert(x not in mempool)
# Create a free transaction. Should be rejected.
utxo_list = self.nodes[0].listunspent()
assert(len(utxo_list) > 0)
utxo = utxo_list[0]
inputs = []
outputs = {}
inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[0].getnewaddress()] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
tx_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx)["hex"]
tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"]
# This will raise an exception due to min relay fee not being met
assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
assert(tx_id not in self.nodes[0].getrawmempool())
# This is a less than 1000-byte transaction, so just set the fee
# to be the minimum for a 1000-byte transaction and check that it is
# accepted.
self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN))
self.log.info("Assert that prioritised free transaction is accepted to mempool")
assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id)
assert(tx_id in self.nodes[0].getrawmempool())
# Test that calling prioritisetransaction is sufficient to trigger
# getblocktemplate to (eventually) return a new block.
mock_time = int(time.time())
self.nodes[0].setmocktime(mock_time)
template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN))
self.nodes[0].setmocktime(mock_time+10)
new_template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert(template != new_template)
if __name__ == '__main__':
PrioritiseTransactionTest().main()
| [
"[email protected]"
] | |
be05c884bf49420daeef4374a004c5cda9062076 | 306afd5282d9c24d58297478a1728a006c29e57e | /python3/0213_House_Robber_II.py | 4d1ea3b9abd8dcecdc9fed21a9a7723218c62808 | [] | no_license | ytatus94/Leetcode | d2c1fe3995c7a065139f772569485dc6184295a9 | 01ee75be4ec9bbb080f170cb747f3fc443eb4d55 | refs/heads/master | 2023-06-08T17:32:34.439601 | 2023-05-29T04:33:19 | 2023-05-29T04:33:19 | 171,921,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | class Solution:
def rob(self, nums: List[int]) -> int:
if len(nums) == 1:
return nums[0]
        # The first and last houses sit next to each other on the circle, so they
        # can't both be robbed. Split into two cases and solve each one as an
        # ordinary House Robber I problem.
        res1 = self.house_robber(nums[:-1]) # case 1: skip the last house
        res2 = self.house_robber(nums[1:]) # case 2: skip the first house
return max(res1, res2)
# 一般的方式
# def house_robber(self, nums):
# if len(nums) == 0:
# return 0
# if len(nums) == 1:
# return nums[0]
# dp = [0 for i in range(len(nums) + 1)]
# dp[0] = 0
# dp[1] = nums[0]
# for i in range(2, len(nums) + 1):
# dp[i] = max(dp[i - 1], dp[i - 2] + nums[i - 1])
# return dp[len(nums)]
# 用滾動數組的方式
def house_robber(self, nums):
if len(nums) == 0:
return 0
if len(nums) == 1:
return nums[0]
old = 0
new = nums[0]
for i in range(2, len(nums) + 1):
t = max(new, old + nums[i - 1])
old = new
new = t
return new
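# Illustrative checks (hypothetical inputs): because the houses form a circle,
# the first and last house can never both be robbed.
#   Solution().rob([2, 3, 2]) -> 3   (rob only the middle house)
#   Solution().rob([1, 2, 3, 1]) -> 4 (rob the houses worth 1 and 3)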
| [
"[email protected]"
] | |
6e61066574e483ec40858762d15e0fdd68ac4c3e | f6e6d10fefa38303fcc63e924232856c69c3a456 | /dev/buildtool/changelog_commands.py | 2576cdff1b508a2bc671955d60245f08374fa7d0 | [
"Apache-2.0"
] | permissive | zer09/spinnaker | bd5a470db1c4918977984bbc2321fded5a980373 | 22cfbc9b640cc9536b16333ff2f5b412347e7223 | refs/heads/master | 2020-03-10T00:20:33.709888 | 2018-04-05T16:40:36 | 2018-04-05T17:34:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,604 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements changelog commands for buildtool."""
import collections
import copy
import datetime
import logging
import os
import re
import shutil
import textwrap
import urllib2
from buildtool import (
SPINNAKER_GITHUB_IO_REPOSITORY_NAME,
CommandFactory,
CommandProcessor,
RepositoryCommandFactory,
RepositoryCommandProcessor,
BomSourceCodeManager,
BranchSourceCodeManager,
ConfigError,
UnexpectedError,
CommitMessage,
GitRunner,
check_kwargs_empty,
check_options_set,
check_path_exists,
ensure_dir_exists,
raise_and_log_error,
write_to_path)
BUILD_CHANGELOG_COMMAND = 'build_changelog'
TITLE_LINE_MATCHER = re.compile(r'\W*\w+\(([^\)]+)\)\s*[:-]?(.*)')
def make_options_with_fallback(options):
"""A hack for now, using git_fallback_branch to support spinnaker.github.io
  That repo does not use release branches; it uses master. So when creating a
  release, this falls back to master for that repo.
"""
options_copy = copy.copy(options)
options_copy.git_fallback_branch = 'master'
return options_copy
class ChangelogRepositoryData(
collections.namedtuple('ChangelogRepositoryData',
['repository', 'summary', 'normalized_messages'])):
"""Captures the change information for a given repository."""
def __cmp__(self, other):
return self.repository.name.__cmp__(other.repository.name)
def partition_commits(self, sort=True):
"""Partition the commit messages by the type of change.
Returns an OrderedDict of the partition ordered by significance.
The keys in the dictionary are the type of change.
The values are a list of git.CommitMessage.
"""
partition_types = [
('Breaking Changes',
re.compile(r'^\s*'
r'(.*?BREAKING CHANGE.*)',
re.MULTILINE)),
('Features',
re.compile(r'^\s*'
r'(?:\*\s+)?'
r'((?:feat|feature)[\(:].*)',
re.MULTILINE)),
('Configuration',
re.compile(r'^\s*'
r'(?:\*\s+)?'
r'((?:config)[\(:].*)',
re.MULTILINE)),
('Fixes',
re.compile(r'^\s*'
r'(?:\*\s+)?'
r'((?:bug|fix)[\(:].*)',
re.MULTILINE)),
('Other',
re.compile(r'.*'))
]
workspace = {}
for msg in self.normalized_messages:
text = msg.message
match = None
for section, regex in partition_types:
match = regex.search(text)
if match:
if section not in workspace:
workspace[section] = []
workspace[section].append(msg)
break
result = collections.OrderedDict()
for spec in partition_types:
key = spec[0]
if key in workspace:
result[key] = (self._sort_partition(workspace[key])
if sort
else workspace[key])
return result
@staticmethod
def _sort_partition(commit_messages):
"""sorting key function for CommitMessage.
Returns the commit messages sorted by affected component while
preserving original ordering with the component. The affected
component is the <THING> for titles in the form TYPE(<THING>): <MESSAGE>
"""
thing_dict = {}
def get_thing_list(title_line):
"""Return bucket for title_line, adding new one if needed."""
match = TITLE_LINE_MATCHER.match(title_line)
thing = match.group(1) if match else None
if not thing in thing_dict:
thing_dict[thing] = []
return thing_dict[thing]
for message in commit_messages:
title_line = message.message.split('\n')[0]
get_thing_list(title_line).append(message)
result = []
for thing in sorted(thing_dict.keys()):
result.extend(thing_dict[thing])
return result
class ChangelogBuilder(object):
"""Knows how to create changelogs from git.RepositorySummary."""
STRIP_GITHUB_ID_MATCHER = re.compile(r'^(.*?)\s*\(#\d+\)$')
def __init__(self, **kwargs):
self.__entries = []
self.__with_partition = kwargs.pop('with_partition', True)
self.__with_detail = kwargs.pop('with_detail', False)
self.__write_category_heading = self.__with_partition and self.__with_detail
check_kwargs_empty(kwargs)
self.__sort_partitions = True
def clean_message(self, text):
"""Remove trailing "(#<id>)" from first line of message titles"""
parts = text.split('\n', 1)
if len(parts) == 1:
first, rest = text, ''
else:
first, rest = parts
match = self.STRIP_GITHUB_ID_MATCHER.match(first)
if match:
if rest:
return '\n'.join([match.group(1), rest])
return match.group(1)
return text
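  # Illustrative behaviour of clean_message (hypothetical input): a trailing
  # GitHub PR id on the title line is stripped, the rest is left untouched:
  #   ChangelogBuilder().clean_message("fix(core): handle nulls (#123)")
  #   -> "fix(core): handle nulls"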
def add_repository(self, repository, summary):
"""Add repository changes into the builder."""
message_list = summary.commit_messages
normalized_messages = CommitMessage.normalize_message_list(message_list)
self.__entries.append(ChangelogRepositoryData(
repository, summary, normalized_messages))
def build(self):
"""Construct changelog."""
report = []
sep = ''
for entry in sorted(self.__entries):
summary = entry.summary
repository = entry.repository
commit_messages = entry.normalized_messages
name = repository.name
report.append('{sep}## [{title}](#{name}) {version}'.format(
sep=sep, title=name.capitalize(), name=name,
version=summary.version))
if not commit_messages:
report.append(' No Changes')
report.append('\n\n')
continue
if self.__with_partition:
report.extend(self.build_commits_by_type(entry))
report.append('\n')
if self.__with_detail:
report.extend(self.build_commits_by_sequence(entry))
report.append('\n')
sep = '\n\n'
return '\n'.join(report)
def build_commits_by_type(self, entry):
"""Create a section that enumerates changes by partition type.
Args:
entry: [ChangelogRepositoryData] The repository to report on.
Returns:
list of changelog lines
"""
report = []
partitioned_commits = entry.partition_commits(sort=self.__sort_partitions)
if self.__write_category_heading:
report.append('### Changes by Type')
if not partitioned_commits:
report.append(' No Significant Changes.')
return report
one_liner = TITLE_LINE_MATCHER
base_url = entry.repository.origin
level_marker = '#' * 4
for title, commit_messages in partitioned_commits.items():
report.append('{level} {title}'.format(level=level_marker, title=title))
for msg in commit_messages:
first_line = msg.message.split('\n', 1)[0].strip()
clean_text = self.clean_message(first_line)
match = one_liner.match(clean_text)
if match:
text = '**{thing}:** {message}'.format(
thing=match.group(1), message=match.group(2))
else:
text = clean_text
link = '[{short_hash}]({base_url}/commit/{full_hash})'.format(
short_hash=msg.commit_id[:8], full_hash=msg.commit_id,
base_url=base_url)
report.append('* {text} ({link})'.format(text=text, link=link))
report.append('')
return report
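  # Hedged example of the lines this emits (the commit, repository URL, and the
  # partition title "Fixes" are hypothetical, shown only to illustrate the format):
  #   #### Fixes
  #   * **core:** handle empty lists ([abcd1234](https://github.com/example/repo/commit/<full hash>))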
def build_commits_by_sequence(self, entry):
"""Create a section that enumerates all changes in order.
Args:
entry: [ChangelogRepositoryData] The repository to report on.
Returns:
list of changelog lines
"""
level_name = [None, 'MAJOR', 'MINOR', 'PATCH']
report = []
if self.__write_category_heading:
report.append('### Changes by Sequence')
base_url = entry.repository.origin
for msg in entry.normalized_messages:
clean_text = self.clean_message(msg.message)
link = '[{short_hash}]({base_url}/commit/{full_hash})'.format(
short_hash=msg.commit_id[:8], full_hash=msg.commit_id,
base_url=base_url)
level = msg.determine_semver_implication()
report.append('**{level}** ({link})\n{detail}\n'.format(
level=level_name[level], link=link, detail=clean_text))
return report
class BuildChangelogCommand(RepositoryCommandProcessor):
"""Implements the build_changelog."""
def __init__(self, factory, options, **kwargs):
# Use own repository to avoid race conditions when commands are
# running concurrently.
options_copy = copy.copy(options)
options_copy.github_disable_upstream_push = True
super(BuildChangelogCommand, self).__init__(factory, options_copy, **kwargs)
def _do_repository(self, repository):
"""Collect the summary for the given repository."""
return self.git.collect_repository_summary(repository.git_dir)
def _do_postprocess(self, result_dict):
"""Construct changelog from the collected summary, then write it out."""
options = self.options
path = os.path.join(self.get_output_dir(), 'changelog.md')
builder = ChangelogBuilder(with_detail=options.include_changelog_details)
repository_map = {repository.name: repository
for repository in self.source_repositories}
for name, summary in result_dict.items():
builder.add_repository(repository_map[name], summary)
changelog_text = builder.build()
write_to_path(changelog_text, path)
logging.info('Wrote changelog to %s', path)
class BuildChangelogFactory(RepositoryCommandFactory):
"""Builds changelog files."""
def __init__(self, **kwargs):
super(BuildChangelogFactory, self).__init__(
BUILD_CHANGELOG_COMMAND, BuildChangelogCommand,
'Build a git changelog and write it out to a file.',
BomSourceCodeManager, **kwargs)
def init_argparser(self, parser, defaults):
"""Adds command-specific arguments."""
super(BuildChangelogFactory, self).init_argparser(
parser, defaults)
self.add_argument(
parser, 'include_changelog_details', defaults, False,
action='store_true',
help='Include a "details" section with the full commit messages'
' in time sequence in the changelog.')
class PublishChangelogFactory(RepositoryCommandFactory):
def __init__(self, **kwargs):
super(PublishChangelogFactory, self).__init__(
'publish_changelog', PublishChangelogCommand,
'Publish Spinnaker version Changelog to spinnaker.github.io.',
BranchSourceCodeManager, **kwargs)
def init_argparser(self, parser, defaults):
super(PublishChangelogFactory, self).init_argparser(
parser, defaults)
GitRunner.add_parser_args(parser, defaults)
GitRunner.add_publishing_parser_args(parser, defaults)
self.add_argument(
parser, 'spinnaker_version', defaults, None,
help='The version of spinnaker this documentation is for.')
self.add_argument(
parser, 'changelog_gist_url', defaults, None,
help='The gist to the existing changelog content being published.')
class PublishChangelogCommand(RepositoryCommandProcessor):
"""Implements publish_changelog."""
def __init__(self, factory, options, **kwargs):
super(PublishChangelogCommand, self).__init__(
factory, make_options_with_fallback(options),
source_repository_names=[SPINNAKER_GITHUB_IO_REPOSITORY_NAME],
**kwargs)
check_options_set(options, ['spinnaker_version', 'changelog_gist_url'])
try:
logging.debug('Verifying changelog gist exists at "%s"',
options.changelog_gist_url)
urllib2.urlopen(options.changelog_gist_url)
except urllib2.HTTPError as error:
raise_and_log_error(
ConfigError(
'Changelog gist "{url}": {error}'.format(
url=options.changelog_gist_url,
error=error.message)))
def _do_repository(self, repository):
if repository.name != SPINNAKER_GITHUB_IO_REPOSITORY_NAME:
raise_and_log_error(UnexpectedError('Got "%s"' % repository.name))
base_branch = 'master'
self.scm.ensure_git_path(repository, branch=base_branch)
version = self.options.spinnaker_version
if self.options.git_allow_publish_master_branch:
branch_flag = ''
head_branch = 'master'
else:
branch_flag = '-B'
head_branch = version + '-changelog'
files_added = self.prepare_local_repository_files(repository)
git_dir = repository.git_dir
message = 'doc(changelog): Spinnaker Version ' + version
local_git_commands = [
        # These commands accommodate a branch that already exists,
        # because the branch is keyed on the version, not the build. A rejected
        # build that is retried will have the same version,
        # so the branch may already exist from the earlier attempt.
'fetch origin ' + base_branch,
'checkout ' + base_branch,
'checkout {flag} {branch}'.format(
flag=branch_flag, branch=head_branch),
'add ' + ' '.join([os.path.abspath(path) for path in files_added]),
'commit -m "{msg}"'.format(msg=message),
]
    logging.debug('Committing changes into local repository "%s" branch=%s',
repository.git_dir, head_branch)
git = self.git
git.check_run_sequence(git_dir, local_git_commands)
logging.info('Pushing branch="%s" into "%s"',
head_branch, repository.origin)
git.push_branch_to_origin(git_dir, branch=head_branch)
def prepare_local_repository_files(self, repository):
if repository.name != SPINNAKER_GITHUB_IO_REPOSITORY_NAME:
raise_and_log_error(UnexpectedError('Got "%s"' % repository.name))
timestamp = '{:%Y-%m-%d %H:%M:%S %Z}'.format(datetime.datetime.now())
version = self.options.spinnaker_version
changelog_filename = '{version}-changelog.md'.format(version=version)
target_path = os.path.join(repository.git_dir,
'_changelogs', changelog_filename)
major, minor, _ = version.split('.')
logging.debug('Adding changelog file %s', target_path)
with open(target_path, 'w') as f:
# pylint: disable=trailing-whitespace
header = textwrap.dedent(
"""\
---
title: Version {version}
date: {timestamp}
tags: changelogs {major}.{minor}
---
# Spinnaker {version}
""".format(
version=version,
timestamp=timestamp,
major=major, minor=minor))
f.write(header)
f.write('<script src="%s.js"/>' % self.options.changelog_gist_url)
return [target_path]
class PushChangelogFactory(CommandFactory):
def __init__(self, **kwargs):
super(PushChangelogFactory, self).__init__(
'push_changelog_to_gist', PushChangelogCommand,
'Push raw changelog to an existing gist, possibly overwriting what'
' was already there. This is intended for builds only, not publishing.'
' The expectation is that these will be curated then published'
' with the "publish_changelog" command.'
'\nThis will add (or overwrite) the changelog with the name'
' "<branch>-raw-changelog.md".', **kwargs)
def init_argparser(self, parser, defaults):
super(PushChangelogFactory, self).init_argparser(
parser, defaults)
GitRunner.add_parser_args(parser, defaults)
self.add_argument(
parser, 'changelog_path', defaults, None,
help='The path to the changelog to push.')
self.add_argument(
parser, 'git_branch', defaults, None,
help='The branch name that this changelog is for. Note that this does'
' not actually *use* any branches, rather the branch name is used'
             ' to decorate the changelog filename.')
self.add_argument(
parser, 'build_changelog_gist_url', defaults, None,
help='The gist to push the changelog into.')
class PushChangelogCommand(CommandProcessor):
"""Implements push_changelog_to_gist."""
def __init__(self, factory, options, **kwargs):
super(PushChangelogCommand, self).__init__(factory, options, **kwargs)
check_options_set(
options, ['build_changelog_gist_url', 'git_branch'])
if not options.changelog_path:
options.changelog_path = os.path.join(
self.get_output_dir(command=BUILD_CHANGELOG_COMMAND), 'changelog.md')
check_path_exists(options.changelog_path, why='changelog_path')
self.__git = GitRunner(options)
def _do_command(self):
options = self.options
gist_url = options.build_changelog_gist_url
index = gist_url.rfind('/')
if index < 0:
index = gist_url.rfind(':') # ssh gist
gist_id = gist_url[index + 1:]
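    # For example (hypothetical URL), "https://gist.github.com/user/abc123"
    # yields gist_id "abc123"; the ':' fallback above covers ssh-style remotes.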
git_dir = os.path.join(self.get_input_dir(), gist_id)
if not os.path.exists(git_dir):
logging.debug('Cloning gist from %s', gist_url)
ensure_dir_exists(os.path.dirname(git_dir))
self.__git.check_run(os.path.dirname(git_dir), 'clone ' + gist_url)
else:
logging.debug('Updating gist in "%s"', git_dir)
self.__git.check_run(git_dir, 'fetch origin master')
self.__git.check_run(git_dir, 'checkout master')
dest_path = os.path.join(
git_dir, '%s-raw-changelog.md' % options.git_branch)
logging.debug('Copying "%s" to "%s"', options.changelog_path, dest_path)
shutil.copyfile(options.changelog_path, dest_path)
self.__git.check_run(git_dir, 'add ' + os.path.basename(dest_path))
self.__git.check_run(
git_dir, 'commit -a -m "Updated %s"' % os.path.basename(dest_path))
logging.debug('Pushing back gist')
self.__git.check_run(git_dir, 'push -f origin master')
def register_commands(registry, subparsers, defaults):
"""Registers all the commands for this module."""
BuildChangelogFactory().register(registry, subparsers, defaults)
PushChangelogFactory().register(registry, subparsers, defaults)
PublishChangelogFactory().register(registry, subparsers, defaults)
| [
"[email protected]"
] | |
9cbb8de9fa1b82364b1435e1745f5a067f0bce6a | 3669cd260bdab697376feca747d1635d35f42c83 | /lang/clang-devel/files/patch-utils_llvm-build_llvmbuild_main.py | 2bbe4511dd58f04fbedcf24691c3bcbe18e71267 | [] | no_license | tuxillo/DPorts | 58072bc88887c7a53a51988c76a70366bef44a93 | f523fb13a9d3ecc5ce9a8045fdf146ae05de5399 | refs/heads/master | 2020-04-03T08:02:44.297511 | 2013-03-04T07:56:00 | 2013-03-04T07:56:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py |
$FreeBSD: ports/lang/clang-devel/files/patch-utils_llvm-build_llvmbuild_main.py,v 1.2 2012/11/17 05:58:48 svnexp Exp $
--- utils/llvm-build/llvmbuild/main.py.orig
+++ utils/llvm-build/llvmbuild/main.py
@@ -633,7 +633,13 @@
# We handle a few special cases of target names here for historical
# reasons, as these are the names configure currently comes up with.
- native_target_name = { 'x86' : 'X86',
+ native_target_name = { 'amd64' : 'X86',
+ 'arm' : 'ARM',
+ 'i386' : 'X86',
+ 'mips' : 'Mips',
+ 'powerpc' : 'PowerPC',
+ 'sparc64' : 'Sparc',
+ 'x86' : 'X86',
'x86_64' : 'X86',
'Unknown' : None }.get(opts.native_target,
opts.native_target)
| [
"[email protected]"
] | |
bb2aef030b5b31e6b3dc6710d92c38df6a019f77 | e968c7b2a81eac674fe90d4988d49dc76cd6ea90 | /Chap0/project/guess_number_advanced.py | c7469c5b549b102d7e4fce82232709e7f0821658 | [] | no_license | AIHackerTest/0x0o_Py101-004 | f27c2988ef4b755546a2a64bf5f8e225c1c46c93 | cf3fcd4d2618b63e04732ddc0cc9dfdd36e94b8d | refs/heads/master | 2021-06-28T20:05:03.110594 | 2017-09-13T03:41:25 | 2017-09-13T03:41:25 | 103,240,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,957 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Program name: Guess the Number (Advanced)
Author: 0x0
Github: https://github.com/0x0o
Edition:
Edit date: 2017.08.15
Game description: the program generates a 4-digit number from the digits 0-9, with no
repeated digits and a non-zero leading digit, e.g. 1942.
The user enters a 4-digit guess and the program returns a hint:
A means a digit is correct and in the correct position; B means a digit is correct but misplaced.
After each guess the program reports the counts of A and B.
For example, 2A1B means 2 digits are correct and correctly placed, and 1 digit is correct but misplaced.
The game ends when the user guesses the number or uses up the 10 attempts.
"""
import random
# Generate random_list: a list of four distinct digits  # [0, 1, 2, ..., 9] can be written as range(10)
random_list = random.sample(range(10), 4)
# Regenerate if the leading digit is zero
while random_list[0] == 0:
random_list = random.sample(range(10), 4)
print("生成的随机数为 {}".format(random_list))
# Read the user's input and split it into a list of digits
print("请输入 4 位数进行猜测 ")
# Check the user's guess against the secret number
def check_guess(input_list):
a = 0
b = 0
for i, num in enumerate(random_list):
        if int(input_list[i]) == int(random_list[i]):  # digit correct and in the correct position
a += 1
if a == 4:
print("恭喜你全部猜中")
return True
        elif int(input_list[i]) in random_list:  # digit present but in the wrong position
b += 1
if(a != 4):
print("{}A{}B".format(a, b))
return False
chance = 10
for i in range(0,10):
    # Handle the user's guess
guess_num = input("> ")
guess_list = list(guess_num)
guess = check_guess(guess_list)
if chance == 0:
print("用完 10 次机会,游戏结束")
break
if guess:
break
else:
chance -= 1
print("你还有 {} 次机会".format(chance))
| [
"[email protected]"
] | |
fa3d745753daff8cdc2dae59e1518e0bf8f81b84 | 1c7b5b866b505b7b8c47dce504c5bd27a34d5992 | /TargetOffer/和为S的两个数字.py | 9b7088871e2e866064899912df859868121c9059 | [] | no_license | ii0/algorithms-6 | 2dbcb3df504810ea52b41e5129b334f62136d70a | 3eddc77d2f3dafffd177f2a9ee28e9850da2f020 | refs/heads/master | 2022-04-25T23:17:53.332297 | 2019-09-19T14:52:04 | 2019-09-19T14:52:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | """
author: buppter
datetime: 2019/8/15 16:34
Problem description:
Given an array sorted in increasing order and a number S, find two numbers in the array whose sum is exactly S.
If several pairs sum to S, output the pair whose product is the smallest.
Approach:
1. The array is already sorted, so a two-pointer scan works.
2. For an unsorted array, use y = S - x with a map (LeetCode problem 1, "Two Sum"); picking the pair with the smallest product makes this slightly more involved.
"""
class Solution:
def FindNumbersWithSum1(self, array: list, tsum: int) -> list:
if not array:
return []
l, r = 0, len(array) - 1
while l < r:
if array[l] + array[r] < tsum:
l += 1
elif array[l] + array[r] > tsum:
r -= 1
else:
return [array[l], array[r]]
return []
def FindNumbersWithSum2(self, array: list, tsum: int) -> list:
if not array:
return []
dic = {}
res = []
for i, v in enumerate(array):
if tsum - v not in dic:
dic[v] = i
else:
res.append([tsum - v, v])
if len(res) == 1:
return res[0]
if not res:
return []
return self.getMin(res)
def getMin(self, array):
res = []
for i in array:
s = 1
for l in i:
s *= l
res.append(s)
return array[res.index(min(res))]
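# Minimal usage sketch (inputs are assumed examples added for illustration):
#
#   s = Solution()
#   s.FindNumbersWithSum1([1, 2, 4, 7, 11, 15], 15)  # -> [4, 11]
#   s.FindNumbersWithSum2([1, 2, 3, 4, 5], 6)        # -> [1, 5], since 1*5 < 2*4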
| [
"[email protected]"
] | |
ca5a807578a341ab6150858ebc98582151ea5b7b | f3b233e5053e28fa95c549017bd75a30456eb50c | /tyk2_input/55/55-42_wat_20Abox/setin.py | a052af3560b39d0ff4ffd8b4721a1f81128a90d8 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,006 | py | import os
final_common = ["C1", "C2", "C3", "C4", "C5", "C6", "CL1", "C8", "O9", "N10", "C11", "C12", "C13", "N14", "C15", "C16", "N17", "C18", "O19", "CL2", "H1", "H2", "H6", "HN10", "H12", "H13", "H16", "HN17"]
dir = '/mnt/scratch/songlin3/run/tyk2/L55/wat_20Abox/ti_one-step/'
res1i='55'
res1='NNN'
res2list=LIST1
for i in res2list:
a=i.upper()
res2='L'+a
filesdir = dir + "%s_%s"%(res1i,i) + "/" + "files" + "/"
os.chdir(filesdir)
os.system("cp ../../input-files/*in .")
os.system("cp ../%s-%s_merged.* ."%(res1i,i))
pdb = file.readlines(open('%s-%s_merged.pdb'%(res1i,i),'r'))
for line in pdb:
newlist = []
newlist = line.split()
if len(newlist) > 4 and newlist[3] == '%s'%(res1):
resid1 = newlist[4]
os.system("sed -i 's/ZZZ/%s/g' temp_*.in"%(resid1))
break
for line in pdb:
newlist = []
newlist = line.split()
if len(newlist) > 4 and newlist[3] == '%s'%(res2):
resid2 = newlist[4]
os.system("sed -i 's/42/%s/g' temp_*.in"%(resid2))
break
print res1 + '>' + res2
atmnmlist1 = []
for line in pdb:
newlist=[]
newlist = line.split()
if len(newlist) > 4 and newlist[3] == '%s'%(res1) and newlist[2] not in final_common:
atomname = newlist[2]
print atomname
atmnmlist1.append(newlist[2])
print atmnmlist1
sc1 = ':1@'
for num in range(0,len(atmnmlist1)):
sc1 = sc1 + atmnmlist1[num] + ','
print sc1
os.system("sed -i 's/AAA/%s/g' temp_*in"%(sc1))
###res2
print res2 + '>' + res1
atmnmlist2 = []
for line in pdb:
newlist=[]
newlist = line.split()
if len(newlist) > 4 and newlist[3] == '%s'%(res2) and newlist[2] not in final_common:
atomname = newlist[2]
print atomname
atmnmlist2.append(newlist[2])
print atmnmlist2
sc2 = ':2@'
#print len(atmnmlist1)
for num in range(0,len(atmnmlist2)):
sc2 = sc2 + atmnmlist2[num] + ','
print sc2
os.system("sed -i 's/BBB/%s/g' temp_*in"%(sc2))
os.system("cd ..")
os.chdir(dir)
| [
"[email protected]"
] | |
d6d6e742047319fd822c0f16e580902ce8b79fad | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /09_Data_Preparation_for_Machine_Learning/18/04_transform_evaluate.py | 5a5532223e4999a7b4f4ff6f9f11674e3a596ea3 | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | # evaluate knn on the diabetes dataset with robust scaler transform
from numpy import mean
from numpy import std
from pandas import read_csv
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import RobustScaler
# load dataset
dataset = read_csv('pima-indians-diabetes.csv', header=None)
data = dataset.values
# separate into input and output columns
X, y = data[:, :-1], data[:, -1]
# ensure inputs are floats and output is an integer label
X = X.astype('float32')
y = LabelEncoder().fit_transform(y.astype('str'))
# define the pipeline
trans = RobustScaler()
model = KNeighborsClassifier()
pipeline = Pipeline(steps=[('t', trans), ('m', model)])
# evaluate the pipeline
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
n_scores = cross_val_score(pipeline, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
# report pipeline performance
print('Accuracy: %.3f (%.3f)' % (mean(n_scores), std(n_scores)))
| [
"[email protected]"
] | |
4bc4b731c7f905446543b704195279a8088cf9af | 77e9e68cd6da7b7268b9c1bdfa43c6b31c19ed09 | /metrics.py | f4626d370caf73da1065d238ac6f0708bea01c4b | [] | no_license | abhishekdiphu/Automatic-keypoint-localization-in-2dmedical-images | 4fd07da82cb31bdeb7ed987563beae3db5768578 | 9caf76b94ee43fe6917a78e43601974397f3dea2 | refs/heads/main | 2023-03-18T00:23:01.771151 | 2021-03-03T20:00:14 | 2021-03-03T20:00:14 | 308,170,915 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,348 | py | import torch
import numpy as np
import matplotlib.pyplot as plt
import hiwi
eps = 1e-8
import datasets.img as I
class Options(object):
'''
Presetting :
Args :
self.outputRes : output resolution of the model
self.nStack : No of stack hour glass used in the generator network
'''
def __init__(self, outputRes, nStack):
self.outputRes = outputRes
self.nStack = nStack
class PCK(object):
"""docstring for A.O Mader's metrics for lower leg datasets"""
def __init__(self, opts):
super(PCK, self).__init__()
self.opts = opts
self.input_resolution = 256
self.output_resolution = 64
def calc_dists(self, preds, target, normalize):
'''
Args:
pred (multidimentional tensor) : the predicted heatmaps of the model.
target (multidimentional tensor) : ground truth heatmaps of the x-rays images
normalize ( numpy-array) : to put the pred and target in the same scale, not used .
return:
dists (numpy array (c, n)): n is the batch size , c is the column , from 0 to 5, one each for a keypoints
example:
for batch of 4 images :
joints(c)
a_l a_r f_l f_r k_l k_r
0 1 2 3 4 5
0[[1 1 1 1 1 1],
images(n) 1 [1 1 1 1 1 1],
2 [1 1 1 1 1 1],
3 [1 1 1 1 1 1]]
'''
preds = (preds*(self.input_resolution/self.output_resolution)).astype(np.float32)
#print(preds)
target = (target*(self.input_resolution/self.output_resolution)).astype(np.float32)
print(target)
dists = np.zeros((preds.shape[1], preds.shape[0]))
#mader = []
#print(dists)
for n in range(preds.shape[0]):
for c in range(preds.shape[1]):
if target[n, c, 0] > 0 and target[n, c, 1] > 0:
normed_preds = preds[n, c, :] #/ normalize[n]
#print(normed_preds)
normed_targets = target[n, c, :] #/ normalize[n]
#print(normed_targets)
dists[c, n] = np.linalg.norm(normed_preds*np.array([4.1 , 4.1]) - normed_targets*np.array([4.1 , 4.1]))
#dist = hiwi.Evaluation(normed_targets ,normed_preds,
# localized =np.less(dists[c, n], 10) ,
# spacing = np.array([4.1 , 4.1]))
#mader.append(dist)
#print("{}".format(dist))
#print(dists[c , n])
else:
#print(target[n, c, 0] , target[n, c, 1])
dists[c, n] = -1
#for i in mader:
# print("{}".format(i))
#Avg = hiwi.AvgEvaluation(mader)
#fig, ax = plt.subplots()
#plot = Avg.plot_localizations(ax = ax , max_error= 4.1, max_error_mm= 1 )
#plt.show()
#plt.savefig("plot.png")
#print("{}".format(Avg))
return dists
    def dist_acc(self, dists, thr=10):  # px equivalents: 2.44 (10 mm), 4.87 (20 mm), 7.31 (30 mm)
        ''' Return the fraction of distances below the threshold, ignoring entries equal to -1.
        With the 4.1 mm/px pixel spacing used above: 10 mm = 2.44 px, 20 mm = 4.87 px, 30 mm = 7.31 px.
        Args:
            dists (numpy array): return value of calc_dists() --> dists[c, n], already scaled to mm
            thr (float or int): threshold in mm (the distances were converted to mm in calc_dists)
        Return:
            float, fraction of present keypoints whose distance is below the threshold (entries of -1 are ignored)
        '''
dist_cal = np.not_equal(dists, -1)
#print(" Total no of key-point present :" , dist_cal)
num_dist_cal = dist_cal.sum()
#print("Sum of present key-keypoints :", num_dist_cal )
#print("trial :",np.less(dists, thr))
#print("2 :",np.less(dists[dist_cal], thr))
if num_dist_cal > 0:
#print("Accurately detected :",np.less(dists[dist_cal], thr).sum())
#print("dist_cal :",np.less(dists[dist_cal], thr).sum() * 1.0 / num_dist_cal)
return np.less(dists[dist_cal], thr).sum() * 1.0 / num_dist_cal
else:
return -1
def get_max_preds(self, batch_heatmaps):
'''
get predictions from score maps
Args:
heatmap (multi-dimentional array)s: numpy.ndarray([batch_size, num_joints, height, width])
Return:
pred (numpy array) : predictions of the models
'''
assert isinstance(batch_heatmaps, np.ndarray), 'batch_heatmaps should be numpy.ndarray'
assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'
batch_size = batch_heatmaps.shape[0]
num_joints = batch_heatmaps.shape[1]
width = batch_heatmaps.shape[3]
heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = (preds[:, :, 0]) % width
preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)
#print(preds)
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
#print("print pred_mask :", pred_mask)
#print('before mask', preds)
preds *= pred_mask
#print('after mask', preds)
return preds, maxvals
def eval(self, pred, target, alpha=0.5):
'''
        Calculate accuracy according to euclidean distance(true, predicted) < 10 mm, 20 mm, 30 mm,
but uses ground truth heatmap rather than x,y locations
First value to be returned is average accuracy across 'idxs',
followed by individual accuracies
Args:
pred (multidimentional tensor) : the predicted heatmaps of the model.
target (multidimentional tensor) : ground truth heatmaps of the x-rays images
alpha (float) : Not used
return:
avg_acc (float) : Percentage of keypoints localized within the threshold.
cnt (int) : total no of keypoints present
            p_fake (list) : 1 for each keypoint class whose every instance in the batch was localized within the threshold, else 0
'''
idx = list(range(6))
print(len(idx))
norm = 1.0
if True:
h = self.opts.outputRes
w = self.opts.outputRes
print("pred shape: ", pred.shape[0])
norm = np.ones((pred.shape[0], 2)) * np.array([h, w]) / 10
dists = self.calc_dists(pred, target, norm)
acc = np.zeros((len(idx) + 1))
##--------------------calculate minibatch of pfakes ------------------------------##
#acc_for_pfake = np.zeros((len(idx) + 1))
#new_pfake = np.zeros((pred.shape[0], len(idx)))
##--------------------------------------------------------------------------------##
avg_acc = 0.0
cnt = 0
p_fake =[]
for i in range(len(idx)):
acc[i + 1] = self.dist_acc(dists[idx[i]])
if acc[i + 1] >= 0:
p_fake.append(int(acc[i + 1]))
elif acc[i + 1] < 0:
p_fake.append(0)
if acc[i + 1] >= 0:
avg_acc = avg_acc + acc[i + 1]
cnt += 1
print('acc[%d] = %f' % (i + 1, acc[i + 1]))
print(p_fake)
avg_acc = 1.0 * avg_acc / cnt if cnt != 0 else 0
if cnt != 0:
acc[0] = avg_acc
#-----------------------------------------------------------------------------#
#for n in range(pred.shape[0]):
# for c in range(len(idx)):
# acc_for_pfake[i + 1] = self.dist_acc(dists[idx[c] ,n])
# if acc_for_pfake[i + 1] >= 0:
# new_pfake[n ,c]= int(acc_for_pfake[i + 1])
# elif acc_for_pfake[i + 1] < 0:
# new_pfake[n ,c]= 1
#print("===============p-fake===============")
#print(new_pfake)
#print("==============================================================================")
return avg_acc,cnt,p_fake
def StackedHourGlass(self, output, target, alpha=0.5):
'''
        Get the accuracy by computing the euclidean distance (pred, target) < 10 mm, 20 mm, 30 mm.
        Args:
            outputs (multidim tensor) : prediction heatmaps from the stacked hourglass
            target (multidim tensor) : ground truth heatmaps from the dataset.
        return :
            the result of self.eval() on the final-stack predictions (avg_acc, cnt, p_fake).
'''
predictions = self.get_max_preds(output[self.opts.nStack-1].detach().cpu().numpy())
comb_pred = np.sum(output[-1].detach().cpu().numpy()[0], axis=0)
#print(comb_pred)
#plt.imshow(comb_pred)
#plt.colorbar()
#plt.savefig('comb_hmap.png' , cmap = 'gist_gray' ) ## uncommented
#plt.clf()
#plt.imshow(np.sum(target.detach().cpu().numpy()[0], axis=0), cmap ='seismic')
#plt.colorbar()
#plt.savefig('gt_hmap.png')
#plt.clf()
target = self.get_max_preds(target.cpu().numpy())
return self.eval(predictions[0], target[0], alpha)
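# Minimal usage sketch (tensor shapes are assumptions; in the training loop
# `output` comes from the stacked-hourglass generator and `target` from the dataloader):
#
#   opts = Options(outputRes=64, nStack=2)
#   pck = PCK(opts)
#   # output: list of nStack predicted heatmap tensors, each of shape (N, 6, 64, 64)
#   # target: ground-truth heatmap tensor of shape (N, 6, 64, 64)
#   avg_acc, cnt, p_fake = pck.StackedHourGlass(output, target)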
| [
"[email protected]"
] | |
1787a9d7699255bd09744b9a3cdb66697a4b4de1 | 468f397949b514c03b8b497bdff0c7dc6dff753c | /addons/hc_person/__openerp__.py | 542444eb5404795bd7d17d31403ebb8ab04d8f9e | [] | no_license | LasLabs/odoo-fhir | bc1750de277c71a453a1c75a2f3fbe6ffc8faf4b | 4eaccbd218f89587217b932651f4eb38feb43047 | refs/heads/master | 2021-01-20T05:09:11.970604 | 2017-04-28T21:44:17 | 2017-04-28T21:44:17 | 89,753,143 | 1 | 0 | null | 2017-04-29T00:00:03 | 2017-04-29T00:00:03 | null | UTF-8 | Python | false | false | 1,736 | py | # -*- coding: utf-8 -*-
{
'name': "Person",
'summary': """
A person independent of a specific health-related context
""",
'description': """
Demographics and administrative information about a person independent of a specific health-related context.
**Scope and Usage**
An individual has identity outside of a healthcare setting. The Person resource is used to capture
this information and to relate the person as an individual to other resources that do have a health-related context.
For example, while a patient resource may be created and maintained by each organization providing
care for that person as a patient, a person resource provides a mechanism for linking patient resources
across different organizations and their unique patient identity domains.
""",
'author': "HL7 FHIR",
'website': "https://hl7-fhir.github.io/person.html",
'contributors': "Luigi Sison",
'maintainer': "Luigi Sison",
'license': "GPL-3",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Health Care',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['hc_base'],
# always loaded
'data': [
'security/ir.model.access.csv',
'security/hc_person_security.xml',
'views/hc_res_person_views.xml',
'views/hc_res_person_templates.xml',
],
# only loaded in demonstration mode
'demo': [
'demo/demo.xml',
],
'installable': 'True',
# 'auto-install': 'True',
}
| [
"[email protected]"
] | |
23432df9eef11bab3e6a972cfdcc73e473190c62 | c43c88015f9498aed5f3b5a339d245c31781444e | /Free/l10n_ru/__manifest__.py | 9b57e4476142649d8c7f06a54fccc7adfca950b0 | [] | no_license | mulaudzicalvin/perpul | 65106d41d5197fea17628ac1a7fa7e581d29d75e | 00e3a5ee1771d2e09a48460ca23c2e9c2ef507d6 | refs/heads/master | 2020-03-09T18:39:33.131420 | 2018-02-05T05:17:36 | 2018-02-05T05:17:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,046 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-2016 CodUP (<http://codup.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Russia - Accounting',
'version': '3.0',
'summary': 'План счетов РФ',
'category': 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Russia in OpenERP.
==============================================================================
Возможности:
- План счетов бухгалтерского учёта финансово-хозяйственной деятельности организаций, утверждённый Приказом Минфина РФ от 31.10.2000 года № 94н
""",
'author': 'CodUP',
'website': 'http://codup.com',
'depends': ['account'],
'demo': [],
'data': [
'data/account_chart.xml',
'data/account.account.template.csv',
'data/account_chart_template.xml',
'data/account_tax_template.xml',
'data/account_chart_template.yml',
],
'sequence': 1,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | [
"[email protected]"
] | |
65932c9daac0c0d74e2dfd51da456a1788016eff | a55fa75d170dec85b230b68046aae7cb18a7ea55 | /backend/mobile_8_dec_dev_16442/wsgi.py | 8eb7242448c4fc2456e1d329215a5f4645042dc1 | [] | no_license | crowdbotics-apps/mobile-8-dec-dev-16442 | be0c8274472d7d396bbf12722a80cda25b371590 | 50cc76003f526ed3cfdb811988812663f449918e | refs/heads/master | 2023-06-30T04:40:18.561586 | 2020-12-08T09:16:42 | 2020-12-08T09:16:42 | 319,529,324 | 0 | 0 | null | 2021-08-03T20:05:31 | 2020-12-08T04:50:35 | JavaScript | UTF-8 | Python | false | false | 421 | py | """
WSGI config for mobile_8_dec_dev_16442 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mobile_8_dec_dev_16442.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
bd8e7ae6df45a567320e605bd58c48a15da5e7c5 | dcb1904a6acbee6a4102a86468f7f805dd4326f6 | /hackerrank_tuple.py | 1292ffd1494d12e6826137c66f939a1a62f1472f | [] | no_license | Rabbi50/HackerRank-Problem-Solve | b7304eaaf42a9a4b85cfd9d53646d4c69f066ee1 | 1501a802f86f13c98acd75936ce79e71c862128d | refs/heads/master | 2020-09-21T19:21:16.883823 | 2020-07-14T14:40:06 | 2020-07-14T14:40:06 | 224,897,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | # if __name__ == '__main__':
# n = int(input())
# # integer_list = map(int, input().split())
# # print()
# input_line=input()
# input_list=input_line.split()
# for i in range(n):
# input_list[i]=int(input_list[i])
# #print(input_list)
# t=tuple(input_list)
# print(hash(3))
# if __name__ == '__main__':
# n = int(input())
# integer_list = map(int, input().split())
# print(hash(tuple(integer_list)))
# numbers = input().strip().split()
# for i in range(len(numbers)):
# numbers[i] = int(numbers[i])
# T = tuple(numbers)
# print(hash(T))
if __name__ == '__main__':
n = int(input())
integer_list = map(int, input().split())
print(hash(tuple(integer_list))) | [
"[email protected]"
] | |
42baf2d7ff0468903f6f4794c8562724d5b3a362 | 8bb4a472344fda15985ac322d14e8f4ad79c7553 | /Python3-Core/src/test/prompto/translate/omo/TestCategories.py | 00b03872c5e6ee79505515f577f99228f18b9296 | [] | no_license | prompto/prompto-python3 | c6b356f5af30c6826730ba7f2ad869f341983a2d | 64bd3d97d4702cc912097d41d961f7ab3fd82bee | refs/heads/master | 2022-12-24T12:33:16.251468 | 2022-11-27T17:37:56 | 2022-11-27T17:37:56 | 32,623,633 | 4 | 0 | null | 2019-05-04T11:06:05 | 2015-03-21T07:17:25 | Python | UTF-8 | Python | false | false | 1,677 | py | from prompto.parser.o.BaseOParserTest import BaseOParserTest
class TestCategories(BaseOParserTest):
def setUp(self):
super(type(self), self).setUp()
def testAttributeConstructor(self):
self.compareResourceOMO("categories/attributeConstructor.poc")
def testCopyFromAscendant(self):
self.compareResourceOMO("categories/copyFromAscendant.poc")
def testCopyFromAscendantWithOverride(self):
self.compareResourceOMO("categories/copyFromAscendantWithOverride.poc")
def testCopyFromDescendant(self):
self.compareResourceOMO("categories/copyFromDescendant.poc")
def testCopyFromDescendantWithOverride(self):
self.compareResourceOMO("categories/copyFromDescendantWithOverride.poc")
def testCopyFromDocument(self):
self.compareResourceOMO("categories/copyFromDocument.poc")
def testCopyFromStored(self):
self.compareResourceOMO("categories/copyFromStored.poc")
def testEmptyConstructor(self):
self.compareResourceOMO("categories/emptyConstructor.poc")
def testEquals(self):
self.compareResourceOMO("categories/equals.poc")
def testLiteralConstructor(self):
self.compareResourceOMO("categories/literalConstructor.poc")
def testPopulateFalse(self):
self.compareResourceOMO("categories/populateFalse.poc")
def testResourceAttribute(self):
self.compareResourceOMO("categories/resourceAttribute.poc")
def testSynonymConstructor(self):
self.compareResourceOMO("categories/synonymConstructor.poc")
def testValueConstructor(self):
self.compareResourceOMO("categories/valueConstructor.poc")
| [
"[email protected]"
] | |
64aa5b08d5c62364013595552c7828a2a2d1976f | df0461f16c82af1fd5c580dd9ab91094158e4c43 | /artifacts/proxy.py | e514b2eee728675f2f21105934a81b76fe67406e | [
"Apache-2.0"
] | permissive | lucaschultz/unearth | 738021310178062f0d1893a86fe68e99eaf98b74 | 60bbc887415205b23483d0cb99c3774ab47c9c66 | refs/heads/master | 2020-04-22T17:20:34.191739 | 2019-02-25T15:21:44 | 2019-02-25T15:21:44 | 170,538,069 | 2 | 0 | Apache-2.0 | 2019-02-13T16:12:55 | 2019-02-13T16:12:54 | null | UTF-8 | Python | false | false | 623 | py | from SystemConfiguration import SCDynamicStoreCreate, SCDynamicStoreCopyValue
factoid = 'proxies'
def fact():
    '''Returns the current proxy auto-config (PAC) URL, if one is set'''
proxies = 'None'
net_config = SCDynamicStoreCreate(None, "net", None, None)
proxy_info = SCDynamicStoreCopyValue(net_config, "State:/Network/Global/Proxies")
if proxy_info and proxy_info.get('ProxyAutoConfigURLString'):
try:
proxies = proxy_info['ProxyAutoConfigURLString']
except KeyError as err:
pass
return {factoid: proxies}
if __name__ == '__main__':
print '<result>%s</result>' % fact()[factoid]
| [
"[email protected]"
] | |
07eee5fb5e2ef6d4bba6977b2f628b3aa2179927 | c957fbcb133093d3331731259c557cef5ccf45d1 | /src/contentbase/json_renderer.py | 9f0b60111957e16ac2aaf8bf69633c4a3d46a99c | [
"MIT"
] | permissive | ClinGen/clincoded | da2aa2c08cf98f7af4953f81b13b94653b9c8264 | 5624c74546ce2a44eda00ee632a8de8c2099da10 | refs/heads/dev | 2022-09-27T15:48:08.000844 | 2021-08-03T19:05:54 | 2021-08-03T19:05:54 | 36,758,056 | 31 | 10 | MIT | 2022-09-16T19:33:53 | 2015-06-02T19:54:12 | JavaScript | UTF-8 | Python | false | false | 1,358 | py | from pyramid.threadlocal import get_current_request
import json
import pyramid.renderers
import uuid
def includeme(config):
config.add_renderer(None, json_renderer)
class JSON(pyramid.renderers.JSON):
'''Provide easier access to the configured serializer
'''
def dumps(self, value):
request = get_current_request()
default = self._make_default(request)
return json.dumps(value, default=default, **self.kw)
class BinaryFromJSON:
def __init__(self, app_iter):
self.app_iter = app_iter
def __len__(self):
return len(self.app_iter)
def __iter__(self):
for s in self.app_iter:
yield s.encode('utf-8')
class JSONResult(object):
def __init__(self):
self.app_iter = []
self.write = self.app_iter.append
@classmethod
def serializer(cls, value, **kw):
fp = cls()
json.dump(value, fp, **kw)
if str is bytes:
return fp.app_iter
else:
return BinaryFromJSON(fp.app_iter)
json_renderer = JSON(serializer=JSONResult.serializer)
def uuid_adapter(obj, request):
return str(obj)
def listy_adapter(obj, request):
return list(obj)
json_renderer.add_adapter(uuid.UUID, uuid_adapter)
json_renderer.add_adapter(set, listy_adapter)
json_renderer.add_adapter(frozenset, listy_adapter)
| [
"[email protected]"
] | |
884229c842df85409ab4a26013a3943a54b8d419 | 685038d4be188fa72e9dba1d2213a47ee3aa00a2 | /ECOS2021/Demands/Inputs/Surveys/A/S7/Oct_S7_A.py | ea3279e31338f885612c658133f974cb1e135206 | [] | no_license | CIE-UMSS/Tradeoff-between-Installed-Capacity-and-Unserved-Energy | e5599e4e4ac60b97f0c4c57c5de95e493b1b5ac4 | 459f31552e3ab57a2e52167ab82f8f48558e173c | refs/heads/master | 2023-06-01T18:09:29.839747 | 2021-06-19T15:56:26 | 2021-06-19T15:56:26 | 343,720,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,789 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 31 14:33:07 2020
@author: alejandrosoto
Script for 2 classes of households in Raqaypampa.
"""
# -*- coding: utf-8 -*-
"""
@author: Alejandro Soto
"""
from core import User, np
User_list = []
#User classes definition
HI = User("high income",1)
User_list.append(HI)
LI = User("low income",0)
User_list.append(LI)
'''
Base scenario (BSA): Indoor bulb (3), outdoor bulb (1), radio (1), tv (1), phone charger (2), Water Heater (1), Mixer (1)
Base scenario (B): Indoor bulb (3), outdoor bulb (1), radio (1), tv (1), phone charger (2)
A
Scenario 1: BSA + Fridge (1) + Freezer* (1).
Scenario 2: BSA + Fridge (1).
Scenario 3: BSA + Fridge (1)*.
Scenario 4: BSA + Freezer (1).
Scenario 5: BSA + Welder (1).
Scenario 6: BSA + Grinder (1).
Scenario 7: BSA + Dryer (1).
Scenario 9: All
B
Scenario 8: BSB + Water Heater** (1).
Scenario 10: BSA + Pump Water (1).
Scenario 11: BSA + DVD (1).
Scenario 12: BSA + Blender (1).
Scenario 13: BSA + Iron (1).
Scenario 14: BSA + Mill (1).
* With seasonal variation
** Occasional use
Cold Months: May-Aug Std Cycle 8:00-18:00 Above 10 degrees
Warm Months: Jan-Apr Std Cycle 0:00-23:59 Above 10 degrees
Hot Nonths: Sep-Dec Std Cycle 0:00-10:00; 15:01-23:59 Above 10 degrees
Int Cycle 10:01-15:00
'''
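# Note on the positional arguments below (an assumption based on the RAMP-style
# demand-model convention this script appears to follow, not stated in the file):
# User.Appliance(user, number, power [W], num_windows, total use time [min/day],
# random variability, functioning cycle [min], ...).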
#High-Income
#indoor bulb
HI_indoor_bulb = HI.Appliance(HI,3,7,1,320,0.6,190)
HI_indoor_bulb.windows([1080,1440],[0,0])
#outdoor bulb
HI_outdoor_bulb = HI.Appliance(HI,1,13,1,340,0.1,300)
HI_outdoor_bulb.windows([1100,1440],[0,0])
HI_Radio = HI.Appliance(HI,1,7,1,280,0.3,110)
HI_Radio.windows([420,708],[0,0])
#tv
HI_TV = HI.Appliance(HI,1,60,3,300,0.38,114)
HI_TV.windows([1140,1440],[651,1139],0.35,[300,650])
#phone charger
HI_Phone_charger = HI.Appliance(HI,2,5,3,250,0.4,95)
HI_Phone_charger.windows([1190,1440],[0,420],0.35,[421,1189])
#water_heater
HI_Water_heater = HI.Appliance(HI,1,150,1,60,0.05,30)
HI_Water_heater.windows([0,1440],[0,0])
#mixer
HI_Mixer = HI.Appliance(HI,1,50,1,10,0.5,5,occasional_use = 0.3)
HI_Mixer.windows([420,560],[0,0])
'''
#grinder
HI_Grinder = HI.Appliance(HI,1,750,1,480,0.125,60,occasional_use = 0.3)
HI_Grinder.windows([360,1080],[0,0])
'''
#Lower Income
#indoor bulb
LI_indoor_bulb = LI.Appliance(LI,3,7,2,287,0.4,124)
LI_indoor_bulb.windows([1153,1440],[0,300],0.5)
#outdoor bulb
LI_outdoor_bulb = LI.Appliance(LI,1,13,1,243,0.3,71)
LI_outdoor_bulb.windows([1197,1440],[0,0])
#radio
LI_Radio = LI.Appliance(LI,1,7,2,160,0.3,49)
LI_Radio.windows([480,840],[841,1200],0.5)
#TV
LI_TV = LI.Appliance(LI,1,100,3,250,0.3,74)
LI_TV.windows([1170,1420],[551,1169],0.3,[300,550])
#phone charger
LI_Phone_charger = LI.Appliance(LI,2,5,3,200,0.4,82)
LI_Phone_charger.windows([1020,1440],[0,420],0.3,[720,1019])
| [
"[email protected]"
] | |
15ad905cd84616800887959795f7b7b25d2c0bc8 | f47fe8a7d8cd87b3bfa2e172b4a9fc93e3a4abc2 | /2015/AST1/vezbovni/Ivan/treci.py | 92de3fe389d536db156a2b47a60dfee2cec9d33a | [] | no_license | ispastlibrary/Titan | a4a7e4bb56544d28b884a336db488488e81402e0 | f60e5c6dc43876415b36ad76ab0322a1f709b14d | refs/heads/master | 2021-01-17T19:23:32.839966 | 2016-06-03T13:47:44 | 2016-06-03T13:47:44 | 60,350,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | def fahr(a):
F = (9.0/5)*a + 32
print("temperatura je: ", F)
return F
def cels(a):
C = (a-32)*(5/9)
print("temperatura je:", C)
return C
prvo = fahr(100)
drugo = cels(212)
| [
"[email protected]"
] | |
a79fbdaf7b77d609257fa8ea0f0ee08500283919 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03167/s857237201.py | ed807852eb80a79aa87a94d4e96803a8c9ee4e1c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | def add(a, b):
mod = 1e9+7
c = a + b
if c >= mod:
c -= mod
return c
H, W = [int(x) for x in input().split()]
sl = [input() for _ in range(H)]
dp = [[0 for _ in range(W)] for _ in range(H)]
dp[0][0] = 1
for i in range(H):
for j in range(W):
for frm in [[i-1, j], [i, j-1]]:
r, c = frm
if r >= 0 and c >= 0 and sl[r][c] != '#':
dp[i][j] = add(dp[i][j], dp[r][c])
print(int(dp[H-1][W-1]))
"""
using ll = long long;
int add(int a, int b) {
int MOD = 1e9 + 7;
return (a + b) % MOD;
}
int main() {
int H;
int W;
scanf("%d%d\n", &H, &W);
vector<vector<int>> dp(H, vector<int>(W));
vector<vector<bool>> is_wall(H, vector<bool>(W));
for (int i = 0; i < H; i++) {
scanf("\n");
for (int j = 0; j < W; j++) {
char ch;
scanf("%c", &ch);
//cout << i << " " << j << " " << ch << endl;
if (ch == '#') {
is_wall[i][j] = true;
}
}
}
dp[0][0] = 1;
for (int i = 0; i < H; i++) {
for (int j = 0; j < W; j++) {
//cout << i << " " << j << " " << is_wall[i][j] << endl;
if (!is_wall[i][j]) {
if (i - 1 >= 0) {
dp[i][j] = add(dp[i][j], dp[i-1][j]);
}
if (j - 1 >= 0) {
dp[i][j] = add(dp[i][j], dp[i][j-1]);
}
}
}
}
printf("%d\n", dp[H-1][W-1]);
return 0;
}
""" | [
"[email protected]"
] | |
754965cb553d4700b0aea09f00514c8478a8e968 | 1bf9f6b0ef85b6ccad8cb029703f89039f74cedc | /src/mixed-reality/azext_mixed_reality/vendored_sdks/mixedreality/models/_mixed_reality_client_enums.py | 3c095b5688815dc47d4e0a5d9e8e2dad0cf60b42 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | VSChina/azure-cli-extensions | a1f4bf2ea4dc1b507618617e299263ad45213add | 10b7bfef62cb080c74b1d59aadc4286bd9406841 | refs/heads/master | 2022-11-14T03:40:26.009692 | 2022-11-09T01:09:53 | 2022-11-09T01:09:53 | 199,810,654 | 4 | 2 | MIT | 2020-07-13T05:51:27 | 2019-07-31T08:10:50 | Python | UTF-8 | Python | false | false | 2,085 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity that created the resource.
"""
USER = "User"
APPLICATION = "Application"
MANAGED_IDENTITY = "ManagedIdentity"
KEY = "Key"
class NameUnavailableReason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""reason of name unavailable.
"""
INVALID = "Invalid"
ALREADY_EXISTS = "AlreadyExists"
class Serial(with_metaclass(_CaseInsensitiveEnumMeta, int, Enum)):
"""serial of key to be regenerated
"""
#: The Primary Key.
PRIMARY = 1
#: The Secondary Key.
SECONDARY = 2
class SkuTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""This field is required to be implemented by the Resource Provider if the service has more than
one tier, but is not required on a PUT.
"""
FREE = "Free"
BASIC = "Basic"
STANDARD = "Standard"
PREMIUM = "Premium"
| [
"[email protected]"
] | |
96f7a3504263209d18bf80116a1946eb5dd546ca | caaf1b0754db1e676c37a6f1e58f19183754e654 | /sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_sql_vulnerability_assessment_baseline_operations.py | 5b4020b6bf6c9ef2299df4eac6647c9adb74d137 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | rdomenzain/azure-sdk-for-python | 45dfb39121a0abda048c22e7309733a56259f525 | 58984255aeb904346b6958c5ba742749a2cc7d1b | refs/heads/master | 2023-07-07T06:53:12.967120 | 2023-07-04T16:27:37 | 2023-07-04T16:27:37 | 258,050,134 | 0 | 0 | MIT | 2020-04-23T00:12:14 | 2020-04-23T00:12:13 | null | UTF-8 | Python | false | false | 15,139 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_sql_vulnerability_assessment_request(
resource_group_name: str,
server_name: str,
vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
system_database_name: Literal["master"] = kwargs.pop(
"system_database_name", _params.pop("systemDatabaseName", "master")
)
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/sqlVulnerabilityAssessments/{vulnerabilityAssessmentName}/baselines",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serverName": _SERIALIZER.url("server_name", server_name, "str"),
"vulnerabilityAssessmentName": _SERIALIZER.url(
"vulnerability_assessment_name", vulnerability_assessment_name, "str"
),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["systemDatabaseName"] = _SERIALIZER.query("system_database_name", system_database_name, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_request(
resource_group_name: str,
server_name: str,
vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
baseline_name: Union[str, _models.BaselineName],
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
system_database_name: Literal["master"] = kwargs.pop(
"system_database_name", _params.pop("systemDatabaseName", "master")
)
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/sqlVulnerabilityAssessments/{vulnerabilityAssessmentName}/baselines/{baselineName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serverName": _SERIALIZER.url("server_name", server_name, "str"),
"vulnerabilityAssessmentName": _SERIALIZER.url(
"vulnerability_assessment_name", vulnerability_assessment_name, "str"
),
"baselineName": _SERIALIZER.url("baseline_name", baseline_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["systemDatabaseName"] = _SERIALIZER.query("system_database_name", system_database_name, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class SqlVulnerabilityAssessmentBaselineOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.sql.SqlManagementClient`'s
:attr:`sql_vulnerability_assessment_baseline` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_sql_vulnerability_assessment(
self,
resource_group_name: str,
server_name: str,
vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
**kwargs: Any
) -> Iterable["_models.DatabaseSqlVulnerabilityAssessmentBaselineSet"]:
"""Gets a list of database's sql vulnerability assessment rule baselines.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param vulnerability_assessment_name: The name of the vulnerability assessment. "default"
Required.
:type vulnerability_assessment_name: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentName
:keyword system_database_name: The vulnerability assessment system database name. Default value
is "master". Note that overriding this default value may result in unsupported behavior.
:paramtype system_database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DatabaseSqlVulnerabilityAssessmentBaselineSet or
the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.DatabaseSqlVulnerabilityAssessmentBaselineSet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
system_database_name: Literal["master"] = kwargs.pop(
"system_database_name", _params.pop("systemDatabaseName", "master")
)
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview"))
cls: ClsType[_models.DatabaseSqlVulnerabilityAssessmentBaselineSetListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_sql_vulnerability_assessment_request(
resource_group_name=resource_group_name,
server_name=server_name,
vulnerability_assessment_name=vulnerability_assessment_name,
subscription_id=self._config.subscription_id,
system_database_name=system_database_name,
api_version=api_version,
template_url=self.list_by_sql_vulnerability_assessment.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize(
"DatabaseSqlVulnerabilityAssessmentBaselineSetListResult", pipeline_response
)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_sql_vulnerability_assessment.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/sqlVulnerabilityAssessments/{vulnerabilityAssessmentName}/baselines"
}
@distributed_trace
def get(
self,
resource_group_name: str,
server_name: str,
vulnerability_assessment_name: Union[str, _models.VulnerabilityAssessmentName],
baseline_name: Union[str, _models.BaselineName],
**kwargs: Any
) -> _models.DatabaseSqlVulnerabilityAssessmentBaselineSet:
"""Gets a list of database's sql vulnerability assessment rule baselines.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param server_name: The name of the server. Required.
:type server_name: str
:param vulnerability_assessment_name: The name of the vulnerability assessment. "default"
Required.
:type vulnerability_assessment_name: str or ~azure.mgmt.sql.models.VulnerabilityAssessmentName
:param baseline_name: "default" Required.
:type baseline_name: str or ~azure.mgmt.sql.models.BaselineName
:keyword system_database_name: The vulnerability assessment system database name. Default value
is "master". Note that overriding this default value may result in unsupported behavior.
:paramtype system_database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DatabaseSqlVulnerabilityAssessmentBaselineSet or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.DatabaseSqlVulnerabilityAssessmentBaselineSet
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
system_database_name: Literal["master"] = kwargs.pop(
"system_database_name", _params.pop("systemDatabaseName", "master")
)
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01-preview"))
cls: ClsType[_models.DatabaseSqlVulnerabilityAssessmentBaselineSet] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
server_name=server_name,
vulnerability_assessment_name=vulnerability_assessment_name,
baseline_name=baseline_name,
subscription_id=self._config.subscription_id,
system_database_name=system_database_name,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DatabaseSqlVulnerabilityAssessmentBaselineSet", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/sqlVulnerabilityAssessments/{vulnerabilityAssessmentName}/baselines/{baselineName}"
}
| [
"[email protected]"
] | |
36d142620364d13b8ee4ffa85e69d9eede13dc46 | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /TrackLater-master/tracklater/main.py | 72e022c9b512b865703c592e30c9a97c5fa8c49a | [
"MIT"
] | permissive | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,766 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import importlib
from typing import Dict
from types import ModuleType
from tracklater import settings
from tracklater.timemodules.interfaces import AbstractParser
from tracklater.models import ApiCall, Entry, Issue, Project
from tracklater.database import db
import logging
logger = logging.getLogger(__name__)
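# Replaces this module's entries within the requested window with freshly parsed data, merges
# issues/projects, and records an ApiCall row so later runs know which range was already fetched.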
def store_parser_to_database(parser, module_name, start_date, end_date):
Entry.query.filter(
Entry.module == module_name, Entry.start_time >= start_date,
Entry.start_time <= end_date
).delete()
for entry in parser.entries:
entry.module = module_name
db.session.merge(entry)
for issue in parser.issues:
issue.module = module_name
db.session.merge(issue)
Project.query.delete()
for project in parser.projects:
project.module = module_name
db.session.merge(project)
db.session.add(ApiCall(
start_date=start_date,
end_date=end_date,
module=module_name
))
db.session.commit()
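# Hands the parser the previously fetched date range and current row counts so it can avoid
# re-fetching data that is already stored.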
def set_parser_caching_data(parser, module_name):
apicall = ApiCall.query.filter_by(module=module_name).order_by('created').first()
if apicall:
parser.set_database_values(
start_date=apicall.start_date,
end_date=apicall.end_date,
issue_count=Issue.query.filter_by(module=module_name).count(),
entry_count=Entry.query.filter_by(module=module_name).count(),
project_count=Project.query.filter_by(module=module_name).count(),
)
class Parser(object):
def __init__(self, start_date, end_date, modules=None) -> None:
self.start_date = start_date
self.end_date = end_date
self.modules: Dict[str, AbstractParser] = {}
for module_name in settings.ENABLED_MODULES:
if modules and module_name not in modules:
continue
module: ModuleType = importlib.import_module(
'tracklater.timemodules.{}'.format(module_name)
)
            if getattr(module, 'Parser', None) is None:
                logger.warning('Module %s has no Parser class', module_name)
                continue
            parser = module.Parser(self.start_date, self.end_date)  # type: ignore
self.modules[module_name] = parser
def parse(self) -> None:
for module_name, parser in self.modules.items():
set_parser_caching_data(parser, module_name)
parser.parse()
logger.warning("Parsing %s", module_name)
store_parser_to_database(self.modules[module_name], module_name,
start_date=self.start_date, end_date=self.end_date)
logger.warning("Task done %s", module_name)
| [
"[email protected]"
] | |
d96a9159f7e818a5432f964d54d8790c633a202a | 3458efd930792fc768f53d773603c917d172ac3d | /webapp/store_frontend/StoreFrontendController.py | 6eb3e4be6e6f7a353059d8204ad8dd15017c0497 | [] | no_license | binary-butterfly/shared-delivery | 0a4a90d9c42d7948267d674da1d1ec323d345c1b | 63167a6f7d80c822ac02ffc6dd698fcf1ff9e37e | refs/heads/master | 2022-08-17T15:03:04.568889 | 2020-04-28T16:05:00 | 2020-04-28T16:05:00 | 249,144,165 | 15 | 9 | null | 2022-07-20T23:00:35 | 2020-03-22T08:42:59 | JavaScript | UTF-8 | Python | false | false | 6,031 | py | # encoding: utf-8
"""
Copyright (c) 2017, Ernesto Ruge
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from flask import Blueprint, render_template, flash, redirect, abort
from ..models import Store, OpeningTime, ObjectDump, Region
from ..extensions import db
from .StoreFrontendForm import StoreFrontendForm
from ..store_management.StoreManagementHelper import get_opening_times_for_form, create_store_revision
store_frontend = Blueprint('store_frontend', __name__, template_folder='templates')
@store_frontend.route('/store/<int:store_id>')
def store_frontend_main(store_id):
store = Store.query.get_or_404(store_id)
opening_times_raw = OpeningTime.query.filter_by(store_id=store.id).order_by(OpeningTime.weekday, OpeningTime.open).all()
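    # Group the store's opening times by type ('all', 'delivery', 'pickup') for the template.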
opening_times = {
'all': [],
'delivery': [],
'pickup': []
}
for opening_time_raw in opening_times_raw:
opening_times[opening_time_raw.type].append(opening_time_raw)
return render_template('store-frontend.html', store=store, opening_times=opening_times)
@store_frontend.route('/store/<string:region_slug>/suggest', methods=['GET', 'POST'])
def store_frontend_suggest_new(region_slug):
form = StoreFrontendForm()
region = Region.query.filter_by(slug=region_slug).first()
if not region:
abort(404)
if form.validate_on_submit():
opening_times_data = {}
for field in ['all', 'delivery', 'pickup']:
opening_times_data[field] = getattr(form, 'opening_times_%s' % field)
delattr(form, 'opening_times_%s' % field)
store = Store()
form.populate_obj(store)
store.region_id = region.id
store_suggestion = store.to_dict()
store_suggestion['opening_time'] = []
for field in ['all', 'delivery', 'pickup']:
if getattr(form, '%s_switch' % field):
for opening_time in opening_times_data[field]:
store_suggestion['opening_time'].append({
'type': field,
'weekday': opening_time.weekday.data,
'open': opening_time.open.data_out,
'close': opening_time.close.data_out
})
store_suggestion['category'] = form.category.data
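        # The suggestion is saved as an ObjectDump awaiting review; nothing is written to the Store table here.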
object_dump = ObjectDump()
object_dump.data = store_suggestion
object_dump.type = 'suggestion'
object_dump.object = 'store'
object_dump.region_id = store.region_id
object_dump.object_id = store.id
db.session.add(object_dump)
db.session.commit()
flash('Danke für Deinen Verbesserungsvorschlag! Wir schauen kurz drüber und schalten diesen dann normalerweise binnen 24 Stunden frei.', 'success')
return redirect('/')
return render_template('store-suggest-new.html', form=form)
@store_frontend.route('/store/<int:store_id>/suggest', methods=['GET', 'POST'])
def store_frontend_suggest(store_id):
store = Store.query.get_or_404(store_id)
form = StoreFrontendForm(obj=store)
if form.validate_on_submit():
opening_times_data = {}
for field in ['all', 'delivery', 'pickup']:
opening_times_data[field] = getattr(form, 'opening_times_%s' % field)
delattr(form, 'opening_times_%s' % field)
form.populate_obj(store)
store_suggestion = store.to_dict()
db.session.rollback()
store_suggestion['opening_time'] = []
for field in ['all', 'delivery', 'pickup']:
if getattr(form, '%s_switch' % field):
for opening_time in opening_times_data[field]:
store_suggestion['opening_time'].append({
'type': field,
'weekday': opening_time.weekday.data,
'open': opening_time.open.data_out,
'close': opening_time.close.data_out
})
store_suggestion['category'] = form.category.data
object_dump = ObjectDump()
object_dump.data = store_suggestion
object_dump.type = 'suggestion'
object_dump.object = 'store'
object_dump.region_id = store.region_id
object_dump.object_id = store.id
db.session.add(object_dump)
db.session.commit()
flash('Danke für Deinen Verbesserungsvorschlag! Wir schauen kurz drüber und schalten diesen dann normalerweise binnen 24 Stunden frei.', 'success')
return redirect('/store/%s' % store.id)
return render_template('store-suggest.html', store=store, opening_times=get_opening_times_for_form(store.id), form=form)
| [
"[email protected]"
] | |
6fec5b707195f997de20929632b6dabf2412d1e1 | aa6059b13468595a872897694572767d278318d1 | /RemoveVideoWaterMark/LightVideo.py | d31d6d5dfb4b69a7001b0e7f4d10a0d15614204c | [] | no_license | 18708111002/Tools | 3845273724fc9bd2b1e31991339053448d08bfa2 | c81f6df8ac7e57c0c544be78a706c919c3c57384 | refs/heads/master | 2022-11-09T10:47:33.608418 | 2018-06-08T09:14:11 | 2018-06-08T09:14:11 | 127,995,521 | 1 | 2 | null | 2022-11-02T07:22:24 | 2018-04-04T02:27:44 | Python | UTF-8 | Python | false | false | 1,625 | py | #encode-UTF-8
from watchdog.observers import Observer
from watchdog.events import *
import os  # used by os.system() in on_created
import time
class FileEventHandler(FileSystemEventHandler):
def __init__(self):
FileSystemEventHandler.__init__(self)
def on_moved(self, event):
if event.is_directory:
print("directory moved from {0} to {1}".format(event.src_path,event.dest_path))
else:
print("file moved from {0} to {1}".format(event.src_path,event.dest_path))
def on_created(self, event):
if event.is_directory:
print("Starting processing " + event.src_path)
cmd = (r"D:\ffmpeg\bin\ffmpeg -i " + event.src_path +
r" -vf delogo=x=650:y=32:w=160:h=65 " + event.src_path)
os.system(cmd)
print("directory created:{0}".format(event.src_path))
else:
print("file created:{0}".format(event.src_path))
def on_deleted(self, event):
if event.is_directory:
print("directory deleted:{0}".format(event.src_path))
else:
print("file deleted:{0}".format(event.src_path))
def on_modified(self, event):
if event.is_directory:
print("directory modified:{0}".format(event.src_path))
else:
print("file modified:{0}".format(event.src_path))
if __name__ == "__main__":
observer = Observer()
event_handler = FileEventHandler()
observer.schedule(event_handler,"d:/outputvideo",True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join() | [
"[email protected]"
] | |
08e727a142804947d5964e26d273b8b082b12aed | a560269290749e10466b1a29584f06a2b8385a47 | /Notebooks/py/kbc8894/tutorial-titanic-with-tensorflow/tutorial-titanic-with-tensorflow.py | c8538ff8a97bc15c9974578f4967ebb584534c9d | [] | no_license | nischalshrestha/automatic_wat_discovery | c71befad1aa358ae876d5494a67b0f4aa1266f23 | 982e700d8e4698a501afffd6c3a2f35346c34f95 | refs/heads/master | 2022-04-07T12:40:24.376871 | 2020-03-15T22:27:39 | 2020-03-15T22:27:39 | 208,379,586 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,790 | py | #!/usr/bin/env python
# coding: utf-8
# In[4]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# In[5]:
train_df =pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.info()
test_df.info()
# In[6]:
train_df.describe()
# In[7]:
train_df.head()
# In[8]:
train_df.describe(include=['O'])
# Column Info
# 1. PassengerId, Name : Unique
# 2. Ticket: Almost unique
# 3. Cabin: It has many missing values.
# 4. So, I select these features (Pclass, Sex, Age, SibSp, Parch, Fare, Embarked)
# In[9]:
selected_feature = ['Pclass','Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
parameters = {}
parameters['selected_feature'] = selected_feature
# In[10]:
def cleanup_data(train_df, test_df):
age_mean = pd.concat([train_df['Age'], test_df['Age']], ignore_index=True).mean()
fare_mean = pd.concat([train_df['Fare'], test_df['Fare']], ignore_index=True).mean()
train = train_df[['Survived'] + selected_feature].copy()
train['Sex'] = train['Sex'].map({'male': 1, 'female': 0}).astype(int)
train['Age'] = train['Age'].fillna(age_mean)
train = train.dropna()
train['Embarked'] = train['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
test = test_df[selected_feature].copy()
test['Sex'] = test['Sex'].map({'male': 1, 'female': 0}).astype(int)
test['Age'] = test['Age'].fillna(age_mean)
test['Fare'] = test['Fare'].fillna(fare_mean)
test['Embarked'] = test['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
return train, test
train, test = cleanup_data(train_df, test_df)
# In[11]:
train.describe()
# In[12]:
test.describe()
# In[13]:
def feature_scaling(parameters):
def get_mean(data_list):
return pd.concat(data_list, ignore_index=True).mean()
def get_std(data_list):
return pd.concat(data_list, ignore_index=True).std()
def get_min(data_list):
return pd.concat(data_list, ignore_index=True).min()
def get_max(data_list):
return pd.concat(data_list, ignore_index=True).max()
for feature in parameters['selected_feature']:
if parameters['feature_scaling'] == 'rescaling':
data_list = [train[feature], test[feature]]
min_ = get_min(data_list)
max_ = get_max(data_list)
train[feature] = (train[feature] - min_) / (max_ - min_)
test[feature] = (test[feature] - min_) / (max_ - min_)
elif parameters['feature_scaling'] == 'mean_normalization':
data_list = [train[feature], test[feature]]
mean = get_mean(data_list)
min_ = get_min(data_list)
max_ = get_max(data_list)
train[feature] = (train[feature] - mean) / (max_ - min_)
test[feature] = (test[feature] - mean) / (max_ - min_)
else:
data_list = [train[feature], test[feature]]
mean = get_mean(data_list)
std = get_std(data_list)
train[feature] = (train[feature] - mean) / std
test[feature] = (test[feature] - mean) / std
# In[14]:
parameters['feature_scaling'] = 'standardization'
feature_scaling(parameters)
# In[15]:
train.describe()
# In[16]:
test.describe()
# In[17]:
m = int(train.values.shape[0] * 0.7)
train_X = train[selected_feature].values[:m, :]
train_Y = train['Survived'].values.reshape(-1, 1)[:m, :]
valid_X = train[selected_feature].values[m:, :]
valid_Y = train['Survived'].values.reshape(-1, 1)[m:, :]
test_X = test[selected_feature].values
print(train_X.shape, train_Y.shape)
print(valid_X.shape, valid_Y.shape)
print(test_X.shape)
# In[18]:
import math
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
mini_batch_size -- size of the mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
np.random.seed(seed) # To make your "random" minibatches the same as ours
m = X.shape[0] # number of training examples
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[permutation, :]
shuffled_Y = Y[permutation, :].reshape((m,1))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
for k in range(0, num_complete_minibatches):
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[k * mini_batch_size : (k + 1) * mini_batch_size, :]
mini_batch_Y = shuffled_Y[k * mini_batch_size : (k + 1) * mini_batch_size, :]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size:, :]
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size:, :]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
# In[19]:
import tensorflow as tf
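# Builds the graph: a stack of fully connected layers sized by parameters['layers_dim'], a single-logit
# output, sigmoid cross-entropy loss, an Adam optimizer, and rounded-sigmoid predictions with accuracy.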
def make_model(parameters):
num_feature = len(parameters['selected_feature'])
X = tf.placeholder(tf.float32, [None, num_feature])
Y = tf.placeholder(tf.float32, [None, 1])
    layers_dim = parameters['layers_dim']
fc = tf.contrib.layers.stack(X, tf.contrib.layers.fully_connected, layers_dim)
hypothesis = tf.contrib.layers.fully_connected(fc, 1, activation_fn=None)
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=hypothesis, labels=Y)
cost = tf.reduce_mean(loss)
learning_rate = parameters['learning_rate']
train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
prediction = tf.round(tf.sigmoid(hypothesis))
correct_prediction = tf.equal(prediction, Y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
model = {'X': X, 'Y': Y, 'hypothesis': hypothesis, 'cost': cost,
'train_op': train_op, 'prediction': prediction, 'accuracy': accuracy}
return model
# In[20]:
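# Mini-batch training loop: shuffles the training set every epoch, averages the per-batch cost,
# and saves a checkpoint under parameters['model_name'] for the evaluate/predict cells below.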
def train(parameters, model):
num_epochs = parameters['num_epochs']
minibatch_size = parameters['minibatch_size']
train_size = train_X.shape[0]
saver = tf.train.Saver()
epoch_list = []
cost_list = []
with tf.Session() as sess:
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
for epoch in range(num_epochs):
epoch_cost = 0.
num_minibatches = int(train_size / minibatch_size)
minibatches = random_mini_batches(train_X, train_Y, minibatch_size)
for minibatch in minibatches:
(minibatch_X, minibatch_Y) = minibatch
feed_dict = {model['X'] : minibatch_X, model['Y'] : minibatch_Y}
_ ,minibatch_cost = sess.run([model['train_op'], model['cost']], feed_dict= feed_dict)
epoch_cost += minibatch_cost / num_minibatches
if parameters['print'] and (epoch % parameters['print_freq'] == 0):
print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
if parameters['save_cost'] and (epoch % parameters['save_cost_freq'] == 0):
epoch_list.append(epoch)
cost_list.append(epoch_cost)
saver.save(sess, parameters['model_name'])
return {'epoch_list': epoch_list, 'cost_list' : cost_list}
# In[21]:
# set model parameters
parameters['layers_dim'] = [14]
parameters['learning_rate'] = 0.01
# set train parameters (hyper parameter)
parameters['num_epochs'] = 2000
parameters['minibatch_size'] = 16
# set option parameters
parameters['model_name'] = 'titanic'
parameters['print'] = True
parameters['print_freq'] = 100
parameters['save_cost'] = True
parameters['save_cost_freq'] = 10
for k, v in parameters.items():
print(k, '=', v)
# In[22]:
with tf.Graph().as_default():
model = make_model(parameters)
plot_data = train(parameters, model)
# In[23]:
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
print
if parameters['save_cost']:
plt.plot(plot_data['epoch_list'], plot_data['cost_list'])
# In[24]:
def evaluate(parameters, model):
saver = tf.train.Saver()
with tf.Session() as sess:
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
saver.restore(sess, parameters['model_name'])
print ("Train Accuracy:", model['accuracy'].eval({model['X']: train_X, model['Y']: train_Y}))
print ("Valid Accuracy:", model['accuracy'].eval({model['X']: valid_X, model['Y']: valid_Y}))
# In[25]:
with tf.Graph().as_default():
model = make_model(parameters)
evaluate(parameters, model)
# In[26]:
def predict(parameters, model):
saver = tf.train.Saver()
with tf.Session() as sess:
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
saver.restore(sess, parameters['model_name'])
return model['prediction'].eval({model['X']: test_X})
# In[27]:
answer = pd.DataFrame(test_df['PassengerId'], columns=['PassengerId'])
with tf.Graph().as_default():
model = make_model(parameters)
test_Y = predict(parameters, model)
answer['Survived'] = test_Y.astype(int)
answer.to_csv('answer.csv', index=False)
# In[28]:
from sklearn import linear_model
regr = linear_model.LinearRegression()
regr.fit(train_X, train_Y)
match = np.sum(test_Y == np.round(regr.predict(test_X)))
print('match ratio with linear_model of scikit-learn: ', match / test_Y.shape[0])
| [
"[email protected]"
] | |
5154db3907a3d17cdf26b8e4ff5596f31844b55c | f9f1f887629855bbf12ecb0b7358fed5946b3caa | /.history/app_blog_forum/views_20201117201218.py | bd4632b14091e7318696c02acc68f232583f1721 | [] | no_license | hibamohi5/blog_forum | 4f687cee3ca6bdb1d0302b3657a77c01945404b3 | d6380eb7149355c79276b738da7da94c2ee03570 | refs/heads/main | 2023-01-14T18:33:53.043754 | 2020-11-20T01:52:22 | 2020-11-20T01:52:22 | 314,417,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
def index(request):
return render(request, "index.html")
def register_new_user(request):
errors = User.objects.user_registration_validator(request.POST)
if len(errors) > 0:
for key, value in errors.items():
error_msg = key + ' - ' + value
messages.error(request, error_msg)
return redirect("/")
else:
first_name_from_post = request.POST['first_name']
last_name_from_post = request.POST['last_name']
email_from_post = request.POST['email']
password_from_post = request.POST['password']
new_user = User.objects.create(
first_name=first_name_from_post,
last_name=last_name_from_post,
email=email_from_post,
password=password_from_post
)
print(new_user.id)
request.session['user_id'] = new_user.id
return redirect('/register/view')
def view_home(request):
if 'user_id' not in request.session:
return redirect('/')
user = User.objects.get(id=request.session['user_id'])
context = {
'user':user
}
    print(user.id)
    # 'view_home.html' is a hypothetical placeholder - the original snapshot left this render call incomplete.
    return render(request, 'view_home.html', context)
"[email protected]"
] | |
65e81cbf10ea4356c9f7ac0ff7f733020f175a0b | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_11_01/models/_models_py3.py | 7e8a77c9e75187edeeaefb7f66b43bb743db9604 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 50,031 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._compute_management_client_enums import *
class AccessUri(msrest.serialization.Model):
"""A disk access SAS uri.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar access_sas: A SAS uri for accessing a disk.
:vartype access_sas: str
"""
_validation = {
'access_sas': {'readonly': True},
}
_attribute_map = {
'access_sas': {'key': 'accessSAS', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccessUri, self).__init__(**kwargs)
self.access_sas = None
class ApiError(msrest.serialization.Model):
"""Api error.
:param details: The Api error details.
:type details: list[~azure.mgmt.compute.v2019_11_01.models.ApiErrorBase]
:param innererror: The Api inner error.
:type innererror: ~azure.mgmt.compute.v2019_11_01.models.InnerError
:param code: The error code.
:type code: str
:param target: The target of the particular error.
:type target: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'details': {'key': 'details', 'type': '[ApiErrorBase]'},
'innererror': {'key': 'innererror', 'type': 'InnerError'},
'code': {'key': 'code', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
details: Optional[List["ApiErrorBase"]] = None,
innererror: Optional["InnerError"] = None,
code: Optional[str] = None,
target: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(ApiError, self).__init__(**kwargs)
self.details = details
self.innererror = innererror
self.code = code
self.target = target
self.message = message
class ApiErrorBase(msrest.serialization.Model):
"""Api error base.
:param code: The error code.
:type code: str
:param target: The target of the particular error.
:type target: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: Optional[str] = None,
target: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(ApiErrorBase, self).__init__(**kwargs)
self.code = code
self.target = target
self.message = message
class CreationData(msrest.serialization.Model):
"""Data used when creating a disk.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param create_option: Required. This enumerates the possible sources of a disk's creation.
Possible values include: "Empty", "Attach", "FromImage", "Import", "Copy", "Restore", "Upload".
:type create_option: str or ~azure.mgmt.compute.v2019_11_01.models.DiskCreateOption
:param storage_account_id: Required if createOption is Import. The Azure Resource Manager
identifier of the storage account containing the blob to import as a disk.
:type storage_account_id: str
:param image_reference: Disk source information.
:type image_reference: ~azure.mgmt.compute.v2019_11_01.models.ImageDiskReference
:param gallery_image_reference: Required if creating from a Gallery Image. The id of the
     ImageDiskReference will be the ARM id of the shared gallery image version from which to create a
disk.
:type gallery_image_reference: ~azure.mgmt.compute.v2019_11_01.models.ImageDiskReference
:param source_uri: If createOption is Import, this is the URI of a blob to be imported into a
managed disk.
:type source_uri: str
:param source_resource_id: If createOption is Copy, this is the ARM id of the source snapshot
or disk.
:type source_resource_id: str
:ivar source_unique_id: If this field is set, this is the unique id identifying the source of
this resource.
:vartype source_unique_id: str
:param upload_size_bytes: If createOption is Upload, this is the size of the contents of the
upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for
the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer).
:type upload_size_bytes: long
"""
_validation = {
'create_option': {'required': True},
'source_unique_id': {'readonly': True},
}
_attribute_map = {
'create_option': {'key': 'createOption', 'type': 'str'},
'storage_account_id': {'key': 'storageAccountId', 'type': 'str'},
'image_reference': {'key': 'imageReference', 'type': 'ImageDiskReference'},
'gallery_image_reference': {'key': 'galleryImageReference', 'type': 'ImageDiskReference'},
'source_uri': {'key': 'sourceUri', 'type': 'str'},
'source_resource_id': {'key': 'sourceResourceId', 'type': 'str'},
'source_unique_id': {'key': 'sourceUniqueId', 'type': 'str'},
'upload_size_bytes': {'key': 'uploadSizeBytes', 'type': 'long'},
}
def __init__(
self,
*,
create_option: Union[str, "DiskCreateOption"],
storage_account_id: Optional[str] = None,
image_reference: Optional["ImageDiskReference"] = None,
gallery_image_reference: Optional["ImageDiskReference"] = None,
source_uri: Optional[str] = None,
source_resource_id: Optional[str] = None,
upload_size_bytes: Optional[int] = None,
**kwargs
):
super(CreationData, self).__init__(**kwargs)
self.create_option = create_option
self.storage_account_id = storage_account_id
self.image_reference = image_reference
self.gallery_image_reference = gallery_image_reference
self.source_uri = source_uri
self.source_resource_id = source_resource_id
self.source_unique_id = None
self.upload_size_bytes = upload_size_bytes
class Resource(msrest.serialization.Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class Disk(Resource):
"""Disk resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar managed_by: A relative URI containing the ID of the VM that has the disk attached.
:vartype managed_by: str
:ivar managed_by_extended: List of relative URIs containing the IDs of the VMs that have the
disk attached. maxShares should be set to a value greater than one for disks to allow attaching
them to multiple VMs.
:vartype managed_by_extended: list[str]
:param sku: The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or
UltraSSD_LRS.
:type sku: ~azure.mgmt.compute.v2019_11_01.models.DiskSku
:param zones: The Logical zone list for Disk.
:type zones: list[str]
:ivar time_created: The time when the disk was created.
:vartype time_created: ~datetime.datetime
:param os_type: The Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2019_11_01.models.OperatingSystemTypes
:param hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS
disks only. Possible values include: "V1", "V2".
:type hyper_v_generation: str or ~azure.mgmt.compute.v2019_11_01.models.HyperVGeneration
:param creation_data: Disk source information. CreationData information cannot be changed after
the disk has been created.
:type creation_data: ~azure.mgmt.compute.v2019_11_01.models.CreationData
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the disk to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
:ivar disk_size_bytes: The size of the disk in bytes. This field is read only.
:vartype disk_size_bytes: long
:ivar unique_id: Unique Guid identifying the resource.
:vartype unique_id: str
:param encryption_settings_collection: Encryption settings collection used for Azure Disk
Encryption, can contain multiple encryption settings per disk or snapshot.
:type encryption_settings_collection:
~azure.mgmt.compute.v2019_11_01.models.EncryptionSettingsCollection
:ivar provisioning_state: The disk provisioning state.
:vartype provisioning_state: str
:param disk_iops_read_write: The number of IOPS allowed for this disk; only settable for
UltraSSD disks. One operation can transfer between 4k and 256k bytes.
:type disk_iops_read_write: long
:param disk_m_bps_read_write: The bandwidth allowed for this disk; only settable for UltraSSD
disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of
10.
:type disk_m_bps_read_write: long
:param disk_iops_read_only: The total number of IOPS that will be allowed across all VMs
mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes.
:type disk_iops_read_only: long
:param disk_m_bps_read_only: The total throughput (MBps) that will be allowed across all VMs
mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses
the ISO notation, of powers of 10.
:type disk_m_bps_read_only: long
:ivar disk_state: The state of the disk. Possible values include: "Unattached", "Attached",
"Reserved", "ActiveSAS", "ReadyToUpload", "ActiveUpload".
:vartype disk_state: str or ~azure.mgmt.compute.v2019_11_01.models.DiskState
:param encryption: Encryption property can be used to encrypt data at rest with customer
managed keys or platform managed keys.
:type encryption: ~azure.mgmt.compute.v2019_11_01.models.Encryption
:param max_shares: The maximum number of VMs that can attach to the disk at the same time.
Value greater than one indicates a disk that can be mounted on multiple VMs at the same time.
:type max_shares: int
:ivar share_info: Details of the list of all VMs that have the disk attached. maxShares should
be set to a value greater than one for disks to allow attaching them to multiple VMs.
:vartype share_info: list[~azure.mgmt.compute.v2019_11_01.models.ShareInfoElement]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'managed_by': {'readonly': True},
'managed_by_extended': {'readonly': True},
'time_created': {'readonly': True},
'disk_size_bytes': {'readonly': True},
'unique_id': {'readonly': True},
'provisioning_state': {'readonly': True},
'disk_state': {'readonly': True},
'share_info': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'managed_by': {'key': 'managedBy', 'type': 'str'},
'managed_by_extended': {'key': 'managedByExtended', 'type': '[str]'},
'sku': {'key': 'sku', 'type': 'DiskSku'},
'zones': {'key': 'zones', 'type': '[str]'},
'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'},
'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'},
'disk_m_bps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'long'},
'disk_iops_read_only': {'key': 'properties.diskIOPSReadOnly', 'type': 'long'},
'disk_m_bps_read_only': {'key': 'properties.diskMBpsReadOnly', 'type': 'long'},
'disk_state': {'key': 'properties.diskState', 'type': 'str'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
'max_shares': {'key': 'properties.maxShares', 'type': 'int'},
'share_info': {'key': 'properties.shareInfo', 'type': '[ShareInfoElement]'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
sku: Optional["DiskSku"] = None,
zones: Optional[List[str]] = None,
os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
hyper_v_generation: Optional[Union[str, "HyperVGeneration"]] = None,
creation_data: Optional["CreationData"] = None,
disk_size_gb: Optional[int] = None,
encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
disk_iops_read_write: Optional[int] = None,
disk_m_bps_read_write: Optional[int] = None,
disk_iops_read_only: Optional[int] = None,
disk_m_bps_read_only: Optional[int] = None,
encryption: Optional["Encryption"] = None,
max_shares: Optional[int] = None,
**kwargs
):
super(Disk, self).__init__(location=location, tags=tags, **kwargs)
self.managed_by = None
self.managed_by_extended = None
self.sku = sku
self.zones = zones
self.time_created = None
self.os_type = os_type
self.hyper_v_generation = hyper_v_generation
self.creation_data = creation_data
self.disk_size_gb = disk_size_gb
self.disk_size_bytes = None
self.unique_id = None
self.encryption_settings_collection = encryption_settings_collection
self.provisioning_state = None
self.disk_iops_read_write = disk_iops_read_write
self.disk_m_bps_read_write = disk_m_bps_read_write
self.disk_iops_read_only = disk_iops_read_only
self.disk_m_bps_read_only = disk_m_bps_read_only
self.disk_state = None
self.encryption = encryption
self.max_shares = max_shares
self.share_info = None
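# Example (sketch, not part of the generated client): building a managed disk model that copies an
# existing snapshot. The resource id below is a placeholder.
#
#   disk = Disk(
#       location="westus2",
#       sku=DiskSku(name="Premium_LRS"),
#       creation_data=CreationData(
#           create_option="Copy",
#           source_resource_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/snapshots/<name>",
#       ),
#   )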
class DiskEncryptionSet(Resource):
"""disk encryption set resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param identity: The managed identity for the disk encryption set. It should be given
permission on the key vault before it can be used to encrypt disks.
:type identity: ~azure.mgmt.compute.v2019_11_01.models.EncryptionSetIdentity
:param active_key: The key vault key which is currently used by this disk encryption set.
:type active_key: ~azure.mgmt.compute.v2019_11_01.models.KeyVaultAndKeyReference
:ivar previous_keys: A readonly collection of key vault keys previously used by this disk
encryption set while a key rotation is in progress. It will be empty if there is no ongoing key
rotation.
:vartype previous_keys: list[~azure.mgmt.compute.v2019_11_01.models.KeyVaultAndKeyReference]
:ivar provisioning_state: The disk encryption set provisioning state.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'previous_keys': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'EncryptionSetIdentity'},
'active_key': {'key': 'properties.activeKey', 'type': 'KeyVaultAndKeyReference'},
'previous_keys': {'key': 'properties.previousKeys', 'type': '[KeyVaultAndKeyReference]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
identity: Optional["EncryptionSetIdentity"] = None,
active_key: Optional["KeyVaultAndKeyReference"] = None,
**kwargs
):
super(DiskEncryptionSet, self).__init__(location=location, tags=tags, **kwargs)
self.identity = identity
self.active_key = active_key
self.previous_keys = None
self.provisioning_state = None
class DiskEncryptionSetList(msrest.serialization.Model):
"""The List disk encryption set operation response.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of disk encryption sets.
:type value: list[~azure.mgmt.compute.v2019_11_01.models.DiskEncryptionSet]
:param next_link: The uri to fetch the next page of disk encryption sets. Call ListNext() with
this to fetch the next page of disk encryption sets.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DiskEncryptionSet]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DiskEncryptionSet"],
next_link: Optional[str] = None,
**kwargs
):
super(DiskEncryptionSetList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DiskEncryptionSetUpdate(msrest.serialization.Model):
"""disk encryption set update resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param active_key: Key Vault Key Url and vault id of KeK, KeK is optional and when provided is
used to unwrap the encryptionKey.
:type active_key: ~azure.mgmt.compute.v2019_11_01.models.KeyVaultAndKeyReference
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'active_key': {'key': 'properties.activeKey', 'type': 'KeyVaultAndKeyReference'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
active_key: Optional["KeyVaultAndKeyReference"] = None,
**kwargs
):
super(DiskEncryptionSetUpdate, self).__init__(**kwargs)
self.tags = tags
self.active_key = active_key
class DiskList(msrest.serialization.Model):
"""The List Disks operation response.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of disks.
:type value: list[~azure.mgmt.compute.v2019_11_01.models.Disk]
:param next_link: The uri to fetch the next page of disks. Call ListNext() with this to fetch
the next page of disks.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Disk]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Disk"],
next_link: Optional[str] = None,
**kwargs
):
super(DiskList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DiskSku(msrest.serialization.Model):
"""The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS.
Variables are only populated by the server, and will be ignored when sending a request.
:param name: The sku name. Possible values include: "Standard_LRS", "Premium_LRS",
"StandardSSD_LRS", "UltraSSD_LRS".
:type name: str or ~azure.mgmt.compute.v2019_11_01.models.DiskStorageAccountTypes
:ivar tier: The sku tier.
:vartype tier: str
"""
_validation = {
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[Union[str, "DiskStorageAccountTypes"]] = None,
**kwargs
):
super(DiskSku, self).__init__(**kwargs)
self.name = name
self.tier = None
class DiskUpdate(msrest.serialization.Model):
"""Disk update resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or
UltraSSD_LRS.
:type sku: ~azure.mgmt.compute.v2019_11_01.models.DiskSku
:param os_type: the Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2019_11_01.models.OperatingSystemTypes
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the disk to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
    :param encryption_settings_collection: Encryption settings collection used by Azure Disk
Encryption, can contain multiple encryption settings per disk or snapshot.
:type encryption_settings_collection:
~azure.mgmt.compute.v2019_11_01.models.EncryptionSettingsCollection
:param disk_iops_read_write: The number of IOPS allowed for this disk; only settable for
UltraSSD disks. One operation can transfer between 4k and 256k bytes.
:type disk_iops_read_write: long
:param disk_m_bps_read_write: The bandwidth allowed for this disk; only settable for UltraSSD
disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of
10.
:type disk_m_bps_read_write: long
:param disk_iops_read_only: The total number of IOPS that will be allowed across all VMs
mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes.
:type disk_iops_read_only: long
:param disk_m_bps_read_only: The total throughput (MBps) that will be allowed across all VMs
mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses
the ISO notation, of powers of 10.
:type disk_m_bps_read_only: long
:param max_shares: The maximum number of VMs that can attach to the disk at the same time.
Value greater than one indicates a disk that can be mounted on multiple VMs at the same time.
:type max_shares: int
:param encryption: Encryption property can be used to encrypt data at rest with customer
managed keys or platform managed keys.
:type encryption: ~azure.mgmt.compute.v2019_11_01.models.Encryption
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'DiskSku'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
'disk_iops_read_write': {'key': 'properties.diskIOPSReadWrite', 'type': 'long'},
'disk_m_bps_read_write': {'key': 'properties.diskMBpsReadWrite', 'type': 'long'},
'disk_iops_read_only': {'key': 'properties.diskIOPSReadOnly', 'type': 'long'},
'disk_m_bps_read_only': {'key': 'properties.diskMBpsReadOnly', 'type': 'long'},
'max_shares': {'key': 'properties.maxShares', 'type': 'int'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
sku: Optional["DiskSku"] = None,
os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
disk_size_gb: Optional[int] = None,
encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
disk_iops_read_write: Optional[int] = None,
disk_m_bps_read_write: Optional[int] = None,
disk_iops_read_only: Optional[int] = None,
disk_m_bps_read_only: Optional[int] = None,
max_shares: Optional[int] = None,
encryption: Optional["Encryption"] = None,
**kwargs
):
super(DiskUpdate, self).__init__(**kwargs)
self.tags = tags
self.sku = sku
self.os_type = os_type
self.disk_size_gb = disk_size_gb
self.encryption_settings_collection = encryption_settings_collection
self.disk_iops_read_write = disk_iops_read_write
self.disk_m_bps_read_write = disk_m_bps_read_write
self.disk_iops_read_only = disk_iops_read_only
self.disk_m_bps_read_only = disk_m_bps_read_only
self.max_shares = max_shares
self.encryption = encryption
class Encryption(msrest.serialization.Model):
"""Encryption at rest settings for disk or snapshot.
:param disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling
encryption at rest.
:type disk_encryption_set_id: str
:param type: The type of key used to encrypt the data of the disk. Possible values include:
"EncryptionAtRestWithPlatformKey", "EncryptionAtRestWithCustomerKey".
:type type: str or ~azure.mgmt.compute.v2019_11_01.models.EncryptionType
"""
_attribute_map = {
'disk_encryption_set_id': {'key': 'diskEncryptionSetId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
disk_encryption_set_id: Optional[str] = None,
type: Optional[Union[str, "EncryptionType"]] = None,
**kwargs
):
super(Encryption, self).__init__(**kwargs)
self.disk_encryption_set_id = disk_encryption_set_id
self.type = type
class EncryptionSetIdentity(msrest.serialization.Model):
"""The managed identity for the disk encryption set. It should be given permission on the key vault before it can be used to encrypt disks.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is
supported. Default value: "SystemAssigned".
:vartype type: str
:ivar principal_id: The object id of the Managed Identity Resource. This will be sent to the RP
from ARM via the x-ms-identity-principal-id header in the PUT request if the resource has a
systemAssigned(implicit) identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the Managed Identity Resource. This will be sent to the RP
from ARM via the x-ms-client-tenant-id header in the PUT request if the resource has a
systemAssigned(implicit) identity.
:vartype tenant_id: str
"""
_validation = {
'type': {'constant': True},
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
type = "SystemAssigned"
def __init__(
self,
**kwargs
):
super(EncryptionSetIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
class EncryptionSettingsCollection(msrest.serialization.Model):
"""Encryption settings for disk or snapshot.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Set this flag to true and provide DiskEncryptionKey and optional
KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and
KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object,
the existing settings remain unchanged.
:type enabled: bool
:param encryption_settings: A collection of encryption settings, one for each disk volume.
:type encryption_settings:
list[~azure.mgmt.compute.v2019_11_01.models.EncryptionSettingsElement]
:param encryption_settings_version: Describes what type of encryption is used for the disks.
Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption
with AAD app.'1.1' corresponds to Azure Disk Encryption.
:type encryption_settings_version: str
"""
_validation = {
'enabled': {'required': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'encryption_settings': {'key': 'encryptionSettings', 'type': '[EncryptionSettingsElement]'},
'encryption_settings_version': {'key': 'encryptionSettingsVersion', 'type': 'str'},
}
def __init__(
self,
*,
enabled: bool,
encryption_settings: Optional[List["EncryptionSettingsElement"]] = None,
encryption_settings_version: Optional[str] = None,
**kwargs
):
super(EncryptionSettingsCollection, self).__init__(**kwargs)
self.enabled = enabled
self.encryption_settings = encryption_settings
self.encryption_settings_version = encryption_settings_version
class EncryptionSettingsElement(msrest.serialization.Model):
"""Encryption settings for one disk volume.
:param disk_encryption_key: Key Vault Secret Url and vault id of the disk encryption key.
:type disk_encryption_key: ~azure.mgmt.compute.v2019_11_01.models.KeyVaultAndSecretReference
:param key_encryption_key: Key Vault Key Url and vault id of the key encryption key.
KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key.
:type key_encryption_key: ~azure.mgmt.compute.v2019_11_01.models.KeyVaultAndKeyReference
"""
_attribute_map = {
'disk_encryption_key': {'key': 'diskEncryptionKey', 'type': 'KeyVaultAndSecretReference'},
'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyVaultAndKeyReference'},
}
def __init__(
self,
*,
disk_encryption_key: Optional["KeyVaultAndSecretReference"] = None,
key_encryption_key: Optional["KeyVaultAndKeyReference"] = None,
**kwargs
):
super(EncryptionSettingsElement, self).__init__(**kwargs)
self.disk_encryption_key = disk_encryption_key
self.key_encryption_key = key_encryption_key
class GrantAccessData(msrest.serialization.Model):
"""Data used for requesting a SAS.
All required parameters must be populated in order to send to Azure.
:param access: Required. Possible values include: "None", "Read", "Write".
:type access: str or ~azure.mgmt.compute.v2019_11_01.models.AccessLevel
:param duration_in_seconds: Required. Time duration in seconds until the SAS access expires.
:type duration_in_seconds: int
"""
_validation = {
'access': {'required': True},
'duration_in_seconds': {'required': True},
}
_attribute_map = {
'access': {'key': 'access', 'type': 'str'},
'duration_in_seconds': {'key': 'durationInSeconds', 'type': 'int'},
}
def __init__(
self,
*,
access: Union[str, "AccessLevel"],
duration_in_seconds: int,
**kwargs
):
super(GrantAccessData, self).__init__(**kwargs)
self.access = access
self.duration_in_seconds = duration_in_seconds
class ImageDiskReference(msrest.serialization.Model):
"""The source image used for creating the disk.
All required parameters must be populated in order to send to Azure.
:param id: Required. A relative uri containing either a Platform Image Repository or user image
reference.
:type id: str
:param lun: If the disk is created from an image's data disk, this is an index that indicates
which of the data disks in the image to use. For OS disks, this field is null.
:type lun: int
"""
_validation = {
'id': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'lun': {'key': 'lun', 'type': 'int'},
}
def __init__(
self,
*,
id: str,
lun: Optional[int] = None,
**kwargs
):
super(ImageDiskReference, self).__init__(**kwargs)
self.id = id
self.lun = lun
class InnerError(msrest.serialization.Model):
"""Inner error details.
:param exceptiontype: The exception type.
:type exceptiontype: str
:param errordetail: The internal error message or exception dump.
:type errordetail: str
"""
_attribute_map = {
'exceptiontype': {'key': 'exceptiontype', 'type': 'str'},
'errordetail': {'key': 'errordetail', 'type': 'str'},
}
def __init__(
self,
*,
exceptiontype: Optional[str] = None,
errordetail: Optional[str] = None,
**kwargs
):
super(InnerError, self).__init__(**kwargs)
self.exceptiontype = exceptiontype
self.errordetail = errordetail
class KeyVaultAndKeyReference(msrest.serialization.Model):
"""Key Vault Key Url and vault id of KeK, KeK is optional and when provided is used to unwrap the encryptionKey.
All required parameters must be populated in order to send to Azure.
:param source_vault: Required. Resource id of the KeyVault containing the key or secret.
:type source_vault: ~azure.mgmt.compute.v2019_11_01.models.SourceVault
:param key_url: Required. Url pointing to a key or secret in KeyVault.
:type key_url: str
"""
_validation = {
'source_vault': {'required': True},
'key_url': {'required': True},
}
_attribute_map = {
'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'},
'key_url': {'key': 'keyUrl', 'type': 'str'},
}
def __init__(
self,
*,
source_vault: "SourceVault",
key_url: str,
**kwargs
):
super(KeyVaultAndKeyReference, self).__init__(**kwargs)
self.source_vault = source_vault
self.key_url = key_url
class KeyVaultAndSecretReference(msrest.serialization.Model):
"""Key Vault Secret Url and vault id of the encryption key.
All required parameters must be populated in order to send to Azure.
:param source_vault: Required. Resource id of the KeyVault containing the key or secret.
:type source_vault: ~azure.mgmt.compute.v2019_11_01.models.SourceVault
:param secret_url: Required. Url pointing to a key or secret in KeyVault.
:type secret_url: str
"""
_validation = {
'source_vault': {'required': True},
'secret_url': {'required': True},
}
_attribute_map = {
'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'},
'secret_url': {'key': 'secretUrl', 'type': 'str'},
}
def __init__(
self,
*,
source_vault: "SourceVault",
secret_url: str,
**kwargs
):
super(KeyVaultAndSecretReference, self).__init__(**kwargs)
self.source_vault = source_vault
self.secret_url = secret_url
class ShareInfoElement(msrest.serialization.Model):
"""ShareInfoElement.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar vm_uri: A relative URI containing the ID of the VM that has the disk attached.
:vartype vm_uri: str
"""
_validation = {
'vm_uri': {'readonly': True},
}
_attribute_map = {
'vm_uri': {'key': 'vmUri', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ShareInfoElement, self).__init__(**kwargs)
self.vm_uri = None
class Snapshot(Resource):
"""Snapshot resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:ivar managed_by: Unused. Always Null.
:vartype managed_by: str
:param sku: The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
:type sku: ~azure.mgmt.compute.v2019_11_01.models.SnapshotSku
:ivar time_created: The time when the disk was created.
:vartype time_created: ~datetime.datetime
:param os_type: The Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2019_11_01.models.OperatingSystemTypes
:param hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS
disks only. Possible values include: "V1", "V2".
:type hyper_v_generation: str or ~azure.mgmt.compute.v2019_11_01.models.HyperVGeneration
:param creation_data: Disk source information. CreationData information cannot be changed after
the disk has been created.
:type creation_data: ~azure.mgmt.compute.v2019_11_01.models.CreationData
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the disk to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
:ivar disk_size_bytes: The size of the disk in bytes. This field is read only.
:vartype disk_size_bytes: long
:ivar unique_id: Unique Guid identifying the resource.
:vartype unique_id: str
    :param encryption_settings_collection: Encryption settings collection used by Azure Disk
Encryption, can contain multiple encryption settings per disk or snapshot.
:type encryption_settings_collection:
~azure.mgmt.compute.v2019_11_01.models.EncryptionSettingsCollection
:ivar provisioning_state: The disk provisioning state.
:vartype provisioning_state: str
:param incremental: Whether a snapshot is incremental. Incremental snapshots on the same disk
occupy less space than full snapshots and can be diffed.
:type incremental: bool
:param encryption: Encryption property can be used to encrypt data at rest with customer
managed keys or platform managed keys.
:type encryption: ~azure.mgmt.compute.v2019_11_01.models.Encryption
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'managed_by': {'readonly': True},
'time_created': {'readonly': True},
'disk_size_bytes': {'readonly': True},
'unique_id': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'managed_by': {'key': 'managedBy', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'SnapshotSku'},
'time_created': {'key': 'properties.timeCreated', 'type': 'iso-8601'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'hyper_v_generation': {'key': 'properties.hyperVGeneration', 'type': 'str'},
'creation_data': {'key': 'properties.creationData', 'type': 'CreationData'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'disk_size_bytes': {'key': 'properties.diskSizeBytes', 'type': 'long'},
'unique_id': {'key': 'properties.uniqueId', 'type': 'str'},
'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'incremental': {'key': 'properties.incremental', 'type': 'bool'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
sku: Optional["SnapshotSku"] = None,
os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
hyper_v_generation: Optional[Union[str, "HyperVGeneration"]] = None,
creation_data: Optional["CreationData"] = None,
disk_size_gb: Optional[int] = None,
encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
incremental: Optional[bool] = None,
encryption: Optional["Encryption"] = None,
**kwargs
):
super(Snapshot, self).__init__(location=location, tags=tags, **kwargs)
self.managed_by = None
self.sku = sku
self.time_created = None
self.os_type = os_type
self.hyper_v_generation = hyper_v_generation
self.creation_data = creation_data
self.disk_size_gb = disk_size_gb
self.disk_size_bytes = None
self.unique_id = None
self.encryption_settings_collection = encryption_settings_collection
self.provisioning_state = None
self.incremental = incremental
self.encryption = encryption
class SnapshotList(msrest.serialization.Model):
"""The List Snapshots operation response.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of snapshots.
:type value: list[~azure.mgmt.compute.v2019_11_01.models.Snapshot]
:param next_link: The uri to fetch the next page of snapshots. Call ListNext() with this to
fetch the next page of snapshots.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Snapshot]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["Snapshot"],
next_link: Optional[str] = None,
**kwargs
):
super(SnapshotList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class SnapshotSku(msrest.serialization.Model):
"""The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
Variables are only populated by the server, and will be ignored when sending a request.
:param name: The sku name. Possible values include: "Standard_LRS", "Premium_LRS",
"Standard_ZRS".
:type name: str or ~azure.mgmt.compute.v2019_11_01.models.SnapshotStorageAccountTypes
:ivar tier: The sku tier.
:vartype tier: str
"""
_validation = {
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[Union[str, "SnapshotStorageAccountTypes"]] = None,
**kwargs
):
super(SnapshotSku, self).__init__(**kwargs)
self.name = name
self.tier = None
class SnapshotUpdate(msrest.serialization.Model):
"""Snapshot update resource.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS.
:type sku: ~azure.mgmt.compute.v2019_11_01.models.SnapshotSku
:param os_type: the Operating System type. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.compute.v2019_11_01.models.OperatingSystemTypes
:param disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
indicates the size of the disk to create. If this field is present for updates or creation with
other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
running VM, and can only increase the disk's size.
:type disk_size_gb: int
    :param encryption_settings_collection: Encryption settings collection used by Azure Disk
Encryption, can contain multiple encryption settings per disk or snapshot.
:type encryption_settings_collection:
~azure.mgmt.compute.v2019_11_01.models.EncryptionSettingsCollection
:param encryption: Encryption property can be used to encrypt data at rest with customer
managed keys or platform managed keys.
:type encryption: ~azure.mgmt.compute.v2019_11_01.models.Encryption
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'SnapshotSku'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'disk_size_gb': {'key': 'properties.diskSizeGB', 'type': 'int'},
'encryption_settings_collection': {'key': 'properties.encryptionSettingsCollection', 'type': 'EncryptionSettingsCollection'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
sku: Optional["SnapshotSku"] = None,
os_type: Optional[Union[str, "OperatingSystemTypes"]] = None,
disk_size_gb: Optional[int] = None,
encryption_settings_collection: Optional["EncryptionSettingsCollection"] = None,
encryption: Optional["Encryption"] = None,
**kwargs
):
super(SnapshotUpdate, self).__init__(**kwargs)
self.tags = tags
self.sku = sku
self.os_type = os_type
self.disk_size_gb = disk_size_gb
self.encryption_settings_collection = encryption_settings_collection
self.encryption = encryption
class SourceVault(msrest.serialization.Model):
"""The vault id is an Azure Resource Manager Resource id in the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}.
:param id: Resource Id.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
super(SourceVault, self).__init__(**kwargs)
self.id = id
| [
"[email protected]"
] | |
b8e36c49cbb92f1d4a8ac4bf7b7e9fb0fd876631 | f8376e83352d2dfab28c41f24345071a77a45fd9 | /functions/global_local.py | 03db0337c256d7c65d4f6b36c16185a9deb2b290 | [] | no_license | harihavwas/pythonProgram | 2111ee98eccda68165159db0305c413ee53ee38a | 126df8b3a418dbaf618575b450fd4cfde44c80a7 | refs/heads/master | 2023-07-27T23:39:10.867329 | 2021-09-16T15:35:00 | 2021-09-16T15:35:00 | 402,320,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | x=5
def fun():
x=3
#global x
x+=10
print("Local : ",x)
fun()
print("Global : ",x) | [
"[email protected]"
] | |
91d6fab35d81a28c33317763a9190bc23875ccc4 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/aon.py | 6dc3567d7e22619be9c11b26f76c74afac1934e5 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'aON':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
50e3b6211cb784adbca528e697dca518ab8b7ac8 | 50f202b7068abcac204e795ee7a2dc9f13ab07e3 | /mchck_swd.py | e01cd7996edaa5390d5e681d593db025609fb332 | [] | permissive | twitchyliquid64/PySWD | 7830dd9213167d82f567bf5d912b930fa9bfb0e7 | 2981d4dcc385cd58f3c2423b359f3f53623184e0 | refs/heads/master | 2020-03-22T17:17:41.642609 | 2018-07-19T01:05:19 | 2018-07-19T01:05:19 | 140,386,338 | 0 | 0 | BSD-3-Clause | 2018-07-10T06:19:43 | 2018-07-10T06:19:43 | null | UTF-8 | Python | false | false | 1,567 | py | import time
import logging
import serial
from SWDAdapterBase import *
CMD_HANDSHAKE = "?SWD?"
CMD_HANDSHAKE_REPLY = "!SWD1"
CMD_WRITE_WORD = 0x90
CMD_WRITE_BITS = 0xa0
CMD_WRITE_BYTE = CMD_WRITE_BITS | (8 - 1)
CMD_READ_WORD = 0x10
CMD_READ_BITS = 0x20
CMD_CYCLE_CLOCK = 0x28
class Adapter(SWDAdapterBase):
def __init__(self, options):
SWDAdapterBase.__init__(self)
if not options.port:
raise SWDInitError("Port parameter is required")
self.hwlog = logging.getLogger("hwcomm")
self.port = serial.Serial(port=options.port, baudrate=115200, timeout=0.1)
self.init_adapter()
self.JTAG2SWD()
def init_adapter(self):
for i in xrange(20):
self.port.write(CMD_HANDSHAKE)
reply = self.port.read(len(CMD_HANDSHAKE_REPLY))
if reply == CMD_HANDSHAKE_REPLY:
return True
time.sleep(0.1)
raise SWDInitError("Did not get handshake reply")
def readBits(self, num):
"Read 1-8 bits from SWD"
v = bytearray([CMD_READ_BITS | (num - 1)])
self.port.write(v)
self.hwlog.debug("Wrote %s", self.renderHex(v))
v = ord(self.port.read(1))
self.hwlog.debug("Read %#02x", v)
return v
def writeBits(self, val, num):
"Write 1-8 bits to SWD"
v = bytearray([CMD_WRITE_BITS | (num - 1), val])
self.hwlog.debug("Wrote %s", self.renderHex(v))
self.port.write(v)
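    # Worked example of the bit-count encoding used by readBits/writeBits:
    # writeBits(val, 8) sends CMD_WRITE_BITS | (8 - 1) = 0xa0 | 0x07 = 0xa7,
    # which equals the CMD_WRITE_BYTE command defined above, and readBits(4)
    # sends CMD_READ_BITS | 3 = 0x23.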
@staticmethod
def renderHex(arr):
return " ".join([hex(x) for x in arr])
| [
"[email protected]"
] | |
2cc1de21342c8d7f3e1a2426bfa787d969a5c08f | 61174b68f49f8f78243a4b67eed727bd7e107481 | /extras/sample_site/sample_site/urls.py | dcacf4f406c4d6887b4be3a4a21e6153ec9d7278 | [
"MIT"
] | permissive | cltrudeau/django-yaset | b586a90780a758a36103edf6b0790c49d2227f43 | 1b27eeb3f4bdb638609e2045b1c2902f7af96141 | refs/heads/master | 2022-05-05T05:19:35.665955 | 2019-07-26T21:00:35 | 2019-07-26T21:00:35 | 198,712,577 | 0 | 0 | MIT | 2022-04-22T22:00:01 | 2019-07-24T21:33:40 | Python | UTF-8 | Python | false | false | 127 | py | from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
07f19b1600350ce134465c5c2401089bbc90b0d0 | 8567438779e6af0754620a25d379c348e4cd5a5d | /testing/xvfb.py | a5620e7cde4072d7bd8b5f6bef54b27af767d9e1 | [
"BSD-3-Clause"
] | permissive | thngkaiyuan/chromium | c389ac4b50ccba28ee077cbf6115c41b547955ae | dab56a4a71f87f64ecc0044e97b4a8f247787a68 | refs/heads/master | 2022-11-10T02:50:29.326119 | 2017-04-08T12:28:57 | 2017-04-08T12:28:57 | 84,073,924 | 0 | 1 | BSD-3-Clause | 2022-10-25T19:47:15 | 2017-03-06T13:04:15 | null | UTF-8 | Python | false | false | 2,751 | py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs tests with Xvfb and Openbox on Linux and normally on other platforms."""
import os
import platform
import signal
import subprocess
import sys
import threading
import test_env
def _kill(proc, send_signal):
"""Kills |proc| and ignores exceptions thrown for non-existent processes."""
try:
os.kill(proc.pid, send_signal)
except OSError:
pass
def kill(proc, timeout_in_seconds=10):
"""Tries to kill |proc| gracefully with a timeout for each signal."""
if not proc or not proc.pid:
return
_kill(proc, signal.SIGTERM)
thread = threading.Thread(target=proc.wait)
thread.start()
thread.join(timeout_in_seconds)
if thread.is_alive():
print >> sys.stderr, 'Xvfb running after SIGTERM, trying SIGKILL.'
_kill(proc, signal.SIGKILL)
thread.join(timeout_in_seconds)
if thread.is_alive():
print >> sys.stderr, 'Xvfb running after SIGTERM and SIGKILL; good luck!'
def run_executable(cmd, env):
"""Runs an executable within Xvfb on Linux or normally on other platforms.
Returns the exit code of the specified commandline, or 1 on failure.
"""
if sys.platform == 'linux2':
if env.get('_CHROMIUM_INSIDE_XVFB') == '1':
openbox_proc = None
xcompmgr_proc = None
try:
# Some ChromeOS tests need a window manager.
openbox_proc = subprocess.Popen('openbox', stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env)
# Some tests need a compositing wm to make use of transparent visuals.
xcompmgr_proc = subprocess.Popen('xcompmgr', stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env)
return test_env.run_executable(cmd, env)
except OSError as e:
print >> sys.stderr, 'Failed to start Xvfb or Openbox: %s' % str(e)
return 1
finally:
kill(openbox_proc)
kill(xcompmgr_proc)
else:
env['_CHROMIUM_INSIDE_XVFB'] = '1'
xvfb_script = __file__
if xvfb_script.endswith('.pyc'):
xvfb_script = xvfb_script[:-1]
return subprocess.call(['xvfb-run', '-a', "--server-args=-screen 0 "
"1280x800x24 -ac -nolisten tcp -dpi 96",
xvfb_script] + cmd, env=env)
else:
return test_env.run_executable(cmd, env)
def main():
if len(sys.argv) < 2:
print >> sys.stderr, (
'Usage: xvfb.py [command args...]')
return 2
return run_executable(sys.argv[1:], os.environ.copy())
if __name__ == "__main__":
sys.exit(main())
| [
"[email protected]"
] | |
a9ddf2c7ce753bd52658b66a00fbd265e29339f3 | 2049bda43e392d5f5981fbfdb70090ba226e4ef8 | /apps/user/management/commands/proxy_detection.py | 933792ab6086b2d3cab311183442d44cbdc89ce0 | [] | no_license | embedded1/django-package-forwarding | 2ef84a1fde5ba6817d42d89f983512bdc3d77bc3 | 8c3286e9a7da8f4ae0401a81c8037585b3bb7ba6 | refs/heads/master | 2020-06-22T17:05:36.637695 | 2019-07-26T09:34:40 | 2019-07-26T09:34:40 | 197,738,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,026 | py | from django.utils.translation import ugettext as _
from django.core.management.base import BaseCommand
from django.template import loader, Context
from django.core import mail
from django.conf import settings
from django.contrib.auth.models import User
from decimal import Decimal as D
import requests
import logging
logger = logging.getLogger("management_commands")
class Command(BaseCommand):
"""
    Background task that periodically checks whether a registered user is using a proxy service
"""
help = _("Proxy detection")
def calc_proxy_score(self, ip):
payload = {
'l': settings.MINFRAUD_LICENSE_KEY,
'i': ip
}
response = requests.get('https://minfraud.maxmind.com/app/ipauth_http', params=payload)
if response.status_code != requests.codes.ok:
logger.error("Request failed with status %s" % response.status_code)
return None
proxy = dict( f.split('=') for f in response.text.split(';') )
if 'err' in proxy and len(proxy['err']):
logger.error("MaxMind returned an error code for the request: %s" % proxy['err'])
return None
return proxy['proxyScore']
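    # Note: the parsing above assumes the ipauth response body is a
    # semicolon-separated list of key=value pairs, e.g. "proxyScore=0.50;err="
    # which dict(...) turns into {'proxyScore': '0.50', 'err': ''}.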
def handle(self, **options):
users = User.objects.select_related('profile').exclude(
is_superuser=True).filter(
is_active=True, profile__ip__isnull=False, profile__proxy_score__isnull=True)
emails = []
for user in users:
profile = user.get_profile()
ip = profile.ip
#call maxmind api to calculate proxy score
proxy_score = self.calc_proxy_score(ip)
#save proxy score
if proxy_score:
profile.proxy_score = proxy_score
profile.save()
#send alert only if we detected a proxy
if D(proxy_score) != D('0.00'):
ctx = Context({
'user_name': user.get_full_name(),
'proxy_score': proxy_score
})
subject_tpl = loader.get_template('user/alerts/emails/admins/proxy_detection_subject.txt')
body_tpl = loader.get_template('user/alerts/emails/admins/proxy_detection_body.txt')
body_html_tpl = loader.get_template('user/alerts/emails/admins/proxy_detection_body.html')
# Build email and add to list
email = {
'subject': subject_tpl.render(ctx).strip(),
'message': body_tpl.render(ctx),
'html_message': body_html_tpl.render(ctx)
}
emails.append(email)
#we use celery to dispatch emails, therefore we iterate over all emails and add
#each one of them to the task queue,send_many doesn't work with priority = now
#therefore, we use the regular send mail
#for email in emails:
# mail.mail_admins(**email)
| [
"[email protected]"
] | |
a9fd8d74fd2eb83202790909a6fdb4ff546cd49d | 5d3acf19a31749111bc9332632d56cfa8f229872 | /testing/tests/001-main/003-self/200-json/001-users.py | ace465f0ccc2064551dac140ac940ae4add24ee0 | [
"MIT",
"Apache-2.0"
] | permissive | fragmede/critic | 217adea764b96b028fe6d95ee8f0ec82bc38b606 | f32a41b8c209440b2cbf208b1790320ef6ba3ecb | refs/heads/master | 2020-12-28T07:47:37.603777 | 2015-06-23T08:15:38 | 2015-07-14T09:12:28 | 39,280,420 | 0 | 0 | NOASSERTION | 2022-09-02T20:59:50 | 2015-07-18T00:03:57 | Python | UTF-8 | Python | false | false | 7,042 | py | # @dependency 001-main/001-empty/003-criticctl/002-adduser-deluser.py
# @dependency 001-main/001-empty/004-mixed/003-oauth.py
# @dependency 001-main/001-empty/004-mixed/004-password.py
# @dependency 001-main/003-self/028-gitemails.py
frontend.json(
"users",
expect={ "users": [user_json("admin", "Testing Administrator"),
user_json("alice"),
user_json("bob"),
user_json("dave"),
user_json("erin"),
user_json("howard"),
user_json("extra", status="retired"),
user_json("carol"),
user_json("felix"),
user_json("gina", no_email=True),
user_json("iris")] })
frontend.json(
"users",
params={ "status": "current" },
expect={ "users": [user_json("admin", "Testing Administrator"),
user_json("alice"),
user_json("bob"),
user_json("dave"),
user_json("erin"),
user_json("howard"),
user_json("carol"),
user_json("felix"),
user_json("gina", no_email=True),
user_json("iris")] })
frontend.json(
"users",
params={ "status": "retired" },
expect={ "users": [user_json("extra", status="retired")] })
frontend.json(
"users",
params={ "sort": "fullname" },
expect={ "users": [user_json("alice"),
user_json("bob"),
user_json("carol"),
user_json("dave"),
user_json("erin"),
user_json("extra", status="retired"),
user_json("felix"),
user_json("gina", no_email=True),
user_json("howard"),
user_json("iris"),
user_json("admin", "Testing Administrator")] })
frontend.json(
"users",
params={ "sort": "fullname",
"count": "4" },
expect={ "users": [user_json("alice"),
user_json("bob"),
user_json("carol"),
user_json("dave")] })
frontend.json(
"users",
params={ "sort": "fullname",
"offset": "2",
"count": "4" },
expect={ "users": [user_json("carol"),
user_json("dave"),
user_json("erin"),
user_json("extra", status="retired")] })
frontend.json(
"users",
params={ "sort": "fullname",
"offset": "6" },
expect={ "users": [user_json("felix"),
user_json("gina", no_email=True),
user_json("howard"),
user_json("iris"),
user_json("admin", "Testing Administrator")] })
frontend.json(
"users/%d" % instance.userid("alice"),
expect=user_json("alice"))
frontend.json(
"users/%d" % instance.userid("alice"),
params={ "fields": "id" },
expect={ "id": instance.userid("alice") })
frontend.json(
"users",
params={ "name": "alice" },
expect=user_json("alice"))
frontend.json(
"users/%d/emails" % instance.userid("alice"),
expect={ "emails": [{ "address": "[email protected]",
"selected": True,
"verified": None }] })
filter_json = { "id": int,
"type": "reviewer",
"path": "028-gitemails/",
"repository": 1,
"delegates": [instance.userid("erin")] }
frontend.json(
"users/%d/filters" % instance.userid("alice"),
expect={ "filters": [filter_json] })
frontend.json(
"users/%d/filters" % instance.userid("alice"),
params={ "repository": "critic" },
expect={ "filters": [filter_json] })
frontend.json(
"users/%d/filters" % instance.userid("alice"),
params={ "repository": "1" },
expect={ "filters": [filter_json] })
frontend.json(
"users/%d/filters" % instance.userid("alice"),
params={ "include": "users,repositories" },
expect={ "filters": [{ "id": int,
"type": "reviewer",
"path": "028-gitemails/",
"repository": 1,
"delegates": [instance.userid("erin")] }],
"linked": { "repositories": [critic_json],
"users": [user_json("erin")] }})
frontend.json(
"users/%d,%d,%d" % (instance.userid("alice"),
instance.userid("bob"),
instance.userid("dave")),
expect={ "users": [user_json("alice"),
user_json("bob"),
user_json("dave")] })
frontend.json(
"users/%d,%d,%d" % (instance.userid("alice"),
instance.userid("bob"),
instance.userid("dave")),
params={ "fields[users]": "name" },
expect={ "users": [{ "name": "alice" },
{ "name": "bob" },
{ "name": "dave" }] })
frontend.json(
"users/4711",
expect={ "error": { "title": "No such resource",
"message": "Resource not found: Invalid user id: 4711" }},
expected_http_status=404)
frontend.json(
"users/alice",
expect={ "error": { "title": "Invalid API request",
"message": "Invalid numeric id: 'alice'" }},
expected_http_status=400)
frontend.json(
"users",
params={ "name": "nosuchuser" },
expect={ "error": { "title": "No such resource",
"message": "Resource not found: Invalid user name: 'nosuchuser'" }},
expected_http_status=404)
frontend.json(
"users",
params={ "status": "clown" },
expect={ "error": { "title": "Invalid API request",
"message": "Invalid user status values: 'clown'" }},
expected_http_status=400)
frontend.json(
"users",
params={ "status": "current,clown,president" },
expect={ "error": { "title": "Invalid API request",
"message": "Invalid user status values: 'clown', 'president'" }},
expected_http_status=400)
frontend.json(
"users",
params={ "sort": "age" },
expect={ "error": { "title": "Invalid API request",
"message": "Invalid user sort parameter: 'age'" }},
expected_http_status=400)
frontend.json(
"users/%d/emails/1" % instance.userid("alice"),
expect={ "error": { "title": "Invalid API request",
"message": "Resource does not support arguments: v1/users/emails" }},
expected_http_status=400)
frontend.json(
"users/%d/filters/1" % instance.userid("alice"),
expect={ "error": { "title": "Invalid API request",
"message": "Resource does not support arguments: v1/users/filters" }},
expected_http_status=400)
| [
"[email protected]"
] | |
869a50983066c01546bfa59c724d88e8d2fa2d10 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/44730c28cc28b124da727c569ddc9706715f50b1-<main>-bug.py | a18ff146bef3def4007632f225c584366ee5ef90 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,947 | py |
def main():
module = AnsibleModule(argument_spec=dict(login_user=dict(default=None), login_password=dict(default=None, no_log=True), login_host=dict(default='localhost'), login_port=dict(default='27017'), login_database=dict(default=None), replica_set=dict(default=None), database=dict(required=True, aliases=['db']), name=dict(required=True, aliases=['user']), password=dict(aliases=['pass'], no_log=True), ssl=dict(default=False, type='bool'), roles=dict(default=None, type='list'), state=dict(default='present', choices=['absent', 'present']), update_password=dict(default='always', choices=['always', 'on_create']), ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED'])), supports_check_mode=True)
if (not pymongo_found):
module.fail_json(msg='the python pymongo module is required')
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_database = module.params['login_database']
replica_set = module.params['replica_set']
db_name = module.params['database']
user = module.params['name']
password = module.params['password']
ssl = module.params['ssl']
ssl_cert_reqs = None
roles = (module.params['roles'] or [])
state = module.params['state']
update_password = module.params['update_password']
try:
connection_params = {
'host': login_host,
'port': int(login_port),
}
if replica_set:
connection_params['replicaset'] = replica_set
if ssl:
connection_params['ssl'] = ssl
connection_params['ssl_cert_reqs'] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
client = MongoClient(**connection_params)
check_compatibility(module, client)
if ((login_user is None) and (login_password is None)):
mongocnf_creds = load_mongocnf()
if (mongocnf_creds is not False):
login_user = mongocnf_creds['user']
login_password = mongocnf_creds['password']
elif ((login_password is None) or (login_user is None)):
module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
if ((login_user is not None) and (login_password is not None)):
client.admin.authenticate(login_user, login_password, source=login_database)
elif (LooseVersion(PyMongoVersion) >= LooseVersion('3.0')):
if (db_name != 'admin'):
module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
except Exception:
e = get_exception()
module.fail_json(msg=('unable to connect to database: %s' % str(e)))
if (state == 'present'):
if ((password is None) and (update_password == 'always')):
module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
try:
uinfo = user_find(client, user, db_name)
if ((update_password != 'always') and uinfo):
password = None
if (not check_if_roles_changed(uinfo, roles, db_name)):
module.exit_json(changed=False, user=user)
if module.check_mode:
module.exit_json(changed=True, user=user)
user_add(module, client, db_name, user, password, roles)
except Exception:
e = get_exception()
module.fail_json(msg=('Unable to add or update user: %s' % str(e)))
elif (state == 'absent'):
try:
user_remove(module, client, db_name, user)
except Exception:
e = get_exception()
module.fail_json(msg=('Unable to remove user: %s' % str(e)))
module.exit_json(changed=True, user=user)
| [
"[email protected]"
] | |
a352a0a1b1ee3450f63f90b6486011d84e3b1868 | e87c04d6c2bbba383f9c75620b16f02358039ab5 | /보충/미로1.py | 889bc61652d663fee00031279caf1f6e9ba1bf6d | [] | no_license | yoonwoo123/Algorithm | 2bf6e103009572cbcf3abfd783f6c28762529340 | 5d1e76f1bf6c2fc6acb25dc5296d62b2ca453ec6 | refs/heads/master | 2022-06-18T14:06:06.248906 | 2022-05-29T10:41:15 | 2022-05-29T10:41:15 | 221,483,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | import sys
sys.stdin = open("미로1_input.txt")
dx = [1, 0, -1, 0] # right, down, left, up
dy = [0, 1, 0, -1]
def dfs(x, y):
    maze[y][x] = 9 # mark as visited
for i in range(4):
global flag
nx = x + dx[i]
ny = y + dy[i]
if maze[ny][nx] == 3:
flag = 1
break
        if maze[ny][nx] == 0: # recurse only into cells that are still open
dfs(nx, ny)
if flag == 1:
return 1
else:
return 0
testcases = 10
for T in range(testcases):
tc = input()
maze = []
flag = 0
for i in range(16):
maze.append(list(map(int, list(input()))))
dfs(1, 1)
print("%s%d %d" % ('#', T + 1, flag)) | [
"[email protected]"
] | |
8f90f53f4f8b98f34c6f21170636a11a3faf3b30 | 456433ac78b70cb8ae076ae166a85e349f181d7f | /systems/KURSSKLAD/KURSTERM/TRANSUNITRETURN/transunitreturn.py | 014805c78aeb006058805d3a66217c259c87f717 | [] | no_license | shybkoi/WMS-Demo | 854c1679b121c68323445b60f3992959f922be8d | 2525559c4f56654acfbc21b41b3f5e40387b89e0 | refs/heads/master | 2021-01-23T01:51:20.074825 | 2017-03-23T11:51:18 | 2017-03-23T11:51:18 | 85,937,726 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 3,269 | py | # -*- coding: cp1251 -*-
from systems.KURSSKLAD.KURSTERM.common import TCommonTerm
from systems.KURSSKLAD.KURSTERM.TRANSUNITRETURN.templates.index import index
from systems.KURSSKLAD.KURSTERM.TRANSUNITRETURN.templates.auto import auto
from systems.KURSSKLAD.KURSTERM.TRANSUNITRETURN.templates.autoTransUnit import autoTransUnit
from kinterbasdb import ProgrammingError as FBExc
from cherrypy import HTTPRedirect
class TTransUnitReturn(TCommonTerm):
helpSystem = True
def index(self, id_system=None):
TCommonTerm.index(self, id_system)
self.setIfaceVar('wmsid',self.GetKSessionID())
return self.main()
index.exposed = True
def main(self, barcode=None, mes=None):
if barcode:
bcInfo = self.kBarCodeInfo(barcode)
if bcInfo and bcInfo['result']==0:
if bcInfo['usercode']=='AUTO':
raise HTTPRedirect('auto?id=%s'%(bcInfo['recordid']))
else:
mes = _('Не верный ШК')
else:
mes = bcInfo['mes']
return self.drawTemplate(templ=index,data=[{'mes':mes}])
main.exposed = True
def auto(self, id, barcode = None, mes = None):
if barcode:
bcInfo = self.kBarCodeInfo(barcode)
if bcInfo and bcInfo['result']==0:
if bcInfo['usercode']=='AUTO':
raise HTTPRedirect('auto?id=%s'%(bcInfo['recordid']))
elif bcInfo['usercode']=='TRANSUNIT':
raise HTTPRedirect('autoTransUnit?aid=%s&tuid=%s'%(id,bcInfo['recordid']))
else:
mes = _('Не верный ШК')
else:
mes = bcInfo['mes']
a = self.autoInfo(id)
atu = self.dbExec(sql="select * from WH_TRANSUNITRETURN_AUTOLISTTU(?)",params=[id],fetch="all")
return self.drawTemplate(templ=auto,data=[a,atu,{'backurl':'main', 'mes':mes}])
auto.exposed = True
def autoTransUnit(self, aid, tuid, barcode = None, mes = None):
tuid = self.kId(tuid)
if barcode:
bcInfo = self.kBarCodeInfo(barcode)
if bcInfo and bcInfo['result']==0:
if bcInfo['usercode']=='AUTO':
raise HTTPRedirect('auto?id=%s'%(bcInfo['recordid']))
elif bcInfo['usercode']=='TRANSUNIT':
if tuid == self.kId(bcInfo['recordid']):
try: self.dbExec(sql="execute procedure WH_TRANSUNITRETURN_DO(?,?,?)",params=[aid,tuid,self.getIfaceVar('wmsid')],fetch="none")
except FBExc, exc: mes = self.fbExcText(exc[1])
else: raise HTTPRedirect('auto?id=%s'%(aid))
else:
raise HTTPRedirect('autoTransUnit?aid=%s&tuid=%s'%(aid,bcInfo['recordid']))
else:
mes = _('Не верный ШК')
else:
mes = bcInfo['mes']
a = self.autoInfo(aid)
tu = self.transUnitInfo(tuid)
return self.drawTemplate(templ=autoTransUnit,data=[a,tu,{'backurl':'auto?id=%s'%(aid),'mes':mes}])
autoTransUnit.exposed = True
| [
"[email protected]"
] | |
925ca8233ec1f869327ec17b01779d58af3a7eeb | ad9bd58a3ec8fa08dfcc994d4101ee815a9f5bc0 | /02_algorithm/04.Stack/20190819/stack_practice_03/stack_practice_03.py | a2b7c341e89dea47790118435e6561dd2c2769d4 | [] | no_license | wally-wally/TIL | 93fc1d0e3bc7d030341ed54155294c68c48b4c7d | 936783bc86f563646c0398c24e2fcaa707f0ed23 | refs/heads/master | 2023-04-28T08:59:48.235747 | 2023-04-12T12:06:52 | 2023-04-12T12:06:52 | 195,918,111 | 40 | 7 | null | 2020-09-29T16:20:46 | 2019-07-09T02:31:02 | Python | UTF-8 | Python | false | false | 632 | py | # 문제를 재귀적으로 푼다
# Recursive calls are a good fit for implementing a recursive definition.
# Recursive definition --> a way to get the answer to a larger problem from the answer to a smaller one
# Factorial example
# The problem size is expressed as a natural number
# 1, n = 1 or 0
# (n-1)! * n, n > 1
def factorial(n): # n (parameter): a value representing the problem (its size)
    # return value = n! (the solution to the problem)
    if n == 0 or n == 1: # base case
        # terminate without a recursive call
return 1
else:
        # recursive call
return factorial(n - 1) * n
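# A minimal iterative cross-check of the same definition (the name
# factorial_iterative is only illustrative):
def factorial_iterative(n):
    result = 1
    for i in range(2, n + 1):  # multiply 2 * 3 * ... * n
        result *= i
    return result
print(factorial_iterative(4))  # 24, matching factorial(4)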
print(factorial(4)) | [
"[email protected]"
] | |
dde95a6598f20b7d186a996cf811781d6e534b9e | 9633f30e171550a5008ffe1a90d21254e7fe9c19 | /0x0C-python-almost_a_circle/models/base.py | b9ce9c1c1c3eaf425240be1a6412718d8d35b1ae | [] | no_license | jj131204/holbertonschool-higher_level_programming | 95e2b528bc68e7a3897c5ff49a23b1f37e9abee4 | be2afa3b2a54e88d7dd4e39e5116c9bd1b941ba6 | refs/heads/master | 2023-08-30T16:56:37.834970 | 2021-09-22T22:34:37 | 2021-09-22T22:34:37 | 361,829,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | #!/usr/bin/python3
"""create class Base"""
import json
class Base:
"""class base"""
__nb_objects = 0
def __init__(self, id=None):
""" def __init__(self, id=None): """
if id is not None:
self.id = id
else:
Base.__nb_objects += 1
self.id = Base.__nb_objects
@staticmethod
def to_json_string(list_dictionaries):
"""returns the JSON string representation of list_dictionaries"""
if list_dictionaries is None or list_dictionaries == []:
return "[]"
return json.dumps(list_dictionaries)
@classmethod
def save_to_file(cls, list_objs):
"""that writes the JSON string representation of list_objs to a file"""
str_ = []
name = cls.__name__ + ".json"
if list_objs is not None:
for i in list_objs:
str_.append(cls.to_dictionary(i))
with open(name, mode="w") as file:
file.write(cls.to_json_string(str_))
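# Usage sketch (Rectangle stands for any Base subclass from this project that
# implements to_dictionary(); it is assumed here, not defined in this file):
#   Base.to_json_string([{'id': 89}])     # -> '[{"id": 89}]'
#   Rectangle.save_to_file([r1, r2])      # writes Rectangle.json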
| [
"[email protected]"
] | |
c9b09b68eed4b34734399a88a6c0a55d7be9401c | 95495baeb47fd40b9a7ecb372b79d3847aa7a139 | /test/test_access_policy_category.py | 11c2017c46bf3867b915f353ab0c59fca1768429 | [] | no_license | pt1988/fmc-api | b1d8ff110e12c13aa94d737f3fae9174578b019c | 075f229585fcf9bd9486600200ff9efea5371912 | refs/heads/main | 2023-01-07T09:22:07.685524 | 2020-10-30T03:21:24 | 2020-10-30T03:21:24 | 308,226,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | # coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.access_policy_category import AccessPolicyCategory # noqa: E501
from swagger_client.rest import ApiException
class TestAccessPolicyCategory(unittest.TestCase):
"""AccessPolicyCategory unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAccessPolicyCategory(self):
"""Test AccessPolicyCategory"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.access_policy_category.AccessPolicyCategory() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
3815f5be00e1ab76b95e2bb5b78305f5c93ec47d | d33ae935a34431c5757eaaea31d250819ff3dac0 | /modules/folderDict.py | 73cf9cb9e10b80f89536e332cf7590ec0c412ffd | [] | no_license | OliPelz/linminUP | d23e36765a5c2015b770e0e0f24e3ff6c2fe9134 | 50ec012038267a5cabefdae2b231738515a3713e | refs/heads/master | 2021-01-09T06:24:12.635693 | 2016-04-21T10:57:01 | 2016-04-21T10:57:01 | 56,667,433 | 0 | 0 | null | 2016-04-20T07:56:32 | 2016-04-20T07:56:31 | null | UTF-8 | Python | false | false | 9,751 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# --------------------------------------------------
# File Name: folderDict.py
# Purpose:
# Creation Date: 04-11-2015
# Last Modified: Wed Mar 9 11:38:05 2016
# Author(s): The DeepSEQ Team, University of Nottingham UK
# Copyright 2015 The Author(s) All Rights Reserved
# Credits:
# --------------------------------------------------
import os
import sys
import xmltodict
import h5py
def getHDFtime((f, mtime)):
try:
hdf = h5py.File(f)
expStartTime = hdf['UniqueGlobalKey/tracking_id'].attrs['exp_start_time']
reads = 'Analyses/EventDetection_000/Reads/'
for read in hdf[reads]:
startTime = hdf[ reads + read ].attrs['start_time']
st = int(expStartTime) + int(startTime)
except:
st = mtime
return f, st
def assignHDFtimes(d):
return dict(map(getHDFtime, d.items()) )
# ---------------------------------------------------------------------------
def file_dict_of_folder(args, xml_file_dict, path):
file_list_dict = dict()
ref_list_dict = dict()
# global xml_file_dict
# xml_file_dict=dict()
if os.path.isdir(path):
print 'caching existing fast5 files in: %s' % path
for (path, dirs, files) in os.walk(path):
for f in files:
if 'downloads' in path and args.preproc is False \
or args.preproc is True:
if 'muxscan' not in f and f.endswith('.fast5'):
file_list_dict[os.path.join(path, f)] = \
os.stat(os.path.join(path, f)).st_mtime
if args.batch_fasta is True:
if 'reference' in path:
if f.endswith('.fa') or f.endswith('.fasta'
) or f.endswith('.fna'):
ref_path = path
while 'downloads' \
not in os.path.split(ref_path)[1]:
ref_path = \
os.path.split(ref_path)[0]
if ref_path not in ref_list_dict:
ref_list_dict[ref_path] = list()
ref_list_dict[ref_path].append(os.path.join(path,
f))
if 'XML' in path:
if f.endswith('.xml'):
xml_path = path
while 'downloads' \
not in os.path.split(xml_path)[1]:
# print xml_path, os.path.split(xml_path), len (os.path.split(xml_path))
xml_path = os.path.split(xml_path)[0]
# print "FINAL", xml_path
try:
xmlraw = open(os.path.join(path, f), 'r'
).read()
xmldict = xmltodict.parse(xmlraw)
if xml_path not in xml_file_dict:
xml_file_dict[xml_path] = dict()
xml_file_dict[xml_path]['study'] = \
dict()
xml_file_dict[xml_path]['experiment'
] = dict()
xml_file_dict[xml_path]['run'] = \
dict()
xml_file_dict[xml_path]['sample'] = \
dict()
if 'STUDY_SET' in xmldict:
# print "STUDY", f
primary_id = xmldict['STUDY_SET'
]['STUDY']['IDENTIFIERS']['PRIMARY_ID']
# print "STUDY_ID", primary_id
title = xmldict['STUDY_SET']['STUDY'
]['DESCRIPTOR']['STUDY_TITLE']
# print "TITLE", title
abstr = xmldict['STUDY_SET']['STUDY'
]['DESCRIPTOR']['STUDY_ABSTRACT']
# print "ABSTRACT", abstr
if primary_id \
not in xml_file_dict[xml_path]['study']:
xml_file_dict[xml_path]['study'
][primary_id] = dict()
xml_file_dict[xml_path]['study'
][primary_id]['file'] = f
xml_file_dict[xml_path]['study'
][primary_id]['xml'] = xmlraw
xml_file_dict[xml_path]['study'
][primary_id]['title'] = title
xml_file_dict[xml_path]['study'
][primary_id]['abstract'] = abstr
xml_file_dict[xml_path]['study'
][primary_id]['path'] = path
if 'EXPERIMENT_SET' in xmldict:
# print "EXPERIMENT", f
primary_id = \
xmldict['EXPERIMENT_SET']['EXPERIMENT']['IDENTIFIERS']['PRIMARY_ID']
# print "EXP_ID", primary_id
study_id = xmldict['EXPERIMENT_SET'
]['EXPERIMENT']['STUDY_REF']['IDENTIFIERS']['PRIMARY_ID']
# print "STUDY_ID", study_id
sample_id = xmldict['EXPERIMENT_SET'
]['EXPERIMENT']['DESIGN']['SAMPLE_DESCRIPTOR']['IDENTIFIERS'
]['PRIMARY_ID']
# print "SAMPLE_ID", sample_id
if primary_id \
not in xml_file_dict[xml_path]['experiment']:
xml_file_dict[xml_path]['experiment'
][primary_id] = dict()
xml_file_dict[xml_path]['experiment'
][primary_id]['file'] = f
xml_file_dict[xml_path]['experiment'
][primary_id]['xml'] = xmlraw
xml_file_dict[xml_path]['experiment'
][primary_id]['sample_id'] = sample_id
xml_file_dict[xml_path]['experiment'
][primary_id]['study_id'] = study_id
# for a,b in xmldict['EXPERIMENT_SET']['EXPERIMENT'].items():
# ....print a,b
if 'SAMPLE_SET' in xmldict:
# print "SAMPLE_SET", f
primary_id = xmldict['SAMPLE_SET'
]['SAMPLE']['IDENTIFIERS']['PRIMARY_ID']
# print "SAMPLE_ID", primary_id
if primary_id \
not in xml_file_dict[xml_path]['sample']:
xml_file_dict[xml_path]['sample'
][primary_id] = dict()
xml_file_dict[xml_path]['sample'
][primary_id]['file'] = f
xml_file_dict[xml_path]['sample'
][primary_id]['xml'] = xmlraw
if 'RUN_SET' in xmldict:
# print "RUN", f
primary_id = xmldict['RUN_SET'
]['RUN']['IDENTIFIERS']['PRIMARY_ID']
exp_id = xmldict['RUN_SET']['RUN'
]['EXPERIMENT_REF']['IDENTIFIERS']['PRIMARY_ID']
# print "RUN_ID", primary_id
if primary_id \
not in xml_file_dict[xml_path]['run']:
xml_file_dict[xml_path]['run'
][primary_id] = dict()
xml_file_dict[xml_path]['run'
][primary_id]['xml'] = xmlraw
xml_file_dict[xml_path]['run'
][primary_id]['file'] = f
xml_file_dict[xml_path]['run'
][primary_id]['exp_id'] = exp_id
except Exception, err:
err_string = \
'Error with XML file: %s : %s' \
% (f, err)
print >> sys.stderr, err_string
continue
print 'found %d existing fast5 files to process first.' \
% len(file_list_dict)
if 0 < len(xml_file_dict):
print 'found %d XML folders.' % len(xml_file_dict)
counts = dict()
for xmldir in xml_file_dict.keys():
for xmltype in xml_file_dict[xmldir].keys():
if xmltype not in counts:
counts[xmltype] = \
len(xml_file_dict[xmldir][xmltype])
else:
counts[xmltype] += \
len(xml_file_dict[xmldir][xmltype])
for xmltype in counts:
print 'found %d %s xml files.' % (counts[xmltype], xmltype)
if 0 < len(ref_list_dict):
print 'found %d reference fasta folders.' % len(ref_list_dict)
# print found_ref_note
for path in ref_list_dict.keys():
files = ','.join(ref_list_dict[path])
process_ref_fasta(args, valid_ref_dir, bwa_index_dir,
files, ref_fasta_hash)
# with open(dbcheckhash["logfile"][dbname],"a") as logfilehandle:
# ....logfilehandle.write(found_fast5_note+os.linesep)
# ....logfilehandle.close()
# 0.63 ... # file_list_dict = assignHDFtimes(file_list_dict)
return (file_list_dict, xml_file_dict)
| [
"[email protected]"
] | |
67356b560bdcc2411922d7c19c8c1593edebbd9e | 7cbdb437946c7c79ca514f52c352ae3bfe5b1aaf | /recipe/tests.py | ee043909e2b45e6e4aff36e23350a72f8a89a11c | [] | no_license | SamirIngley/Recipes-Blog | 435311fab3cb27f0b288729805a2aed50bfb9a3f | 7b12f50738b4f65557c0e1f92f905ddd83de2ebf | refs/heads/master | 2020-11-26T22:27:49.239843 | 2019-12-26T01:21:19 | 2019-12-26T01:21:19 | 229,217,552 | 0 | 0 | null | 2020-06-06T01:05:46 | 2019-12-20T07:55:06 | Python | UTF-8 | Python | false | false | 1,838 | py | # wiki/tests.py
from django.test import TestCase
from django.contrib.auth.models import User
from recipe.models import Page
class RecipeTestCase(TestCase):
def test_true_is_true(self):
""" Tests if True is equal to True. Should always pass. """
self.assertEqual(True, True)
def test_page_slugify_on_save(self):
""" Tests the slug generated when saving a Page. """
# Author is a required field in our model.
# Create a user for this test and save it to the test database.
user = User()
user.save()
# Create and save a new page to the test database.
page = Page(title="My Test Page", content="test", author=user)
page.save()
# Make sure the slug that was generated in Page.save()
# matches what we think it should be.
self.assertEqual(page.slug, "my-test-page")
class PageListViewTests(TestCase):
def test_multiple_pages(self):
# Make some test data to be displayed on the page.
user = User.objects.create()
Page.objects.create(title="My Test Page", content="test", author=user)
Page.objects.create(title="Another Test Page", content="test", author=user)
# Issue a GET request to the MakeWiki homepage.
# When we make a request, we get a response back.
response = self.client.get('/')
# Check that the response is 200 OK.
self.assertEqual(response.status_code, 200)
# Check that the number of pages passed to the template
# matches the number of pages we have in the database.
responses = response.context['pages']
self.assertEqual(len(responses), 2)
self.assertQuerysetEqual(
responses,
['<Page: My Test Page>', '<Page: Another Test Page>'],
ordered=False
) | [
"[email protected]"
] | |
90d4aab70505bc32d8460fdc76a1d6a0bc6b0724 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/compute/v20191201/get_log_analytic_export_throttled_requests.py | 3a6f0ea7758156897c6c5bbd943793b12d7e02fe | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 3,608 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetLogAnalyticExportThrottledRequestsResult',
'AwaitableGetLogAnalyticExportThrottledRequestsResult',
'get_log_analytic_export_throttled_requests',
]
@pulumi.output_type
class GetLogAnalyticExportThrottledRequestsResult:
"""
LogAnalytics operation status response
"""
def __init__(__self__, properties=None):
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter
def properties(self) -> 'outputs.LogAnalyticsOutputResponseResult':
"""
LogAnalyticsOutput
"""
return pulumi.get(self, "properties")
class AwaitableGetLogAnalyticExportThrottledRequestsResult(GetLogAnalyticExportThrottledRequestsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLogAnalyticExportThrottledRequestsResult(
properties=self.properties)
def get_log_analytic_export_throttled_requests(blob_container_sas_uri: Optional[str] = None,
from_time: Optional[str] = None,
group_by_operation_name: Optional[bool] = None,
group_by_resource_name: Optional[bool] = None,
group_by_throttle_policy: Optional[bool] = None,
location: Optional[str] = None,
to_time: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLogAnalyticExportThrottledRequestsResult:
"""
Use this data source to access information about an existing resource.
:param str blob_container_sas_uri: SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to.
:param str from_time: From time of the query
:param bool group_by_operation_name: Group query result by Operation Name.
:param bool group_by_resource_name: Group query result by Resource Name.
:param bool group_by_throttle_policy: Group query result by Throttle Policy applied.
:param str location: The location upon which virtual-machine-sizes is queried.
:param str to_time: To time of the query
"""
__args__ = dict()
__args__['blobContainerSasUri'] = blob_container_sas_uri
__args__['fromTime'] = from_time
__args__['groupByOperationName'] = group_by_operation_name
__args__['groupByResourceName'] = group_by_resource_name
__args__['groupByThrottlePolicy'] = group_by_throttle_policy
__args__['location'] = location
__args__['toTime'] = to_time
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:compute/v20191201:getLogAnalyticExportThrottledRequests', __args__, opts=opts, typ=GetLogAnalyticExportThrottledRequestsResult).value
return AwaitableGetLogAnalyticExportThrottledRequestsResult(
properties=__ret__.properties)
| [
"[email protected]"
] | |
566b632913cca4028b7dccc5d3de8e7c512bb74d | de33ba7be349eed5e2a1fc3f2bd9fce5bfdb9f13 | /phenocube/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | 97b8ed608dac27cf268b983335667de73c67fb65 | [
"MIT"
] | permissive | SteveMHill/phenocube-py | 9bebf239e24af3f97e59b080560228605e6611c5 | cb262aef1c0925efd2e955170bacd2989da03769 | refs/heads/main | 2023-02-24T03:35:11.461869 | 2020-12-22T12:15:22 | 2020-12-22T12:15:22 | 334,703,261 | 0 | 0 | MIT | 2021-01-31T16:37:21 | 2021-01-31T16:36:47 | null | UTF-8 | Python | false | false | 279,207 | py | # -*- coding: utf-8 -*-
# module pyparsing.py
#
# Copyright (c) 2003-2019 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions. With pyparsing, you don't need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :class:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::
from pip._vendor.pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of '+', '|' and '^' operators.
The :class:`ParseResults` object returned from
:class:`ParserElement.parseString` can be
accessed as a nested list, a dictionary, or an object with named
attributes.
The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle
"Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from :class:`Literal` and
:class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
class
- see how to create repetitive expressions using :class:`ZeroOrMore`
and :class:`OneOrMore` classes
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
and :class:`'&'<Each>` operators to combine simple expressions into
more complex ones
- associate names with your parsed results using
:class:`ParserElement.setResultsName`
- access the parsed data, which is returned as a :class:`ParseResults`
object
- find some helpful expression short-cuts like :class:`delimitedList`
and :class:`oneOf`
- find more useful common expressions in the :class:`pyparsing_common`
namespace class
"""
__version__ = "2.4.7"
__versionTime__ = "30 Mar 2020 00:43 UTC"
__author__ = "Paul McGuire <[email protected]>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
from operator import itemgetter
import itertools
from functools import wraps
from contextlib import contextmanager
try:
# Python 3
from itertools import filterfalse
except ImportError:
from itertools import ifilterfalse as filterfalse
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
# Python 3
from collections.abc import Iterable
from collections.abc import MutableMapping, Mapping
except ImportError:
# Python 2.7
from collections import Iterable
from collections import MutableMapping, Mapping
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
try:
from types import SimpleNamespace
except ImportError:
class SimpleNamespace:
pass
# version compatibility configuration
__compat__ = SimpleNamespace()
__compat__.__doc__ = """
A cross-version compatibility configuration for pyparsing features that will be
released in a future version. By setting values in this configuration to True,
those features can be enabled in prior versions for compatibility development
and testing.
- collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping
of results names when an And expression is nested within an Or or MatchFirst; set to
True to enable bugfix released in pyparsing 2.3.0, or False to preserve
pre-2.3.0 handling of named results
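Example (illustrative sketch)::
    from pip._vendor import pyparsing
    # opt back in to the pre-2.3.0 handling of named results on nested And expressions
    pyparsing.__compat__.collect_all_And_tokens = False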
"""
__compat__.collect_all_And_tokens = True
__diag__ = SimpleNamespace()
__diag__.__doc__ = """
Diagnostic configuration (all default to False)
- warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results
name is defined on a MatchFirst or Or expression with one or more And subexpressions
(only warns if __compat__.collect_all_And_tokens is False)
- warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results
name is defined on a containing expression with ungrouped subexpressions that also
have results names
- warn_name_set_on_empty_Forward - flag to enable warnings when a Forward is defined
with a results name, but has no contents defined
- warn_on_multiple_string_args_to_oneof - flag to enable warnings when oneOf is
incorrectly called with multiple str arguments
- enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent
calls to ParserElement.setName()
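Example (illustrative sketch)::
    from pip._vendor import pyparsing
    # enable a single diagnostic warning...
    pyparsing.__diag__.warn_name_set_on_empty_Forward = True
    # ...or turn on every warn_* flag at once
    pyparsing.__diag__.enable_all_warnings()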
"""
__diag__.warn_multiple_tokens_in_named_alternation = False
__diag__.warn_ungrouped_named_tokens_in_collection = False
__diag__.warn_name_set_on_empty_Forward = False
__diag__.warn_on_multiple_string_args_to_oneof = False
__diag__.enable_debug_on_named_expressions = False
__diag__._all_names = [
nm for nm in vars(__diag__) if nm.startswith("enable_") or nm.startswith("warn_")
]
def _enable_all_warnings():
__diag__.warn_multiple_tokens_in_named_alternation = True
__diag__.warn_ungrouped_named_tokens_in_collection = True
__diag__.warn_name_set_on_empty_Forward = True
__diag__.warn_on_multiple_string_args_to_oneof = True
__diag__.enable_all_warnings = _enable_all_warnings
__all__ = [
"__version__",
"__versionTime__",
"__author__",
"__compat__",
"__diag__",
"And",
"CaselessKeyword",
"CaselessLiteral",
"CharsNotIn",
"Combine",
"Dict",
"Each",
"Empty",
"FollowedBy",
"Forward",
"GoToColumn",
"Group",
"Keyword",
"LineEnd",
"LineStart",
"Literal",
"PrecededBy",
"MatchFirst",
"NoMatch",
"NotAny",
"OneOrMore",
"OnlyOnce",
"Optional",
"Or",
"ParseBaseException",
"ParseElementEnhance",
"ParseException",
"ParseExpression",
"ParseFatalException",
"ParseResults",
"ParseSyntaxException",
"ParserElement",
"QuotedString",
"RecursiveGrammarException",
"Regex",
"SkipTo",
"StringEnd",
"StringStart",
"Suppress",
"Token",
"TokenConverter",
"White",
"Word",
"WordEnd",
"WordStart",
"ZeroOrMore",
"Char",
"alphanums",
"alphas",
"alphas8bit",
"anyCloseTag",
"anyOpenTag",
"cStyleComment",
"col",
"commaSeparatedList",
"commonHTMLEntity",
"countedArray",
"cppStyleComment",
"dblQuotedString",
"dblSlashComment",
"delimitedList",
"dictOf",
"downcaseTokens",
"empty",
"hexnums",
"htmlComment",
"javaStyleComment",
"line",
"lineEnd",
"lineStart",
"lineno",
"makeHTMLTags",
"makeXMLTags",
"matchOnlyAtCol",
"matchPreviousExpr",
"matchPreviousLiteral",
"nestedExpr",
"nullDebugAction",
"nums",
"oneOf",
"opAssoc",
"operatorPrecedence",
"printables",
"punc8bit",
"pythonStyleComment",
"quotedString",
"removeQuotes",
"replaceHTMLEntity",
"replaceWith",
"restOfLine",
"sglQuotedString",
"srange",
"stringEnd",
"stringStart",
"traceParseAction",
"unicodeString",
"upcaseTokens",
"withAttribute",
"indentedBlock",
"originalTextFor",
"ungroup",
"infixNotation",
"locatedExpr",
"withClass",
"CloseMatch",
"tokenMap",
"pyparsing_common",
"pyparsing_unicode",
"unicode_set",
"conditionAsParseAction",
"re",
]
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
unicode = str
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [
sum,
len,
sorted,
reversed,
list,
tuple,
set,
any,
all,
min,
max,
]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode
friendly. It first tries str(obj). If that fails with
a UnicodeEncodeError, it falls back to unicode(obj), encodes the result
with the default encoding (using XML character references for characters
that cannot be encoded), and then rewrites those references as escaped
character forms.
"""
if isinstance(obj, unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), "xmlcharrefreplace")
xmlcharref = Regex(r"&#\d+;")
xmlcharref.setParseAction(lambda t: "\\u" + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__, fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = "&><\"'"
to_symbols = ("&" + s + ";" for s in "amp gt lt quot apos".split())
for from_, to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
def conditionAsParseAction(fn, message=None, fatal=False):
msg = message if message is not None else "failed user-defined condition"
exc_type = ParseFatalException if fatal else ParseException
fn = _trim_arity(fn)
@wraps(fn)
def pa(s, l, t):
if not bool(fn(s, l, t)):
raise exc_type(s, l, msg)
return pa
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(self, pstr, loc=0, msg=None, elem=None):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
self.args = (pstr, loc, msg)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
def __getattr__(self, aname):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if aname == "lineno":
return lineno(self.loc, self.pstr)
elif aname in ("col", "column"):
return col(self.loc, self.pstr)
elif aname == "line":
return line(self.loc, self.pstr)
else:
raise AttributeError(aname)
def __str__(self):
if self.pstr:
if self.loc >= len(self.pstr):
foundstr = ", found end of text"
else:
foundstr = (", found %r" % self.pstr[self.loc : self.loc + 1]).replace(
r"\\", "\\"
)
else:
foundstr = ""
return "%s%s (at char %d), (line:%d, col:%d)" % (
self.msg,
foundstr,
self.loc,
self.lineno,
self.column,
)
def __repr__(self):
return _ustr(self)
def markInputline(self, markerString=">!<"):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
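Example (an illustrative sketch; the default marker is ">!<")::
    try:
        Word(nums).setName("integer").parseString("ABC")
    except ParseException as pe:
        print(pe.markInputline())
prints::
    >!<ABC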
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join(
(line_str[:line_column], markerString, line_str[line_column:])
)
return line_str.strip()
def __dir__(self):
return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
"""
Exception thrown when a parse expression doesn't match the input string;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
Example::
try:
Word(nums).setName("integer").parseString("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.col))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
@staticmethod
def explain(exc, depth=16):
"""
Method to take an exception and translate the Python internal traceback into a list
of the pyparsing expressions that caused the exception to be raised.
Parameters:
- exc - exception raised during parsing (need not be a ParseException, in support
of Python exceptions that might be raised in a parse action)
- depth (default=16) - number of levels back in the stack trace to list expression
and function names; if None, the full stack trace names will be listed; if 0, only
the failing input line, marker, and exception string will be shown
Returns a multi-line string listing the ParserElements and/or function names in the
exception's stack trace.
Note: the diagnostic output will include string representations of the expressions
that failed to parse. These representations will be more helpful if you use `setName` to
give identifiable names to your expressions. Otherwise they will use the default string
forms, which may be cryptic to read.
explain() is only supported under Python 3.
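Example (illustrative sketch)::
    expr = Word(nums).setName("integer")
    try:
        expr.parseString("ABC")
    except ParseException as pe:
        print(ParseException.explain(pe))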
"""
import inspect
if depth is None:
depth = sys.getrecursionlimit()
ret = []
if isinstance(exc, ParseBaseException):
ret.append(exc.line)
ret.append(" " * (exc.col - 1) + "^")
ret.append("{0}: {1}".format(type(exc).__name__, exc))
if depth > 0:
callers = inspect.getinnerframes(exc.__traceback__, context=depth)
seen = set()
for i, ff in enumerate(callers[-depth:]):
frm = ff[0]
f_self = frm.f_locals.get("self", None)
if isinstance(f_self, ParserElement):
if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"):
continue
if f_self in seen:
continue
seen.add(f_self)
self_type = type(f_self)
ret.append(
"{0}.{1} - {2}".format(
self_type.__module__, self_type.__name__, f_self
)
)
elif f_self is not None:
self_type = type(f_self)
ret.append(
"{0}.{1}".format(self_type.__module__, self_type.__name__)
)
else:
code = frm.f_code
if code.co_name in ("wrapper", "<module>"):
continue
ret.append("{0}".format(code.co_name))
depth -= 1
if not depth:
break
return "\n".join(ret)
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like :class:`ParseFatalException`, but thrown internally
when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
that parsing is to stop immediately because an unbacktrackable
syntax error has been found.
"""
pass
# ~ class ReparseException(ParseBaseException):
# ~ """Experimental class - parse actions can raise this exception to cause
# ~ pyparsing to reparse the input string:
# ~ - with a modified input string, and/or
# ~ - with a modified start location
# ~ Set the values of the ReparseException in the constructor, and raise the
# ~ exception in a parse action to cause pyparsing to use the new string/location.
# ~ Setting the values as None causes no change to be made.
# ~ """
# ~ def __init_( self, newstring, restartLoc ):
# ~ self.newParseText = newstring
# ~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by :class:`ParserElement.validate` if the
grammar could be improperly recursive
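Example (an illustrative sketch of a grammar that would trigger it)::
    expr = Forward()
    expr <<= expr + "+" + Word(nums)
    expr.validate()   # raises RecursiveGrammarException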
"""
def __init__(self, parseElementList):
self.parseElementTrace = parseElementList
def __str__(self):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self, p1, p2):
self.tup = (p1, p2)
def __getitem__(self, i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self, i):
self.tup = (self.tup[0], i)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to
the parsed data:
- as a list (``len(results)``)
- by list index (``results[0], results[1]``, etc.)
- by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`)
Example::
integer = Word(nums)
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
# date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
# parseString returns a ParseResults object
result = date_str.parseString("1999/12/31")
def test(s, fn=repr):
print("%s -> %s" % (s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: 31
- month: 12
- year: 1999
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(
self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name, int):
name = _ustr(
name
) # will always return a str, but use _ustr for consistency
self.__name = name
if not (
isinstance(toklist, (type(None), basestring, list))
and toklist in (None, "", [])
):
if isinstance(toklist, basestring):
toklist = [toklist]
if asList:
if isinstance(toklist, ParseResults):
self[name] = _ParseResultsWithOffset(
ParseResults(toklist.__toklist), 0
)
else:
self[name] = _ParseResultsWithOffset(
ParseResults(toklist[0]), 0
)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError, TypeError, IndexError):
self[name] = toklist
def __getitem__(self, i):
if isinstance(i, (int, slice)):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([v[0] for v in self.__tokdict[i]])
def __setitem__(self, k, v, isinstance=isinstance):
if isinstance(v, _ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k, list()) + [v]
sub = v[0]
elif isinstance(k, (int, slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k, list()) + [
_ParseResultsWithOffset(v, 0)
]
sub = v
if isinstance(sub, ParseResults):
sub.__parent = wkref(self)
def __delitem__(self, i):
if isinstance(i, (int, slice)):
mylen = len(self.__toklist)
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i + 1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name, occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(
value, position - (position > j)
)
else:
del self.__tokdict[i]
def __contains__(self, k):
return k in self.__tokdict
def __len__(self):
return len(self.__toklist)
def __bool__(self):
return not not self.__toklist
__nonzero__ = __bool__
def __iter__(self):
return iter(self.__toklist)
def __reversed__(self):
return iter(self.__toklist[::-1])
def _iterkeys(self):
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def _itervalues(self):
return (self[k] for k in self._iterkeys())
def _iteritems(self):
return ((k, self[k]) for k in self._iterkeys())
if PY_3:
keys = _iterkeys
"""Returns an iterator of all named result keys."""
values = _itervalues
"""Returns an iterator of all named result values."""
items = _iteritems
"""Returns an iterator of all named result key-value tuples."""
else:
iterkeys = _iterkeys
"""Returns an iterator of all named result keys (Python 2.x only)."""
itervalues = _itervalues
"""Returns an iterator of all named result values (Python 2.x only)."""
iteritems = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 2.x only)."""
def keys(self):
"""Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iterkeys())
def values(self):
"""Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.itervalues())
def items(self):
"""Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iteritems())
def haskeys(self):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop(self, *args, **kwargs):
"""
Removes and returns item at specified index (default= ``last``).
Supports both ``list`` and ``dict`` semantics for ``pop()``. If
passed no argument or an integer argument, it will use ``list``
semantics and pop tokens from the list of parsed tokens. If passed
a non-integer argument (most likely a string), it will use ``dict``
semantics and pop the corresponding value from any defined results
names. A second default return value argument is supported, just as in
``dict.pop()``.
Example::
def remove_first(tokens):
tokens.pop(0)
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + OneOrMore(Word(nums))
print(patt.parseString("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.addParseAction(remove_LABEL)
print(patt.parseString("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: AAB
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k, v in kwargs.items():
if k == "default":
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given ``defaultValue`` or ``None`` if no
``defaultValue`` is specified.
Similar to ``dict.get()``.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return defaultValue
def insert(self, index, insStr):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to ``list.insert()``.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
"""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name, occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(
value, position + (position > index)
)
def append(self, item):
"""
Add single element to end of ParseResults list of elements.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
"""
self.__toklist.append(item)
def extend(self, itemseq):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self.__iadd__(itemseq)
else:
self.__toklist.extend(itemseq)
def clear(self):
"""
Clear all elements and results names.
"""
del self.__toklist[:]
self.__tokdict.clear()
def __getattr__(self, name):
try:
return self[name]
except KeyError:
return ""
def __add__(self, other):
ret = self.copy()
ret += other
return ret
def __iadd__(self, other):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a < 0 else a + offset
otheritems = other.__tokdict.items()
otherdictitems = [
(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
for k, vlist in otheritems
for v in vlist
]
for k, v in otherdictitems:
self[k] = v
if isinstance(v[0], ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update(other.__accumNames)
return self
def __radd__(self, other):
if isinstance(other, int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__(self):
return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict))
def __str__(self):
return (
"["
+ ", ".join(
_ustr(i) if isinstance(i, ParseResults) else repr(i)
for i in self.__toklist
)
+ "]"
)
def _asStringList(self, sep=""):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance(item, ParseResults):
out += item._asStringList()
else:
out.append(_ustr(item))
return out
def asList(self):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [
res.asList() if isinstance(res, ParseResults) else res
for res in self.__toklist
]
def asDict(self):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
# even though a ParseResults supports dict-like access, sometimes you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k, toItem(v)) for k, v in item_fn())
def copy(self):
"""
Returns a new copy of a :class:`ParseResults` object.
"""
ret = ParseResults(self.__toklist)
ret.__tokdict = dict(self.__tokdict.items())
ret.__parent = self.__parent
ret.__accumNames.update(self.__accumNames)
ret.__name = self.__name
return ret
def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):
"""
(Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
"""
nl = "\n"
out = []
namedItems = dict(
(v[1], k) for (k, vlist) in self.__tokdict.items() for v in vlist
)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [nl, indent, "<", selfTag, ">"]
for i, res in enumerate(self.__toklist):
if isinstance(res, ParseResults):
if i in namedItems:
out += [
res.asXML(
namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted,
)
]
else:
out += [
res.asXML(
None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted,
)
]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [
nl,
nextLevelIndent,
"<",
resTag,
">",
xmlBodyText,
"</",
resTag,
">",
]
out += [nl, indent, "</", selfTag, ">"]
return "".join(out)
def __lookup(self, sub):
for k, vlist in self.__tokdict.items():
for v, loc in vlist:
if sub is v:
return k
return None
def getName(self):
r"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (
len(self) == 1
and len(self.__tokdict) == 1
and next(iter(self.__tokdict.values()))[0][1] in (0, -1)
):
return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self, indent="", full=True, include_list=True, _depth=0):
"""
Diagnostic method for listing out the contents of
a :class:`ParseResults`. Accepts an optional ``indent`` argument so
that this string can be embedded in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = "\n"
if include_list:
out.append(indent + _ustr(self.asList()))
else:
out.append("")
if full:
if self.haskeys():
items = sorted((str(k), v) for k, v in self.items())
for k, v in items:
if out:
out.append(NL)
out.append("%s%s- %s: " % (indent, (" " * _depth), k))
if isinstance(v, ParseResults):
if v:
out.append(
v.dump(
indent=indent,
full=full,
include_list=include_list,
_depth=_depth + 1,
)
)
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv, ParseResults) for vv in self):
v = self
for i, vv in enumerate(v):
if isinstance(vv, ParseResults):
out.append(
"\n%s%s[%d]:\n%s%s%s"
% (
indent,
(" " * (_depth)),
i,
indent,
(" " * (_depth + 1)),
vv.dump(
indent=indent,
full=full,
include_list=include_list,
_depth=_depth + 1,
),
)
)
else:
out.append(
"\n%s%s[%d]:\n%s%s%s"
% (
indent,
(" " * (_depth)),
i,
indent,
(" " * (_depth + 1)),
_ustr(vv),
)
)
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the
`pprint <https://docs.python.org/3/library/pprint.html>`_ module.
Accepts additional positional or keyword args as defined for
`pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return (
self.__toklist,
(
self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name,
),
)
def __setstate__(self, state):
self.__toklist = state[0]
self.__tokdict, par, inAccumNames, self.__name = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
def __dir__(self):
return dir(type(self)) + list(self.keys())
@classmethod
def from_dict(cls, other, name=None):
"""
Helper classmethod to construct a ParseResults from a dict, preserving the
name-value relations as results names. If an optional 'name' argument is
given, a nested ParseResults will be returned
"""
def is_iterable(obj):
try:
iter(obj)
except Exception:
return False
else:
if PY_3:
return not isinstance(obj, (str, bytes))
else:
return not isinstance(obj, basestring)
ret = cls([])
for k, v in other.items():
if isinstance(v, Mapping):
ret += cls.from_dict(v, name=k)
else:
ret += cls([v], name=k, asList=is_iterable(v))
if name is not None:
ret = cls([ret], name=name)
return ret
MutableMapping.register(ParseResults)
def col(loc, strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See
:class:`ParserElement.parseString` for more
information on parsing strings containing ``<TAB>`` s, and suggested
methods to maintain a consistent view of the parsed string, the parse
location, and line and column positions within the parsed string.
"""
s = strg
return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
def lineno(loc, strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note - the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See :class:`ParserElement.parseString`
for more information on parsing strings containing ``<TAB>`` s, and
suggested methods to maintain a consistent view of the parsed string, the
parse location, and line and column positions within the parsed string.
"""
return strg.count("\n", 0, loc) + 1
def line(loc, strg):
"""Returns the line of text containing loc within a string, counting newlines as line separators."""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR + 1 : nextCR]
else:
return strg[lastCR + 1 :]
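# Illustrative note (not part of the original comments): for instring = "abc\ndef",
# the character 'e' is at loc 5, and
#     lineno(5, instring) -> 2      ('e' is on the second line)
#     col(5, instring)    -> 2      (...and in the second column of that line)
#     line(5, instring)   -> 'def'  (the full text of that line)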
def _defaultStartDebugAction(instring, loc, expr):
print(
(
"Match "
+ _ustr(expr)
+ " at loc "
+ _ustr(loc)
+ "(%d,%d)" % (lineno(loc, instring), col(loc, instring))
)
)
def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
print("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction(instring, loc, expr, exc):
print("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
# ~ 'decorator to trim function calls to match the arity of the target'
# ~ def _trim_arity(func, maxargs=3):
# ~ if func in singleArgBuiltins:
# ~ return lambda s,l,t: func(t)
# ~ limit = 0
# ~ foundArity = False
# ~ def wrapper(*args):
# ~ nonlocal limit,foundArity
# ~ while 1:
# ~ try:
# ~ ret = func(*args[limit:])
# ~ foundArity = True
# ~ return ret
# ~ except TypeError:
# ~ if limit == maxargs or foundArity:
# ~ raise
# ~ limit += 1
# ~ continue
# ~ return wrapper
# this version is Python 2.x-3.x cross-compatible
"decorator to trim function calls to match the arity of the target"
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s, l, t: func(t)
limit = [0]
foundArity = [False]
# traceback return data structure changed in Py3.5 - normalize back to plain tuples
if system_version[:2] >= (3, 5):
def extract_stack(limit=0):
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3, 5, 0) else -2
frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset]
return [frame_summary[:2]]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [frame_summary[:2]]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 6
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0] :])
foundArity[0] = True
return ret
except TypeError:
# re-raise TypeErrors if they did not come from our arity testing
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
try:
del tb
except NameError:
pass
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
# copy func name to wrapper for sensible debug output
func_name = "<parse action>"
try:
func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars(chars):
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.setDefaultWhitespaceChars(" \t")
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inlineLiteralsUsing(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
@classmethod
def _trim_traceback(cls, tb):
while tb.tb_next:
tb = tb.tb_next
return tb
def __init__(self, savelist=False):
self.parseAction = list()
self.failAction = None
# ~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = (None, None, None) # custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy(self):
"""
Make a copy of this :class:`ParserElement`. Useful for defining
different parse actions for the same parsing pattern, using copies of
the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of ``expr.copy()`` is just ``expr()``::
integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
"""
cpy = copy.copy(self)
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName(self, name):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if __diag__.enable_debug_on_named_expressions:
self.setDebug()
return self
def setResultsName(self, name, listAllMatches=False):
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original :class:`ParserElement` object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
``expr("name")`` in place of ``expr.setResultsName("name")``
- see :class:`__call__`.
Example::
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
return self._setResultsName(name, listAllMatches)
def _setResultsName(self, name, listAllMatches=False):
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches = True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self, breakFlag=True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set ``breakFlag`` to True to enable, False to
disable.
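Example (illustrative sketch)::
    # drop into pdb just before 'integer' is evaluated during a parse
    integer = Word(nums).setName("integer").setBreak()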
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
# this call to pdb.set_trace() is intentional, not a checkin error
pdb.set_trace()
return _parseMethod(instring, loc, doActions, callPreParse)
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse, "_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction(self, *fns, **kwargs):
"""
Define one or more actions to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as ``fn(s, loc, toks)`` ,
``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
If None is passed as the parse action, all previously added parse actions for this
expression are cleared.
Optional keyword arguments:
- callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See :class:`parseString` for more
information on parsing strings containing ``<TAB>`` s, and suggested
methods to maintain a consistent view of the parsed string, the parse
location, and line and column positions within the parsed string.
Example::
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# use parse action to convert to ints at parse time
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_str = integer + '/' + integer + '/' + integer
# note that integer fields are now ints, not strings
date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
"""
if list(fns) == [
None,
]:
self.parseAction = []
else:
if not all(callable(fn) for fn in fns):
raise TypeError("parse actions must be callable")
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction(self, *fns, **kwargs):
"""
Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`.
See examples in :class:`copy`.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
:class:`setParseAction` for function call signatures. Unlike ``setParseAction``,
functions passed to ``addCondition`` need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
for fn in fns:
self.parseAction.append(
conditionAsParseAction(
fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False)
)
)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction(self, fn):
"""Define action to perform if parsing fails at this expression.
Fail action fn is a callable function that takes the arguments
``fn(s, loc, expr, err)`` where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw :class:`ParseFatalException`
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables(self, instring, loc):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc, dummy = e._parse(instring, loc)
exprsFound = True
except ParseException:
pass
return loc
def preParse(self, instring, loc):
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
return loc, []
def postParse(self, instring, loc, tokenlist):
return tokenlist
# ~ @profile
def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):
TRY, MATCH, FAIL = 0, 1, 2
debugging = self.debug # and doActions)
if debugging or self.failAction:
# ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, instring), col(loc, instring)))
if self.debugActions[TRY]:
self.debugActions[TRY](instring, loc, self)
try:
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or preloc >= len(instring):
try:
loc, tokens = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, preloc, doActions)
except Exception as err:
# ~ print ("Exception raised:", err)
if self.debugActions[FAIL]:
self.debugActions[FAIL](instring, tokensStart, self, err)
if self.failAction:
self.failAction(instring, tokensStart, self, err)
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or preloc >= len(instring):
try:
loc, tokens = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, preloc, doActions)
tokens = self.postParse(instring, loc, tokens)
retTokens = ParseResults(
tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults
)
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
try:
tokens = fn(instring, tokensStart, retTokens)
except IndexError as parse_action_exc:
exc = ParseException("exception raised in parse action")
exc.__cause__ = parse_action_exc
raise exc
if tokens is not None and tokens is not retTokens:
retTokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList
and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults,
)
except Exception as err:
# ~ print "Exception raised in user parse action:", err
if self.debugActions[FAIL]:
self.debugActions[FAIL](instring, tokensStart, self, err)
raise
else:
for fn in self.parseAction:
try:
tokens = fn(instring, tokensStart, retTokens)
except IndexError as parse_action_exc:
exc = ParseException("exception raised in parse action")
exc.__cause__ = parse_action_exc
raise exc
if tokens is not None and tokens is not retTokens:
retTokens = ParseResults(
tokens,
self.resultsName,
asList=self.saveAsList
and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults,
)
if debugging:
# ~ print ("Matched", self, "->", retTokens.asList())
if self.debugActions[MATCH]:
self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens)
return loc, retTokens
def tryParse(self, instring, loc):
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
raise ParseException(instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
class _UnboundedCache(object):
def __init__(self):
cache = {}
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
if _OrderedDict is not None:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = _OrderedDict()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
while len(cache) > size:
try:
cache.popitem(False)
except KeyError:
pass
def clear(self):
cache.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
else:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = {}
key_fifo = collections.deque([], size)
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
while len(key_fifo) > size:
cache.pop(key_fifo.popleft(), None)
key_fifo.append(key)
def clear(self):
cache.clear()
key_fifo.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
# argument cache for optimizing repeated calls when backtracking through recursive expressions
packrat_cache = (
{}
) # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
HIT, MISS = 0, 1
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return value[0], value[1].copy()
_parse = _parseNoCache
@staticmethod
def resetCache():
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(
ParserElement.packrat_cache_stats
)
_packratEnabled = False
@staticmethod
def enablePackrat(cache_size_limit=128):
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing covers
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default= ``128``) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method :class:`ParserElement.enablePackrat`.
For best results, call ``enablePackrat()`` immediately after
importing pyparsing.
Example::
from pip._vendor import pyparsing
pyparsing.ParserElement.enablePackrat()
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = ParserElement._UnboundedCache()
else:
ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parseString(self, instring, parseAll=False):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
Returns the parsed data as a :class:`ParseResults` object, which may be
accessed as a list, or as a dict or object with attributes if the given parser
includes results names.
If you want the grammar to require that the entire input string be
successfully parsed, then set ``parseAll`` to True (equivalent to ending
the grammar with ``StringEnd()``).
Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the ``loc`` argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling ``parseWithTabs`` on your grammar before calling ``parseString``
(see :class:`parseWithTabs`)
- define your parse action using the full ``(s, loc, toks)`` signature, and
reference the input string using the parse action's ``s`` argument
- explicitly expand the tabs in your input string before calling
``parseString``
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
# ~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse(instring, 0)
if parseAll:
loc = self.preParse(instring, loc)
se = Empty() + StringEnd()
se._parse(instring, loc)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
if getattr(exc, "__traceback__", None) is not None:
exc.__traceback__ = self._trim_traceback(exc.__traceback__)
raise exc
else:
return tokens
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
``maxMatches`` argument, to clip scanning after 'n' matches are found. If
``overlap`` is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See :class:`parseString` for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens, start, end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn(instring, loc)
nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
except ParseException:
loc = preloc + 1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn(instring, loc)
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc + 1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
if getattr(exc, "__traceback__", None) is not None:
exc.__traceback__ = self._trim_traceback(exc.__traceback__)
raise exc
def transformString(self, instring):
"""
Extension to :class:`scanString`, to modify matching text with modified tokens that may
be returned from a parse action. To use ``transformString``, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking ``transformString()`` on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. ``transformString()`` returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t, s, e in self.scanString(instring):
out.append(instring[lastE:s])
if t:
if isinstance(t, ParseResults):
out += t.asList()
elif isinstance(t, list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr, _flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
if getattr(exc, "__traceback__", None) is not None:
exc.__traceback__ = self._trim_traceback(exc.__traceback__)
raise exc
def searchString(self, instring, maxMatches=_MAX_INT):
"""
Another extension to :class:`scanString`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``maxMatches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
try:
return ParseResults(
[t for t, s, e in self.scanString(instring, maxMatches)]
)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
if getattr(exc, "__traceback__", None) is not None:
exc.__traceback__ = self._trim_traceback(exc.__traceback__)
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional ``maxsplit`` argument, to limit the number of splits;
and the optional ``includeSeparators`` argument (default= ``False``), if the separating
matching text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t, s, e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other):
"""
Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement
converts them to :class:`Literal`s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
prints::
Hello, World! -> ['Hello', ',', 'World', '!']
``...`` may be used as a parse expression as a short form of :class:`SkipTo`.
Literal('start') + ... + Literal('end')
is equivalent to:
Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
Note that the skipped text is returned with '_skipped' as a results name,
and to support having multiple skips in the same parser, the value returned is
a list of all skipped text.
"""
if other is Ellipsis:
return _PendingSkip(self)
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return And([self, other])
def __radd__(self, other):
"""
Implementation of + operator when left operand is not a :class:`ParserElement`
"""
if other is Ellipsis:
return SkipTo(self)("_skipped*") + self
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns :class:`And` with error stop
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return self + And._ErrorStop() + other
def __rsub__(self, other):
"""
Implementation of - operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other - self
def __mul__(self, other):
"""
Implementation of * operator, allows use of ``expr * 3`` in place of
``expr + expr + expr``. Expressions may also be multiplied by a 2-integer
tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples
may also include ``None`` as in:
- ``expr*(n, None)`` or ``expr*(n, )`` is equivalent
to ``expr*n + ZeroOrMore(expr)``
(read as "at least n instances of ``expr``")
- ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
(read as "0 to n instances of ``expr``")
- ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
- ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``
Note that ``expr*(None, n)`` does not raise an exception if
more than n exprs exist in the input stream; that is,
``expr*(None, n)`` does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
``expr*(None, n) + ~expr``
"""
if other is Ellipsis:
other = (0, None)
elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
other = ((0,) + other[1:] + (None,))[:2]
if isinstance(other, int):
minElements, optElements = other, 0
elif isinstance(other, tuple):
other = tuple(o if o is not Ellipsis else None for o in other)
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0], int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self * other[0] + ZeroOrMore(self)
elif isinstance(other[0], int) and isinstance(other[1], int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError(
"cannot multiply 'ParserElement' and ('%s', '%s') objects"
% (type(other[0]), type(other[1]))
)
else:
raise TypeError(
"cannot multiply 'ParserElement' and '%s' objects" % type(other)
)
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError(
"second tuple value must be greater or equal to first tuple value"
)
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0, 0)")
if optElements:
def makeOptionalList(n):
if n > 1:
return Optional(self + makeOptionalList(n - 1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self] * minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self] * minElements)
return ret
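# A brief illustrative sketch of the multiplication shorthand described above
# (assumes Word and nums, which are defined later in this module):
#
#     digit = Word(nums, exact=1)
#     two_digits = digit * 2            # same as digit + digit
#     two_to_four = digit * (2, 4)      # between two and four repetitions
#     two_or_more = digit * (2, None)   # at least two repetitions
#     two_to_four.parseString("1 2 3")  # -> ['1', '2', '3']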
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other):
"""
Implementation of | operator - returns :class:`MatchFirst`
"""
if other is Ellipsis:
return _PendingSkip(self, must_skip=True)
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return MatchFirst([self, other])
def __ror__(self, other):
"""
Implementation of | operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other | self
def __xor__(self, other):
"""
Implementation of ^ operator - returns :class:`Or`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return Or([self, other])
def __rxor__(self, other):
"""
Implementation of ^ operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other ^ self
def __and__(self, other):
"""
Implementation of & operator - returns :class:`Each`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return Each([self, other])
def __rand__(self, other):
"""
Implementation of & operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn(
"Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning,
stacklevel=2,
)
return None
return other & self
def __invert__(self):
"""
Implementation of ~ operator - returns :class:`NotAny`
"""
return NotAny(self)
def __iter__(self):
# must implement __iter__ to override legacy use of sequential access to __getitem__ to
# iterate over a sequence
raise TypeError("%r object is not iterable" % self.__class__.__name__)
def __getitem__(self, key):
"""
use ``[]`` indexing notation as a short form for expression repetition:
- ``expr[n]`` is equivalent to ``expr*n``
- ``expr[m, n]`` is equivalent to ``expr*(m, n)``
- ``expr[n, ...]`` or ``expr[n,]`` is equivalent
to ``expr*n + ZeroOrMore(expr)``
(read as "at least n instances of ``expr``")
- ``expr[..., n]`` is equivalent to ``expr*(0, n)``
(read as "0 to n instances of ``expr``")
- ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
- ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
``None`` may be used in place of ``...``.
Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
if more than ``n`` ``expr``s exist in the input stream. If this behavior is
desired, then write ``expr[..., n] + ~expr``.
"""
# convert single arg keys to tuples
try:
if isinstance(key, str):
key = (key,)
iter(key)
except TypeError:
key = (key, key)
if len(key) > 2:
warnings.warn(
"only 1 or 2 index arguments supported ({0}{1})".format(
key[:5], "... [{0}]".format(len(key)) if len(key) > 5 else ""
)
)
# clip to 2 elements
ret = self * tuple(key[:2])
return ret
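# A minimal sketch of the indexing shorthand documented above (illustrative
# only; assumes Word and nums from this module):
#
#     digit = Word(nums, exact=1)
#     digit[2]          # exactly two occurrences
#     digit[2, ...]     # two or more occurrences
#     digit[..., 3]     # zero to three occurrences
#     digit[1, ...].parseString("7 8 9")   # -> ['7', '8', '9']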
def __call__(self, name=None):
"""
Shortcut for :class:`setResultsName`, with ``listAllMatches=False``.
If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be
passed as ``True``.
If ``name`` is omitted, same as calling :class:`copy`.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
"""
if name is not None:
return self._setResultsName(name)
else:
return self.copy()
def suppress(self):
"""
Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress(self)
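# Illustrative usage sketch for suppress() (assumes Word, alphas, and Suppress
# from this module):
#
#     pair = Word(alphas) + Suppress(",") + Word(alphas)
#     pair.parseString("red, green")   # -> ['red', 'green']; the comma is dropped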
def leaveWhitespace(self):
"""
Disables the skipping of whitespace before matching the characters in the
:class:`ParserElement`'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars(self, chars):
"""
Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs(self):
"""
Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string.
Must be called before ``parseString`` when the input grammar contains elements that
match ``<TAB>`` characters.
"""
self.keepTabs = True
return self
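# A small sketch contrasting default whitespace skipping with leaveWhitespace()
# (illustrative only; assumes Word, alphas, and OneOrMore from this module):
#
#     OneOrMore(Word(alphas)).parseString("abc  def")                    # -> ['abc', 'def']
#     OneOrMore(Word(alphas).leaveWhitespace()).parseString("abc  def")  # -> ['abc']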
def ignore(self, other):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append(Suppress(other.copy()))
return self
def setDebugActions(self, startAction, successAction, exceptionAction):
"""
Enable display of debugging messages while doing pattern matching.
"""
self.debugActions = (
startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction,
)
self.debug = True
return self
def setDebug(self, flag=True):
"""
Enable display of debugging messages while doing pattern matching.
Set ``flag`` to True to enable, False to disable.
Example::
wd = Word(alphas).setName("alphaword")
integer = Word(nums).setName("numword")
term = wd | integer
# turn on debugging for wd
wd.setDebug()
OneOrMore(term).parseString("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using :class:`setDebugActions`. Prior to attempting
to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``.
"""
if flag:
self.setDebugActions(
_defaultStartDebugAction,
_defaultSuccessDebugAction,
_defaultExceptionDebugAction,
)
else:
self.debug = False
return self
def __str__(self):
return self.name
def __repr__(self):
return _ustr(self)
def streamline(self):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion(self, parseElementList):
pass
def validate(self, validateTrace=None):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self.checkRecursion([])
def parseFile(self, file_or_filename, parseAll=False):
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r") as f:
file_contents = f.read()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
if getattr(exc, "__traceback__", None) is not None:
exc.__traceback__ = self._trim_traceback(exc.__traceback__)
raise exc
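# Illustrative sketch of parseFile (the grammar and file name here are
# hypothetical; assumes Word, alphas, Suppress, and restOfLine from this module):
#
#     key_value = Word(alphas) + Suppress("=") + restOfLine
#     key_value.parseFile("settings.ini")        # pass a filename...
#     with open("settings.ini") as f:
#         key_value.parseFile(f)                 # ...or an open file object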
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, basestring):
return self.matches(other)
elif isinstance(other, ParserElement):
return vars(self) == vars(other)
return False
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return id(self)
def __req__(self, other):
return self == other
def __rne__(self, other):
return not (self == other)
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
inline microtests of sub expressions while building up larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
self.parseString(_ustr(testString), parseAll=parseAll)
return True
except ParseBaseException:
return False
def runTests(
self,
tests,
parseAll=True,
comment="#",
fullDump=True,
printResults=True,
failureTests=False,
postParse=None,
file=None,
):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
- comment - (default= ``'#'``) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default= ``True``) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default= ``True``) prints test output to stdout
- failureTests - (default= ``False``) indicates if these tests are expected to fail parsing
- postParse - (default= ``None``) optional callback for successful parse results; called as
`fn(test_string, parse_results)` and returns a string to be added to the test output
- file - (default=``None``) optional file-like object to which test output will be written;
if None, will default to ``sys.stdout``
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if ``failureTests`` is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading 'r'.)
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.rstrip().splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
if file is None:
file = sys.stdout
print_ = file.write
allResults = []
comments = []
success = True
NL = Literal(r"\n").addParseAction(replaceWith("\n")).ignore(quotedString)
BOM = u"\ufeff"
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(t)
continue
if not t:
continue
out = ["\n" + "\n".join(comments) if comments else "", t]
comments = []
try:
# convert newline marks to actual newlines, and strip leading BOM if present
t = NL.transformString(t.lstrip(BOM))
result = self.parseString(t, parseAll=parseAll)
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if "\n" in t:
out.append(line(pe.loc, t))
out.append(" " * (col(pe.loc, t) - 1) + "^" + fatal)
else:
out.append(" " * pe.loc + "^" + fatal)
out.append("FAIL: " + str(pe))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: " + str(exc))
success = success and failureTests
result = exc
else:
success = success and not failureTests
if postParse is not None:
try:
pp_value = postParse(t, result)
if pp_value is not None:
if isinstance(pp_value, ParseResults):
out.append(pp_value.dump())
else:
out.append(str(pp_value))
else:
out.append(result.dump())
except Exception as e:
out.append(result.dump(full=fullDump))
out.append(
"{0} failed: {1}: {2}".format(
postParse.__name__, type(e).__name__, e
)
)
else:
out.append(result.dump(full=fullDump))
if printResults:
if fullDump:
out.append("")
print_("\n".join(out))
allResults.append((t, result))
return success, allResults
class _PendingSkip(ParserElement):
# internal placeholder class to hold a place where '...' is added to a parser element;
# once another ParserElement is added, this placeholder will be replaced with a SkipTo
def __init__(self, expr, must_skip=False):
super(_PendingSkip, self).__init__()
self.strRepr = str(expr + Empty()).replace("Empty", "...")
self.name = self.strRepr
self.anchor = expr
self.must_skip = must_skip
def __add__(self, other):
skipper = SkipTo(other).setName("...")("_skipped*")
if self.must_skip:
def must_skip(t):
if not t._skipped or t._skipped.asList() == [""]:
del t[0]
t.pop("_skipped", None)
def show_skip(t):
if t._skipped.asList()[-1:] == [""]:
skipped = t.pop("_skipped")
t["_skipped"] = "missing <" + repr(self.anchor) + ">"
return (
self.anchor + skipper().addParseAction(must_skip)
| skipper().addParseAction(show_skip)
) + other
return self.anchor + skipper + other
def __repr__(self):
return self.strRepr
def parseImpl(self, *args):
raise Exception(
"use of `...` expression without following SkipTo target expression"
)
class Token(ParserElement):
"""Abstract :class:`ParserElement` subclass, for defining atomic
matching patterns.
"""
def __init__(self):
super(Token, self).__init__(savelist=False)
class Empty(Token):
"""An empty token, will always match."""
def __init__(self):
super(Empty, self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""A token that will never match."""
def __init__(self):
super(NoMatch, self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl(self, instring, loc, doActions=True):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""Token to exactly match a specified string.
Example::
Literal('blah').parseString('blah') # -> ['blah']
Literal('blah').parseString('blahfooblah') # -> ['blah']
Literal('blah').parseString('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use :class:`CaselessLiteral`.
For keyword matching (force word break before and after the matched string),
use :class:`Keyword` or :class:`CaselessKeyword`.
"""
def __init__(self, matchString):
super(Literal, self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn(
"null string passed to Literal; use Empty() instead",
SyntaxWarning,
stacklevel=2,
)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: modify __class__ to select
# a parseImpl optimized for single-character check
if self.matchLen == 1 and type(self) is Literal:
self.__class__ = _SingleCharLiteral
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] == self.firstMatchChar and instring.startswith(
self.match, loc
):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class _SingleCharLiteral(Literal):
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] == self.firstMatchChar:
return loc + 1, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is,
it must be immediately followed by a non-keyword character. Compare
with :class:`Literal`:
- ``Literal("if")`` will match the leading ``'if'`` in
``'ifAndOnlyIf'``.
- ``Keyword("if")`` will not; it will only match the leading
``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
Accepts two optional constructor arguments in addition to the
keyword string:
- ``identChars`` is a string of characters that would be valid
identifier characters, defaulting to all alphanumerics + "_" and
"$"
- ``caseless`` allows case-insensitive matching, default is ``False``.
Example::
Keyword("start").parseString("start") # -> ['start']
Keyword("start").parseString("starting") # -> Exception
For case-insensitive matching, use :class:`CaselessKeyword`.
"""
DEFAULT_KEYWORD_CHARS = alphanums + "_$"
def __init__(self, matchString, identChars=None, caseless=False):
super(Keyword, self).__init__()
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn(
"null string passed to Keyword; use Empty() instead",
SyntaxWarning,
stacklevel=2,
)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl(self, instring, loc, doActions=True):
if self.caseless:
if (
(instring[loc : loc + self.matchLen].upper() == self.caselessmatch)
and (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen].upper() not in self.identChars
)
and (loc == 0 or instring[loc - 1].upper() not in self.identChars)
):
return loc + self.matchLen, self.match
else:
if instring[loc] == self.firstMatchChar:
if (
(self.matchLen == 1 or instring.startswith(self.match, loc))
and (
loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen] not in self.identChars
)
and (loc == 0 or instring[loc - 1] not in self.identChars)
):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword, self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
@staticmethod
def setDefaultKeywordChars(chars):
"""Overrides the default Keyword chars"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
class CaselessLiteral(Literal):
"""Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
(Contrast with example for :class:`CaselessKeyword`.)
"""
def __init__(self, matchString):
super(CaselessLiteral, self).__init__(matchString.upper())
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl(self, instring, loc, doActions=True):
if instring[loc : loc + self.matchLen].upper() == self.match:
return loc + self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of :class:`Keyword`.
Example::
OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
(Contrast with example for :class:`CaselessLiteral`.)
"""
def __init__(self, matchString, identChars=None):
super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)
class CloseMatch(Token):
"""A variation on :class:`Literal` which matches "close" matches,
that is, strings with at most 'n' mismatching characters.
:class:`CloseMatch` takes parameters:
- ``match_string`` - string to be matched
- ``maxMismatches`` - (``default=1``) maximum number of
mismatches allowed to count as a match
The results from a successful parse will contain the matched text
from the input string and the following named results:
- ``mismatches`` - a list of the positions within the
match_string where mismatches were found
- ``original`` - the original match_string used to compare
against the input string
If ``mismatches`` is an empty list, then the match was an exact
match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(self, match_string, maxMismatches=1):
super(CloseMatch, self).__init__()
self.name = match_string
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected %r (with up to %d mismatches)" % (
self.match_string,
self.maxMismatches,
)
self.mayIndexError = False
self.mayReturnEmpty = False
def parseImpl(self, instring, loc, doActions=True):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc, s_m in enumerate(
zip(instring[loc:maxloc], match_string)
):
src, mat = s_m
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = match_stringloc + 1
results = ParseResults([instring[start:loc]])
results["original"] = match_string
results["mismatches"] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters, an
optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for ``min`` is
1 (a minimum value < 1 is not valid); the default values for
``max`` and ``exact`` are 0, meaning no maximum or exact
length restriction. An optional ``excludeChars`` parameter can
list characters that might be found in the input ``bodyChars``
string; useful to define a word of all printables except for one or
two characters, for instance.
:class:`srange` is useful for defining custom character set strings
for defining ``Word`` expressions, using range notation from
regular expression character sets.
A common mistake is to use :class:`Word` to match a specific literal
string, as in ``Word("Address")``. Remember that :class:`Word`
uses the string argument to define *sets* of matchable characters.
This expression would match "Add", "AAA", "dAred", or any other word
made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
exact literal string, use :class:`Literal` or :class:`Keyword`.
pyparsing includes helper strings for building Words:
- :class:`alphas`
- :class:`nums`
- :class:`alphanums`
- :class:`hexnums`
- :class:`alphas8bit` (alphabetic characters in the Latin-1 range 128-255
- accented, tilded, umlauted, etc.)
- :class:`punc8bit` (non-alphabetic characters in the Latin-1 range
128-255 - currency, symbols, superscripts, diacriticals, etc.)
- :class:`printables` (any non-whitespace character)
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums + '-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, excludeChars=",")
"""
def __init__(
self,
initChars,
bodyChars=None,
min=1,
max=0,
exact=0,
asKeyword=False,
excludeChars=None,
):
super(Word, self).__init__()
if excludeChars:
excludeChars = set(excludeChars)
initChars = "".join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = "".join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars:
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if " " not in self.initCharsOrig + self.bodyCharsOrig and (
min == 1 and max == 0 and exact == 0
):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.initCharsOrig) == 1:
self.reString = "%s[%s]*" % (
re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),
)
else:
self.reString = "[%s][%s]*" % (
_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),
)
if self.asKeyword:
self.reString = r"\b" + self.reString + r"\b"
try:
self.re = re.compile(self.reString)
except Exception:
self.re = None
else:
self.re_match = self.re.match
self.__class__ = _WordRegex
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] not in self.initChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min(maxloc, instrlen)
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
elif self.asKeyword:
if (
start > 0
and instring[start - 1] in bodychars
or loc < instrlen
and instring[loc] in bodychars
):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__(self):
try:
return super(Word, self).__str__()
except Exception:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s) > 4:
return s[:4] + "..."
else:
return s
if self.initCharsOrig != self.bodyCharsOrig:
self.strRepr = "W:(%s, %s)" % (
charsAsStr(self.initCharsOrig),
charsAsStr(self.bodyCharsOrig),
)
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class _WordRegex(Word):
def parseImpl(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
class Char(_WordRegex):
"""A short-cut class for defining ``Word(characters, exact=1)``,
when defining a match of any single character in a string of
characters.
"""
def __init__(self, charset, asKeyword=False, excludeChars=None):
super(Char, self).__init__(
charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars
)
self.reString = "[%s]" % _escapeRegexRangeChars("".join(self.initChars))
if asKeyword:
self.reString = r"\b%s\b" % self.reString
self.re = re.compile(self.reString)
self.re_match = self.re.match
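# Illustrative sketch of Char versus Word (assumes Word and nums from this module):
#
#     digit = Char(nums)                   # exactly one character from nums
#     digit.parseString("7")               # -> ['7']
#     Word(nums).parseString("123")        # -> ['123']
#     (digit * 3).parseString("123")       # -> ['1', '2', '3']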
class Regex(Token):
r"""Token for matching strings that match a given regular
expression. Defined with string specifying the regular expression in
a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
If the given regex contains named groups (defined using ``(?P<name>...)``),
these will be preserved as named parse results.
If instead of the Python stdlib re module you wish to use a different RE module
(such as the `regex` module), you can do so by building your Regex object with
a compiled RE object that was compiled using `regex`:
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
# use regex module instead of stdlib re module to construct a Regex using
# a compiled regular expression
import regex
parser = pp.Regex(regex.compile(r'[0-9]'))
"""
def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False):
"""The parameters ``pattern`` and ``flags`` are passed
to the ``re.compile()`` function as-is. See the Python
`re module <https://docs.python.org/3/library/re.html>`_ for an
explanation of the acceptable patterns and flags.
"""
super(Regex, self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn(
"null string passed to Regex; use Empty() instead",
SyntaxWarning,
stacklevel=2,
)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn(
"invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning,
stacklevel=2,
)
raise
elif hasattr(pattern, "pattern") and hasattr(pattern, "match"):
self.re = pattern
self.pattern = self.reString = pattern.pattern
self.flags = flags
else:
raise TypeError(
"Regex may only be constructed with a string or a compiled RE object"
)
self.re_match = self.re.match
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = self.re_match("") is not None
self.asGroupList = asGroupList
self.asMatch = asMatch
if self.asGroupList:
self.parseImpl = self.parseImplAsGroupList
if self.asMatch:
self.parseImpl = self.parseImplAsMatch
def parseImpl(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = ParseResults(result.group())
d = result.groupdict()
if d:
for k, v in d.items():
ret[k] = v
return loc, ret
def parseImplAsGroupList(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.groups()
return loc, ret
def parseImplAsMatch(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result
return loc, ret
def __str__(self):
try:
return super(Regex, self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
def sub(self, repl):
r"""
Return Regex with an attached parse action to transform the parsed
result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
Example::
make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
print(make_html.transformString("h1:main title:"))
# prints "<h1>main title</h1>"
"""
if self.asGroupList:
warnings.warn(
"cannot use sub() with Regex(asGroupList=True)",
SyntaxWarning,
stacklevel=2,
)
raise SyntaxError()
if self.asMatch and callable(repl):
warnings.warn(
"cannot use sub() with a callable with Regex(asMatch=True)",
SyntaxWarning,
stacklevel=2,
)
raise SyntaxError()
if self.asMatch:
def pa(tokens):
return tokens[0].expand(repl)
else:
def pa(tokens):
return self.re.sub(repl, tokens[0])
return self.addParseAction(pa)
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- quoteChar - string of one or more characters defining the
quote delimiting string
- escChar - character to escape quotes, typically backslash
(default= ``None``)
- escQuote - special quote sequence to escape an embedded quote
string (such as SQL's ``""`` to escape an embedded ``"``)
(default= ``None``)
- multiline - boolean indicating whether quotes can span
multiple lines (default= ``False``)
- unquoteResults - boolean indicating whether the matched text
should be unquoted (default= ``True``)
- endQuoteChar - string of one or more characters defining the
end of the quote delimited string (default= ``None`` => same as
quoteChar)
- convertWhitespaceEscapes - convert escaped whitespace
(``'\t'``, ``'\n'``, etc.) to actual whitespace
(default= ``True``)
Example::
qs = QuotedString('"')
print(qs.searchString('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', endQuoteChar='}}')
print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', escQuote='""')
print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
def __init__(
self,
quoteChar,
escChar=None,
escQuote=None,
multiline=False,
unquoteResults=True,
endQuoteChar=None,
convertWhitespaceEscapes=True,
):
super(QuotedString, self).__init__()
# remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if not quoteChar:
warnings.warn(
"quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2
)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
warnings.warn(
"endQuoteChar cannot be the empty string",
SyntaxWarning,
stacklevel=2,
)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r"%s(?:[^%s%s]" % (
re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or ""),
)
else:
self.flags = 0
self.pattern = r"%s(?:[^%s\n\r%s]" % (
re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or ""),
)
if len(self.endQuoteChar) > 1:
self.pattern += (
"|(?:"
+ ")|(?:".join(
"%s[^%s]"
% (
re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]),
)
for i in range(len(self.endQuoteChar) - 1, 0, -1)
)
+ ")"
)
if escQuote:
self.pattern += r"|(?:%s)" % re.escape(escQuote)
if escChar:
self.pattern += r"|(?:%s.)" % re.escape(escChar)
self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
self.pattern += r")*%s" % re.escape(self.endQuoteChar)
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
self.re_match = self.re.match
except sre_constants.error:
warnings.warn(
"invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning,
stacklevel=2,
)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
result = (
instring[loc] == self.firstQuoteChar
and self.re_match(instring, loc)
or None
)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen : -self.endQuoteCharLen]
if isinstance(ret, basestring):
# replace escaped whitespace
if "\\" in ret and self.convertWhitespaceEscapes:
ws_map = {
r"\t": "\t",
r"\n": "\n",
r"\f": "\f",
r"\r": "\r",
}
for wslit, wschar in ws_map.items():
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__(self):
try:
return super(QuotedString, self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (
self.quoteChar,
self.endQuoteChar,
)
return self.strRepr
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given
set (will include whitespace in matched characters if not listed in
the provided exclusion set - see example). Defined with string
containing all disallowed characters, and an optional minimum,
maximum, and/or exact length. The default value for ``min`` is
1 (a minimum value < 1 is not valid); the default values for
``max`` and ``exact`` are 0, meaning no maximum or exact
length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__(self, notChars, min=1, max=0, exact=0):
super(CharsNotIn, self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use "
"Optional(CharsNotIn()) if zero-length char group is permitted"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = self.minLen == 0
self.mayIndexError = False
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min(start + self.maxLen, len(instring))
while loc < maxlen and instring[loc] not in notchars:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__(self):
try:
return super(CharsNotIn, self).__str__()
except Exception:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""Special matching class for matching whitespace. Normally,
whitespace is ignored by pyparsing grammars. This class is included
when some whitespace structures are significant. Define with
a string containing the whitespace characters to be matched; default
is ``" \\t\\r\\n"``. Also takes optional ``min``,
``max``, and ``exact`` arguments, as defined for the
:class:`Word` class.
"""
whiteStrs = {
" ": "<SP>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
u"\u00A0": "<NBSP>",
u"\u1680": "<OGHAM_SPACE_MARK>",
u"\u180E": "<MONGOLIAN_VOWEL_SEPARATOR>",
u"\u2000": "<EN_QUAD>",
u"\u2001": "<EM_QUAD>",
u"\u2002": "<EN_SPACE>",
u"\u2003": "<EM_SPACE>",
u"\u2004": "<THREE-PER-EM_SPACE>",
u"\u2005": "<FOUR-PER-EM_SPACE>",
u"\u2006": "<SIX-PER-EM_SPACE>",
u"\u2007": "<FIGURE_SPACE>",
u"\u2008": "<PUNCTUATION_SPACE>",
u"\u2009": "<THIN_SPACE>",
u"\u200A": "<HAIR_SPACE>",
u"\u200B": "<ZERO_WIDTH_SPACE>",
u"\u202F": "<NNBSP>",
u"\u205F": "<MMSP>",
u"\u3000": "<IDEOGRAPHIC_SPACE>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White, self).__init__()
self.matchWhite = ws
self.setWhitespaceChars(
"".join(c for c in self.whiteChars if c not in self.matchWhite)
)
# ~ self.leaveWhitespace()
self.name = "".join(White.whiteStrs[c] for c in self.matchWhite)
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] not in self.matchWhite:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min(maxloc, len(instring))
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
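# Illustrative sketch of White, for grammars where a run of whitespace is
# significant and should be returned as its own token (assumes Word and alphas
# from this module):
#
#     expr = Word(alphas) + White(" \t") + Word(alphas)
#     expr.parseString("abc   def")   # -> ['abc', '   ', 'def']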
class _PositionToken(Token):
def __init__(self):
super(_PositionToken, self).__init__()
self.name = self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""Token to advance to a specific column of input text; useful for
tabular report scraping.
"""
def __init__(self, colno):
super(GoToColumn, self).__init__()
self.col = colno
def preParse(self, instring, loc):
if col(loc, instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
while (
loc < instrlen
and instring[loc].isspace()
and col(loc, instring) != self.col
):
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
thiscol = col(loc, instring)
if thiscol > self.col:
raise ParseException(instring, loc, "Text not in expected column", self)
newloc = loc + self.col - thiscol
ret = instring[loc:newloc]
return newloc, ret
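# Illustrative sketch of GoToColumn for fixed-width report scraping (the column
# number here is made up; assumes Word, alphas, and nums from this module):
#
#     # read an item name, then the quantity that always starts in column 20
#     report_line = Word(alphas)("item") + GoToColumn(20).suppress() + Word(nums)("qty")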
class LineStart(_PositionToken):
r"""Matches if current position is at the beginning of a line within
the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
print(t)
prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__(self):
super(LineStart, self).__init__()
self.errmsg = "Expected start of line"
def parseImpl(self, instring, loc, doActions=True):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(_PositionToken):
"""Matches if current position is at the end of a line within the
parse string
"""
def __init__(self):
super(LineEnd, self).__init__()
self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", ""))
self.errmsg = "Expected end of line"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
if instring[loc] == "\n":
return loc + 1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
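# Illustrative sketch of LineEnd for line-oriented grammars (assumes Word,
# alphas, Group, and OneOrMore from this module):
#
#     line_expr = Group(Word(alphas) + LineEnd().suppress())
#     OneOrMore(line_expr).parseString("abc\ndef\n")   # -> [['abc'], ['def']]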
class StringStart(_PositionToken):
"""Matches if current position is at the beginning of the parse
string
"""
def __init__(self):
super(StringStart, self).__init__()
self.errmsg = "Expected start of text"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse(instring, 0):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""Matches if current position is at the end of the parse string"""
def __init__(self):
super(StringEnd, self).__init__()
self.errmsg = "Expected end of text"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(_PositionToken):
"""Matches if the current position is at the beginning of a Word,
and is not preceded by any character in a given set of
``wordChars`` (default= ``printables``). To emulate the
``\b`` behavior of regular expressions, use
``WordStart(alphanums)``. ``WordStart`` will also match at
the beginning of the string being parsed, or at the beginning of
a line.
"""
def __init__(self, wordChars=printables):
super(WordStart, self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
if (
instring[loc - 1] in self.wordChars
or instring[loc] not in self.wordChars
):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""Matches if the current position is at the end of a Word, and is
not followed by any character in a given set of ``wordChars``
(default= ``printables``). To emulate the ``\b`` behavior of
regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
will also match at the end of the string being parsed, or at the end
of a line.
"""
def __init__(self, wordChars=printables):
super(WordEnd, self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True):
instrlen = len(instring)
if instrlen > 0 and loc < instrlen:
if (
instring[loc] in self.wordChars
or instring[loc - 1] not in self.wordChars
):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
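# Illustrative sketch of WordStart/WordEnd word boundaries (assumes Literal and
# alphas from this module):
#
#     # match 'led' only as a whole word, not inside 'filled' or 'bled'
#     led = WordStart(alphas) + Literal("led") + WordEnd(alphas)
#     led.searchString("filled led bled")   # -> [['led']]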
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and
post-processing parsed tokens.
"""
def __init__(self, exprs, savelist=False):
super(ParseExpression, self).__init__(savelist)
if isinstance(exprs, _generatorType):
exprs = list(exprs)
if isinstance(exprs, basestring):
self.exprs = [self._literalStringClass(exprs)]
elif isinstance(exprs, ParserElement):
self.exprs = [exprs]
elif isinstance(exprs, Iterable):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if any(isinstance(expr, basestring) for expr in exprs):
exprs = (
self._literalStringClass(e) if isinstance(e, basestring) else e
for e in exprs
)
self.exprs = list(exprs)
else:
try:
self.exprs = list(exprs)
except TypeError:
self.exprs = [exprs]
self.callPreparse = False
def append(self, other):
self.exprs.append(other)
self.strRepr = None
return self
def leaveWhitespace(self):
"""Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore(self, other):
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super(ParseExpression, self).ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
else:
super(ParseExpression, self).ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
return self
def __str__(self):
try:
return super(ParseExpression, self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs))
return self.strRepr
def streamline(self):
super(ParseExpression, self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d)
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if len(self.exprs) == 2:
other = self.exprs[0]
if (
isinstance(other, self.__class__)
and not other.parseAction
and other.resultsName is None
and not other.debug
):
self.exprs = other.exprs[:] + [self.exprs[1]]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if (
isinstance(other, self.__class__)
and not other.parseAction
and other.resultsName is None
and not other.debug
):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + _ustr(self)
return self
def validate(self, validateTrace=None):
tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion([])
def copy(self):
ret = super(ParseExpression, self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
def _setResultsName(self, name, listAllMatches=False):
if __diag__.warn_ungrouped_named_tokens_in_collection:
for e in self.exprs:
if isinstance(e, ParserElement) and e.resultsName:
warnings.warn(
"{0}: setting results name {1!r} on {2} expression "
"collides with {3!r} on contained expression".format(
"warn_ungrouped_named_tokens_in_collection",
name,
type(self).__name__,
e.resultsName,
),
stacklevel=3,
)
return super(ParseExpression, self)._setResultsName(name, listAllMatches)
class And(ParseExpression):
"""
Requires all given :class:`ParseExpression` s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the ``'+'`` operator.
May also be constructed using the ``'-'`` operator, which will
suppress backtracking.
Example::
integer = Word(nums)
name_expr = OneOrMore(Word(alphas))
expr = And([integer("id"), name_expr("name"), integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop, self).__init__(*args, **kwargs)
self.name = "-"
self.leaveWhitespace()
def __init__(self, exprs, savelist=True):
exprs = list(exprs)
if exprs and Ellipsis in exprs:
tmp = []
for i, expr in enumerate(exprs):
if expr is Ellipsis:
if i < len(exprs) - 1:
skipto_arg = (Empty() + exprs[i + 1]).exprs[-1]
tmp.append(SkipTo(skipto_arg)("_skipped*"))
else:
raise Exception(
"cannot construct And with sequence ending in ..."
)
else:
tmp.append(expr)
exprs[:] = tmp
super(And, self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars(self.exprs[0].whiteChars)
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def streamline(self):
# collapse any _PendingSkip's
if self.exprs:
if any(
isinstance(e, ParseExpression)
and e.exprs
and isinstance(e.exprs[-1], _PendingSkip)
for e in self.exprs[:-1]
):
for i, e in enumerate(self.exprs[:-1]):
if e is None:
continue
if (
isinstance(e, ParseExpression)
and e.exprs
and isinstance(e.exprs[-1], _PendingSkip)
):
e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]
self.exprs[i + 1] = None
self.exprs = [e for e in self.exprs if e is not None]
super(And, self).streamline()
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
return self
def parseImpl(self, instring, loc, doActions=True):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse(
instring, loc, doActions, callPreParse=False
)
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse(instring, loc, doActions)
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(
instring, len(instring), self.errmsg, self
)
else:
loc, exprtokens = e._parse(instring, loc, doActions)
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other):
if isinstance(other, basestring):
other = self._literalStringClass(other)
return self.append(other) # And([self, other])
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
if not e.mayReturnEmpty:
break
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
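# Illustrative sketch (not part of the original module): the '-' operator inserts an
# And._ErrorStop, so a failure after that point raises ParseSyntaxException instead of
# backtracking to other alternatives. For example, in a separate script one could write:
#
#   ident = Word(alphas)
#   lparen, rparen = map(Suppress, "()")
#   func_call = ident + lparen - Word(nums) + rparen
#   func_call.parseString("sqrt(144)")   # -> ['sqrt', '144']
#   func_call.parseString("sqrt()")      # raises ParseSyntaxException at the missing number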
class Or(ParseExpression):
"""Requires that at least one :class:`ParseExpression` is found. If
two expressions match, the expression that matches the longest
string will be used. May be constructed using the ``'^'``
operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs, savelist=False):
super(Or, self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def streamline(self):
super(Or, self).streamline()
if __compat__.collect_all_And_tokens:
self.saveAsList = any(e.saveAsList for e in self.exprs)
return self
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
matches = []
for e in self.exprs:
try:
loc2 = e.tryParse(instring, loc)
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(
instring, len(instring), e.errmsg, self
)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
# re-evaluate all matches in descending order of length of match, in case attached actions
# might change whether or how much they match of the input.
matches.sort(key=itemgetter(0), reverse=True)
if not doActions:
# no further conditions or parse actions to change the selection of
# alternative, so the first match will be the best match
best_expr = matches[0][1]
return best_expr._parse(instring, loc, doActions)
longest = -1, None
for loc1, expr1 in matches:
if loc1 <= longest[0]:
# already have a longer match than this one will deliver, we are done
return longest
try:
loc2, toks = expr1._parse(instring, loc, doActions)
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
else:
if loc2 >= loc1:
return loc2, toks
# didn't match as much as before
elif loc2 > longest[0]:
longest = loc2, toks
if longest != (-1, None):
return longest
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(
instring, loc, "no defined alternatives to match", self
)
def __ixor__(self, other):
if isinstance(other, basestring):
other = self._literalStringClass(other)
return self.append(other) # Or([self, other])
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
def _setResultsName(self, name, listAllMatches=False):
if (
not __compat__.collect_all_And_tokens
and __diag__.warn_multiple_tokens_in_named_alternation
):
if any(isinstance(e, And) for e in self.exprs):
warnings.warn(
"{0}: setting results name {1!r} on {2} expression "
"may only return a single token for an And alternative, "
"in future will return the full list of tokens".format(
"warn_multiple_tokens_in_named_alternation",
name,
type(self).__name__,
),
stacklevel=3,
)
return super(Or, self)._setResultsName(name, listAllMatches)
class MatchFirst(ParseExpression):
"""Requires that at least one :class:`ParseExpression` is found. If
two expressions match, the first one listed is the one that will
match. May be constructed using the ``'|'`` operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs, savelist=False):
super(MatchFirst, self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def streamline(self):
super(MatchFirst, self).streamline()
if __compat__.collect_all_And_tokens:
self.saveAsList = any(e.saveAsList for e in self.exprs)
return self
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse(instring, loc, doActions)
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(
instring, len(instring), e.errmsg, self
)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(
instring, loc, "no defined alternatives to match", self
)
def __ior__(self, other):
if isinstance(other, basestring):
other = self._literalStringClass(other)
return self.append(other) # MatchFirst([self, other])
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
def _setResultsName(self, name, listAllMatches=False):
if (
not __compat__.collect_all_And_tokens
and __diag__.warn_multiple_tokens_in_named_alternation
):
if any(isinstance(e, And) for e in self.exprs):
warnings.warn(
"{0}: setting results name {1!r} on {2} expression "
"may only return a single token for an And alternative, "
"in future will return the full list of tokens".format(
"warn_multiple_tokens_in_named_alternation",
name,
type(self).__name__,
),
stacklevel=3,
)
return super(MatchFirst, self)._setResultsName(name, listAllMatches)
class Each(ParseExpression):
"""Requires all given :class:`ParseExpression` s to be found, but in
any order. Expressions may be separated by whitespace.
May be constructed using the ``'&'`` operator.
Example::
color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
shape_spec.runTests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__(self, exprs, savelist=True):
super(Each, self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
self.saveAsList = True
def streamline(self):
super(Each, self).streamline()
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
return self
def parseImpl(self, instring, loc, doActions=True):
if self.initExprGroups:
self.opt1map = dict(
(id(e.expr), e) for e in self.exprs if isinstance(e, Optional)
)
opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)]
opt2 = [
e
for e in self.exprs
if e.mayReturnEmpty and not isinstance(e, (Optional, Regex))
]
self.optionals = opt1 + opt2
self.multioptionals = [
e.expr for e in self.exprs if isinstance(e, ZeroOrMore)
]
self.multirequired = [
e.expr for e in self.exprs if isinstance(e, OneOrMore)
]
self.required = [
e
for e in self.exprs
if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))
]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse(instring, tmpLoc)
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e), e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(
instring, loc, "Missing one or more required elements (%s)" % missing
)
# add any unmatched Optionals, in case they have default values defined
matchOrder += [
e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt
]
resultlist = []
for e in matchOrder:
loc, results = e._parse(instring, loc, doActions)
resultlist.append(results)
finalResults = sum(resultlist, ParseResults([]))
return loc, finalResults
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
class ParseElementEnhance(ParserElement):
"""Abstract subclass of :class:`ParserElement`, for combining and
post-processing parsed tokens.
"""
def __init__(self, expr, savelist=False):
super(ParseElementEnhance, self).__init__(savelist)
if isinstance(expr, basestring):
if issubclass(self._literalStringClass, Token):
expr = self._literalStringClass(expr)
else:
expr = self._literalStringClass(Literal(expr))
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars(expr.whiteChars)
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl(self, instring, loc, doActions=True):
if self.expr is not None:
return self.expr._parse(instring, loc, doActions, callPreParse=False)
else:
raise ParseException("", loc, self.errmsg, self)
def leaveWhitespace(self):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore(self, other):
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super(ParseElementEnhance, self).ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
else:
super(ParseElementEnhance, self).ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
return self
def streamline(self):
super(ParseElementEnhance, self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion(self, parseElementList):
if self in parseElementList:
raise RecursiveGrammarException(parseElementList + [self])
subRecCheckList = parseElementList[:] + [self]
if self.expr is not None:
self.expr.checkRecursion(subRecCheckList)
def validate(self, validateTrace=None):
if validateTrace is None:
validateTrace = []
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__(self):
try:
return super(ParseElementEnhance, self).__str__()
except Exception:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr))
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression.
``FollowedBy`` does *not* advance the parsing position within
the input string, it only verifies that the specified parse
expression matches at the current position. ``FollowedBy``
always returns a null token list. If any results names are defined
in the lookahead expression, those *will* be returned for access by
name.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__(self, expr):
super(FollowedBy, self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
        # by using self.expr._parse and deleting the contents of the returned ParseResults list
# we keep any named results that were defined in the FollowedBy expression
_, ret = self.expr._parse(instring, loc, doActions=doActions)
del ret[:]
return loc, ret
class PrecededBy(ParseElementEnhance):
"""Lookbehind matching of the given parse expression.
``PrecededBy`` does not advance the parsing position within the
input string, it only verifies that the specified parse expression
matches prior to the current position. ``PrecededBy`` always
returns a null token list, but if a results name is defined on the
given expression, it is returned.
Parameters:
- expr - expression that must match prior to the current parse
location
- retreat - (default= ``None``) - (int) maximum number of characters
to lookbehind prior to the current parse location
If the lookbehind expression is a string, Literal, Keyword, or
a Word or CharsNotIn with a specified exact or maximum length, then
the retreat parameter is not required. Otherwise, retreat must be
specified to give a maximum number of characters to look back from
the current parse position for a lookbehind match.
Example::
# VB-style variable names with type prefixes
int_var = PrecededBy("#") + pyparsing_common.identifier
str_var = PrecededBy("$") + pyparsing_common.identifier
"""
def __init__(self, expr, retreat=None):
super(PrecededBy, self).__init__(expr)
self.expr = self.expr().leaveWhitespace()
self.mayReturnEmpty = True
self.mayIndexError = False
self.exact = False
if isinstance(expr, str):
retreat = len(expr)
self.exact = True
elif isinstance(expr, (Literal, Keyword)):
retreat = expr.matchLen
self.exact = True
elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
retreat = expr.maxLen
self.exact = True
elif isinstance(expr, _PositionToken):
retreat = 0
self.exact = True
self.retreat = retreat
self.errmsg = "not preceded by " + str(expr)
self.skipWhitespace = False
self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
def parseImpl(self, instring, loc=0, doActions=True):
if self.exact:
if loc < self.retreat:
raise ParseException(instring, loc, self.errmsg)
start = loc - self.retreat
_, ret = self.expr._parse(instring, start)
else:
# retreat specified a maximum lookbehind window, iterate
test_expr = self.expr + StringEnd()
instring_slice = instring[max(0, loc - self.retreat) : loc]
last_expr = ParseException(instring, loc, self.errmsg)
for offset in range(1, min(loc, self.retreat + 1) + 1):
try:
# print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
_, ret = test_expr._parse(
instring_slice, len(instring_slice) - offset
)
except ParseBaseException as pbe:
last_expr = pbe
else:
break
else:
raise last_expr
return loc, ret
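# Illustrative sketch (not part of the original module): when the lookbehind expression
# has no fixed length, pass an explicit retreat window, e.g. in a separate script:
#
#   num = Word(nums)
#   qty = PrecededBy(Word(alphas), retreat=10) + num
#   print(qty.searchString("width 100, height 200"))   # should print [['100'], ['200']]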
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression.
``NotAny`` does *not* advance the parsing position within the
input string, it only verifies that the specified parse expression
does *not* match at the current position. Also, ``NotAny`` does
*not* skip over leading whitespace. ``NotAny`` always returns
a null token list. May be constructed using the '~' operator.
Example::
AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
# take care not to mistake keywords for identifiers
ident = ~(AND | OR | NOT) + Word(alphas)
boolean_term = Optional(NOT) + ident
# very crude boolean expression - to support parenthesis groups and
# operation hierarchy, use infixNotation
boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term)
# integers that are followed by "." are actually floats
integer = Word(nums) + ~Char(".")
"""
def __init__(self, expr):
super(NotAny, self).__init__(expr)
        # ~ self.leaveWhitespace()
        # do NOT use self.leaveWhitespace() - we don't want it to propagate to the contained exprs
        self.skipWhitespace = False
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, " + _ustr(self.expr)
def parseImpl(self, instring, loc, doActions=True):
if self.expr.canParseNext(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class _MultipleMatch(ParseElementEnhance):
def __init__(self, expr, stopOn=None):
super(_MultipleMatch, self).__init__(expr)
self.saveAsList = True
ender = stopOn
if isinstance(ender, basestring):
ender = self._literalStringClass(ender)
self.stopOn(ender)
def stopOn(self, ender):
if isinstance(ender, basestring):
ender = self._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
return self
def parseImpl(self, instring, loc, doActions=True):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False)
try:
            hasIgnoreExprs = bool(self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables(instring, loc)
else:
preloc = loc
loc, tmptokens = self_expr_parse(instring, preloc, doActions)
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException, IndexError):
pass
return loc, tokens
def _setResultsName(self, name, listAllMatches=False):
if __diag__.warn_ungrouped_named_tokens_in_collection:
for e in [self.expr] + getattr(self.expr, "exprs", []):
if isinstance(e, ParserElement) and e.resultsName:
warnings.warn(
"{0}: setting results name {1!r} on {2} expression "
"collides with {3!r} on contained expression".format(
"warn_ungrouped_named_tokens_in_collection",
name,
type(self).__name__,
e.resultsName,
),
stacklevel=3,
)
return super(_MultipleMatch, self)._setResultsName(name, listAllMatches)
class OneOrMore(_MultipleMatch):
"""Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stopOn - (default= ``None``) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stopOn attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parseString(text).pprint()
"""
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
class ZeroOrMore(_MultipleMatch):
"""Optional repetition of zero or more of the given expression.
Parameters:
- expr - expression that must match zero or more times
- stopOn - (default= ``None``) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example: similar to :class:`OneOrMore`
"""
def __init__(self, expr, stopOn=None):
super(ZeroOrMore, self).__init__(expr, stopOn=stopOn)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
try:
return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
except (ParseException, IndexError):
return loc, []
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
Parameters:
- expr - expression that must match zero or more times
- default (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
zip.runTests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
__optionalNotMatched = _NullToken()
def __init__(self, expr, default=__optionalNotMatched):
super(Optional, self).__init__(expr, savelist=False)
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
try:
loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False)
except (ParseException, IndexError):
if self.defaultValue is not self.__optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([self.defaultValue])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [self.defaultValue]
else:
tokens = []
return loc, tokens
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
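# Illustrative sketch (not part of the original module): the ``default`` argument supplies
# a token when the optional expression is absent, e.g. in a separate script:
#
#   port = Optional(Suppress(':') + Word(nums), default='80')
#   host_port = Word(alphanums + '.') + port
#   print(host_port.parseString("example.com:8080"))   # -> ['example.com', '8080']
#   print(host_port.parseString("example.com"))        # -> ['example.com', '80']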
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched
expression is found.
Parameters:
- expr - target expression marking the end of the data to be skipped
- include - (default= ``False``) if True, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element list).
- ignore - (default= ``None``) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- failOn - (default= ``None``) define expressions that are not allowed to be
included in the skipped test; if found before the target expression is found,
the SkipTo is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quotedString)
string_data.setParseAction(tokenMap(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.searchString(report):
print tkt.dump()
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: 6
- desc: Intermittent system crash
- issue_num: 101
- sev: Critical
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: 14
- desc: Spelling error on Login ('log|n')
- issue_num: 94
- sev: Cosmetic
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: 47
- desc: System slow when running too many reports
- issue_num: 79
- sev: Minor
"""
def __init__(self, other, include=False, ignore=None, failOn=None):
super(SkipTo, self).__init__(other)
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.saveAsList = False
if isinstance(failOn, basestring):
self.failOn = self._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for " + _ustr(self.expr)
def parseImpl(self, instring, loc, doActions=True):
startloc = loc
instrlen = len(instring)
expr = self.expr
expr_parse = self.expr._parse
self_failOn_canParseNext = (
self.failOn.canParseNext if self.failOn is not None else None
)
self_ignoreExpr_tryParse = (
self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
)
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = expr_parse(instring, loc, doActions, callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the ``Forward``
variable using the '<<' operator.
Note: take care when assigning to ``Forward`` not to overlook
precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the ``Forward``::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
See :class:`ParseResults.pprint` for an example of a recursive
parser created using ``Forward``.
"""
def __init__(self, other=None):
super(Forward, self).__init__(other, savelist=False)
def __lshift__(self, other):
if isinstance(other, basestring):
other = self._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars(self.expr.whiteChars)
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace(self):
self.skipWhitespace = False
return self
def streamline(self):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate(self, validateTrace=None):
if validateTrace is None:
validateTrace = []
if self not in validateTrace:
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is not None:
return self.strRepr
# Avoid infinite recursion by setting a temporary strRepr
self.strRepr = ": ..."
# Use the string representation of main expression.
retString = "..."
try:
if self.expr is not None:
retString = _ustr(self.expr)[:1000]
else:
retString = "None"
finally:
self.strRepr = self.__class__.__name__ + ": " + retString
return self.strRepr
def copy(self):
if self.expr is not None:
return super(Forward, self).copy()
else:
ret = Forward()
ret <<= self
return ret
def _setResultsName(self, name, listAllMatches=False):
if __diag__.warn_name_set_on_empty_Forward:
if self.expr is None:
warnings.warn(
"{0}: setting results name {0!r} on {1} expression "
"that has no contained expression".format(
"warn_name_set_on_empty_Forward", name, type(self).__name__
),
stacklevel=3,
)
return super(Forward, self)._setResultsName(name, listAllMatches)
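# Illustrative sketch (not part of the original module): a Forward lets a grammar refer to
# itself before it is fully defined, e.g. nested parenthesized lists in a separate script:
#
#   nested = Forward()
#   nested <<= Suppress('(') + ZeroOrMore(Word(alphas) | nested) + Suppress(')')
#   print(nested.parseString("(a (b c) d)"))   # -> ['a', 'b', 'c', 'd']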
class TokenConverter(ParseElementEnhance):
"""
    Abstract subclass of :class:`ParseElementEnhance`, for converting parsed results.
"""
def __init__(self, expr, savelist=False):
super(TokenConverter, self).__init__(expr) # , savelist)
self.saveAsList = False
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the
input string; this can be disabled by specifying
``'adjacent=False'`` in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parseString('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parseString('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__(self, expr, joinString="", adjacent=True):
super(Combine, self).__init__(expr)
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore(self, other):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super(Combine, self).ignore(other)
return self
def postParse(self, instring, loc, tokenlist):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults(
["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
)
if self.resultsName and retToks.haskeys():
return [retToks]
else:
return retToks
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for
returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Optional(delimitedList(term))
print(func.parseString("fn a, b, 100")) # -> ['fn', 'a', 'b', '100']
func = ident + Group(Optional(delimitedList(term)))
print(func.parseString("fn a, b, 100")) # -> ['fn', ['a', 'b', '100']]
"""
def __init__(self, expr):
super(Group, self).__init__(expr)
self.saveAsList = True
def postParse(self, instring, loc, tokenlist):
return [tokenlist]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also
as a dictionary. Each element can also be referenced using the first
token in the expression as its key. Useful for tabular report
    scraping when the first column can be used as an item key.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
# print attributes as plain groups
print(OneOrMore(attr_expr).parseString(text).dump())
# instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.asDict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at :class:`ParseResults` of accessing fields by results name.
"""
def __init__(self, expr):
super(Dict, self).__init__(expr)
self.saveAsList = True
def postParse(self, instring, loc, tokenlist):
for i, tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey, int):
ikey = _ustr(tok[0]).strip()
if len(tok) == 1:
tokenlist[ikey] = _ParseResultsWithOffset("", i)
elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
else:
dictvalue = tok.copy() # ParseResults(i)
del dictvalue[0]
if len(dictvalue) != 1 or (
isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
if self.resultsName:
return [tokenlist]
else:
return tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + ZeroOrMore(',' + wd)
print(wd_list1.parseString(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
print(wd_list2.parseString(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
(See also :class:`delimitedList`.)
"""
def postParse(self, instring, loc, tokenlist):
return []
def suppress(self):
return self
class OnlyOnce(object):
"""Wrapper for parse actions, to ensure they are only called once."""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self, s, l, t):
if not self.called:
results = self.callable(s, l, t)
self.called = True
return results
raise ParseException(s, l, "")
def reset(self):
self.called = False
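# Illustrative sketch (not part of the original module): OnlyOnce wraps a parse action so
# that repeat invocations fail until reset() is called, e.g. in a separate script:
#
#   def mark(s, l, t):
#       print("first match:", t)
#   once = OnlyOnce(mark)
#   wd = Word(alphas).setParseAction(once)
#   wd.parseString("hello")    # prints "first match: ['hello']"
#   wd.parseString("again")    # raises ParseException - the wrapped action refuses to run twice
#   once.reset()               # re-arms the action for the next match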
def traceParseAction(f):
"""Decorator for debugging parse actions.
When the parse action is called, this decorator will print
``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
When the parse action completes, the decorator will print
``"<<"`` followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s, l, t = paArgs[-3:]
if len(paArgs) > 3:
thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
sys.stderr.write(
">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t)
)
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc))
raise
sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret))
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList(expr, delim=",", combine=False):
"""Helper to define a delimited list of expressions - the delimiter
defaults to ','. By default, the list elements and delimiters can
have intervening whitespace, and comments, but this can be
overridden by passing ``combine=True`` in the constructor. If
``combine`` is set to ``True``, the matching tokens are
returned as a single token string, with the delimiters included;
otherwise, the matching tokens are returned as a list of tokens,
with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..."
if combine:
return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName)
else:
return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName)
def countedArray(expr, intExpr=None):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
The matched tokens returns the array of expr tokens as a list - the
leading count token is suppressed.
If ``intExpr`` is specified, it should be a pyparsing expression
that produces an integer value.
Example::
countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
"""
arrayExpr = Forward()
def countFieldParseAction(s, l, t):
n = t[0]
arrayExpr << (n and Group(And([expr] * n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t: int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return (intExpr + arrayExpr).setName("(len) " + _ustr(expr) + "...")
def _flatten(L):
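    # recursively flatten arbitrarily nested lists into a single flat list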
ret = []
for i in L:
if isinstance(i, list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks for
a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match ``"1:1"``, but not ``"1:2"``. Because this
matches a previous literal, will also match the leading
``"1:1"`` in ``"1:10"``. If this is not desired, use
:class:`matchPreviousExpr`. Do *not* use with packrat parsing
enabled.
"""
rep = Forward()
def copyTokenToRepeater(s, l, t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName("(prev) " + _ustr(expr))
return rep
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks for
a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match ``"1:1"``, but not ``"1:2"``. Because this
matches by expressions, will *not* match the leading ``"1:1"``
in ``"1:10"``; the expressions are evaluated first, and then
compared, so ``"1"`` is compared with ``"10"``. Do *not* use
with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s, l, t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s, l, t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("", 0, "")
rep.setParseAction(mustMatchTheseTokens, callDuringTry=True)
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName("(prev) " + _ustr(expr))
return rep
def _escapeRegexRangeChars(s):
# ~ escape these chars: ^-[]
for c in r"\^-[]":
s = s.replace(c, _bslash + c)
s = s.replace("\n", r"\n")
s = s.replace("\t", r"\t")
return _ustr(s)
def oneOf(strs, caseless=False, useRegex=True, asKeyword=False):
"""Helper to quickly define a set of alternative Literals, and makes
sure to do longest-first testing when there is a conflict,
regardless of the input order, but returns
a :class:`MatchFirst` for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of
string literals
- caseless - (default= ``False``) - treat all literals as
caseless
- useRegex - (default= ``True``) - as an optimization, will
generate a Regex object; otherwise, will generate
a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
creating a :class:`Regex` raises an exception)
- asKeyword - (default=``False``) - enforce Keyword-style matching on the
generated expressions
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if isinstance(caseless, basestring):
warnings.warn(
"More than one string argument passed to oneOf, pass "
"choices as a list or space-delimited string",
stacklevel=2,
)
if caseless:
isequal = lambda a, b: a.upper() == b.upper()
masks = lambda a, b: b.upper().startswith(a.upper())
parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
else:
isequal = lambda a, b: a == b
masks = lambda a, b: b.startswith(a)
parseElementClass = Keyword if asKeyword else Literal
symbols = []
if isinstance(strs, basestring):
symbols = strs.split()
elif isinstance(strs, Iterable):
symbols = list(strs)
else:
warnings.warn(
"Invalid argument to oneOf, expected string or iterable",
SyntaxWarning,
stacklevel=2,
)
if not symbols:
return NoMatch()
if not asKeyword:
# if not producing keywords, need to reorder to take care to avoid masking
# longer choices with shorter ones
i = 0
while i < len(symbols) - 1:
cur = symbols[i]
for j, other in enumerate(symbols[i + 1 :]):
if isequal(other, cur):
del symbols[i + j + 1]
break
elif masks(cur, other):
del symbols[i + j + 1]
symbols.insert(i, other)
break
else:
i += 1
if not (caseless or asKeyword) and useRegex:
# ~ print (strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols]))
try:
if len(symbols) == len("".join(symbols)):
return Regex(
"[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)
).setName(" | ".join(symbols))
else:
return Regex("|".join(re.escape(sym) for sym in symbols)).setName(
" | ".join(symbols)
)
except Exception:
warnings.warn(
"Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning,
stacklevel=2,
)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(
" | ".join(symbols)
)
def dictOf(key, value):
"""Helper to easily and clearly define a dictionary by specifying
the respective patterns for the key and value. Takes care of
defining the :class:`Dict`, :class:`ZeroOrMore`, and
:class:`Group` tokens in the proper order. The key pattern
can include delimiting markers or punctuation, as long as they are
suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the :class:`Dict` results
can include named token fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict(OneOrMore(Group(key + value)))
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
    default, returns a string containing the original parsed text.
If the optional ``asString`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`originalTextFor` contains expressions with defined
results names, you must set ``asString`` to ``False`` if you
want to preserve those results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b", "i"):
opener, closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s, loc, t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s, l, t: s[t._original_start : t._original_end]
else:
def extractText(s, l, t):
t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr
def ungroup(expr):
"""Helper to undo pyparsing's default grouping of And expressions,
even if all but one are non-empty.
"""
return TokenConverter(expr).addParseAction(lambda t: t[0])
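# Illustrative sketch (not part of the original module): ungroup strips one level of
# grouping from the parsed tokens, e.g. in a separate script:
#
#   grouped = Group(Word(alphas) + Word(nums))
#   print(grouped.parseString("abc 123"))            # -> [['abc', '123']]
#   print(ungroup(grouped).parseString("abc 123"))   # -> ['abc', '123']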
def locatedExpr(expr):
"""Helper to decorate a returned token with its starting and ending
locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
Be careful if the input text contains ``<TAB>`` characters, you
may want to call :class:`ParserElement.parseWithTabs`
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().setParseAction(lambda s, l, t: l)
return Group(
locator("locn_start")
+ expr("value")
+ locator.copy().leaveWhitespace()("locn_end")
)
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
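# internal expressions used by srange() below to parse regex-style "[...]" character sets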
_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(
lambda s, l, t: t[0][1]
)
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(
lambda s, l, t: unichr(int(t[0].lstrip(r"\0x"), 16))
)
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(
lambda s, l, t: unichr(int(t[0][1:], 8))
)
_singleChar = (
_escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r"\]", exact=1)
)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = (
Literal("[")
+ Optional("^").setResultsName("negate")
+ Group(OneOrMore(_charRange | _singleChar)).setResultsName("body")
+ "]"
)
def srange(s):
r"""Helper to easily define string ranges for use in Word
construction. Borrows syntax from regexp '[]' string range
definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string
is the expanded character set joined into a single string. The
values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as ``\-``
or ``\]``)
- an escaped hex character with a leading ``'\x'``
(``\x21``, which is a ``'!'`` character) (``\0x##``
is also supported for backwards compatibility)
- an escaped octal character with a leading ``'\0'``
(``\041``, which is a ``'!'`` character)
- a range of any of the above, separated by a dash (``'a-z'``,
etc.)
- any combination of the above (``'aeiouy'``,
``'a-zA-Z0-9_$'``, etc.)
"""
_expanded = (
lambda p: p
if not isinstance(p, ParseResults)
else "".join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
)
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except Exception:
return ""
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at
a specific column in the input text.
"""
def verifyCol(strg, locn, toks):
if col(locn, strg) != n:
raise ParseException(strg, locn, "matched token not at column %d" % n)
return verifyCol
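# Illustrative sketch (not part of the original module): reject matches that do not start
# at the required column, e.g. in a separate script:
#
#   wd = Word(alphas).addParseAction(matchOnlyAtCol(4))
#   print(wd.searchString("ab cd"))   # -> [['cd']]  ('ab' starts at column 1 and is rejected)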
def replaceWith(replStr):
"""Helper method for common parse actions that simply return
a literal value. Especially useful when used with
:class:`transformString<ParserElement.transformString>` ().
Example::
num = Word(nums).setParseAction(lambda toks: int(toks[0]))
na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
term = na | num
OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s, l, t: [replStr]
def removeQuotes(s, l, t):
"""Helper parse action for removing quotation marks from parsed
quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def tokenMap(func, *args):
"""Helper to define a parse action by mapping a function to all
elements of a ParseResults list. If any additional args are passed,
they are forwarded to the given function as additional arguments
after the token, as in
``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``,
which will convert the parsed data to an integer using base 16.
    Example (compare the last case to the example in :class:`ParserElement.transformString`)::
hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
hex_ints.runTests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).setParseAction(tokenMap(str.upper))
OneOrMore(upperword).runTests('''
my kingdom for a horse
''')
wd = Word(alphas).setParseAction(tokenMap(str.title))
OneOrMore(wd).setParseAction(' '.join).runTests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s, l, t):
return [func(tokn, *args) for tokn in t]
try:
func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
except Exception:
func_name = str(func)
pa.__name__ = func_name
return pa
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case.
Deprecated in favor of :class:`pyparsing_common.upcaseTokens`"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case.
Deprecated in favor of :class:`pyparsing_common.downcaseTokens`"""
def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr, basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas, alphanums + "_-:")
if xml:
tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)
openTag = (
suppress_LT
+ tagStr("tag")
+ Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
+ Optional("/", default=[False])("empty").setParseAction(
lambda s, l, t: t[0] == "/"
)
+ suppress_GT
)
else:
tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(
printables, excludeChars=">"
)
openTag = (
suppress_LT
+ tagStr("tag")
+ Dict(
ZeroOrMore(
Group(
tagAttrName.setParseAction(downcaseTokens)
+ Optional(Suppress("=") + tagAttrValue)
)
)
)
+ Optional("/", default=[False])("empty").setParseAction(
lambda s, l, t: t[0] == "/"
)
+ suppress_GT
)
closeTag = Combine(_L("</") + tagStr + ">", adjacent=False)
openTag.setName("<%s>" % resname)
# add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
openTag.addParseAction(
lambda t: t.__setitem__(
"start" + "".join(resname.replace(":", " ").title().split()), t.copy()
)
)
closeTag = closeTag(
"end" + "".join(resname.replace(":", " ").title().split())
).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
openTag.tag_body = SkipTo(closeTag())
return openTag, closeTag
def makeHTMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for HTML,
given a tag name. Matches tags in either upper or lower case,
attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
# makeHTMLTags returns pyparsing expressions for the opening and
# closing tags as a 2-tuple
a, a_end = makeHTMLTags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.searchString(text):
# attributes in the <A> tag (like "href" shown here) are
# also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> https://github.com/pyparsing/pyparsing/wiki
"""
return _makeTags(tagStr, False)
def makeXMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for XML,
given a tag name. Matches tags only in the given upper/lower case.
Example: similar to :class:`makeHTMLTags`
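    A minimal sketch (the ``body`` tag name is illustrative)::
        body_start, body_end = makeXMLTags("body")
        body_expr = body_start + SkipTo(body_end)("contents") + body_end
        # unlike makeHTMLTags, this matches <body> but not <BODY>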
"""
return _makeTags(tagStr, True)
def withAttribute(*args, **attrDict):
"""Helper to create a validating parse action to be used with start
tags created with :class:`makeXMLTags` or
:class:`makeHTMLTags`. Use ``withAttribute`` to qualify
a starting tag with a required attribute value, to avoid false
matches on common tags such as ``<TD>`` or ``<DIV>``.
Call ``withAttribute`` with a series of attribute names and
    values. Specify the list of filter attribute names and values as:
- keyword arguments, as in ``(align="right")``, or
- as an explicit dict with ``**`` operator, when an attribute
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
For attribute names with a namespace prefix, you must use the second
form. Attribute names are matched insensitive to upper/lower case.
If just testing for ``class`` (with or without a namespace), use
:class:`withClass`.
To verify that the attribute exists, but without specifying a value,
pass ``withAttribute.ANY_VALUE`` as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k, v) for k, v in attrs]
def pa(s, l, tokens):
for attrName, attrValue in attrs:
if attrName not in tokens:
raise ParseException(s, l, "no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(
s,
l,
"attribute '%s' has value '%s', must be '%s'"
% (attrName, tokens[attrName], attrValue),
)
return pa
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=""):
"""Simplified version of :class:`withAttribute` when
matching on a div class - made difficult because ``class`` is
a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr: classname})
opAssoc = SimpleNamespace()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation(baseExpr, opList, lpar=Suppress("("), rpar=Suppress(")")):
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary
or binary, left- or right-associative. Parse actions can also be
attached to operator expressions. The generated parser will also
recognize the use of parentheses to override operator precedences
(see example below).
Note: if you define a deep operator list, you may see performance
issues when using infixNotation. See
:class:`ParserElement.enablePackrat` for a mechanism to potentially
improve your parser performance.
Parameters:
- baseExpr - expression representing the most basic element for the
nested
- opList - list of tuples, one for each operator precedence level
in the expression grammar; each tuple is of the form ``(opExpr,
numTerms, rightLeftAssoc, parseAction)``, where:
- opExpr is the pyparsing expression for the operator; may also
be a string, which will be converted to a Literal; if numTerms
is 3, opExpr is a tuple of two expressions, for the two
operators separating the 3 terms
- numTerms is the number of terms for this operator (must be 1,
2, or 3)
- rightLeftAssoc is the indicator whether the operator is right
or left associative, using the pyparsing-defined constants
``opAssoc.RIGHT`` and ``opAssoc.LEFT``.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the parse action
tuple member may be omitted); if the parse action is passed
a tuple or list of functions, this is equivalent to calling
``setParseAction(*fn)``
(:class:`ParserElement.setParseAction`)
- lpar - expression for matching left-parentheses
(default= ``Suppress('(')``)
- rpar - expression for matching right-parentheses
(default= ``Suppress(')')``)
Example::
# simple example of four-function arithmetic with ints and
# variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infixNotation(integer | varname,
[
('-', 1, opAssoc.RIGHT),
(oneOf('* /'), 2, opAssoc.LEFT),
(oneOf('+ -'), 2, opAssoc.LEFT),
])
arith_expr.runTests('''
5+3*6
(5+3)*6
-2--11
''', fullDump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
# captive version of FollowedBy that does not do parse actions or capture results names
class _FB(FollowedBy):
def parseImpl(self, instring, loc, doActions=True):
self.expr.tryParse(instring, loc)
return loc, []
ret = Forward()
lastExpr = baseExpr | (lpar + ret + rpar)
for i, operDef in enumerate(opList):
opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError(
"if numterms=3, opExpr must be a tuple or list of two expressions"
)
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr))
elif arity == 2:
if opExpr is not None:
matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
lastExpr + OneOrMore(opExpr + lastExpr)
)
else:
matchExpr = _FB(lastExpr + lastExpr) + Group(
lastExpr + OneOrMore(lastExpr)
)
elif arity == 3:
matchExpr = _FB(
lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
else:
raise ValueError(
"operator must be unary (1), binary (2), or ternary (3)"
)
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
elif arity == 2:
if opExpr is not None:
matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
lastExpr + OneOrMore(opExpr + thisExpr)
)
else:
matchExpr = _FB(lastExpr + thisExpr) + Group(
lastExpr + OneOrMore(thisExpr)
)
elif arity == 3:
matchExpr = _FB(
lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
else:
raise ValueError(
"operator must be unary (1), binary (2), or ternary (3)"
)
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
if isinstance(pa, (tuple, list)):
matchExpr.setParseAction(*pa)
else:
matchExpr.setParseAction(pa)
thisExpr <<= matchExpr.setName(termName) | lastExpr
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of :class:`infixNotation`, will be
dropped in a future release."""
dblQuotedString = Combine(
Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
).setName("string enclosed in double quotes")
sglQuotedString = Combine(
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).setName("string enclosed in single quotes")
quotedString = Combine(
Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
| Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
).setName("quotedString using single or double quotes")
unicodeString = Combine(_L("u") + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""Helper method for defining nested lists enclosed in opening and
closing delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list
(default= ``"("``); can also be a pyparsing expression
- closer - closing character for a nested list
(default= ``")"``); can also be a pyparsing expression
- content - expression for items within the nested lists
(default= ``None``)
- ignoreExpr - expression for ignoring opening and closing
delimiters (default= :class:`quotedString`)
If an expression is not provided for the content argument, the
nested expression will capture all whitespace-delimited content
between delimiters as a list of separate values.
Use the ``ignoreExpr`` argument to define expressions that may
contain opening or closing characters that should not be treated as
opening or closing characters for nesting, such as quotedString or
a comment expression. Specify multiple expressions using an
:class:`Or` or :class:`MatchFirst`. The default is
:class:`quotedString`, but if no expressions are to be ignored, then
pass ``None`` for this argument.
Example::
data_type = oneOf("void int short long char float double")
decl_data_type = Combine(data_type + Optional(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR, RPAR = map(Suppress, "()")
code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(cStyleComment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.searchString(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener, basestring) and isinstance(closer, basestring):
if len(opener) == 1 and len(closer) == 1:
if ignoreExpr is not None:
content = Combine(
OneOrMore(
~ignoreExpr
+ CharsNotIn(
opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
exact=1,
)
)
).setParseAction(lambda t: t[0].strip())
else:
content = empty.copy() + CharsNotIn(
opener + closer + ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t: t[0].strip())
else:
if ignoreExpr is not None:
content = Combine(
OneOrMore(
~ignoreExpr
+ ~Literal(opener)
+ ~Literal(closer)
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
)
).setParseAction(lambda t: t[0].strip())
else:
content = Combine(
OneOrMore(
~Literal(opener)
+ ~Literal(closer)
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
)
).setParseAction(lambda t: t[0].strip())
else:
raise ValueError(
"opening and closing arguments must be strings if no content expression is given"
)
ret = Forward()
if ignoreExpr is not None:
ret <<= Group(
Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
)
else:
ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
ret.setName("nested %s%s expression" % (opener, closer))
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""Helper method for defining space-delimited indentation blocks,
such as those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single
grammar should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond
the current level; set to False for block of left-most
statements (default= ``True``)
A valid block must contain at least one ``blockStatement``.
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group(funcDecl + func_body)
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << (funcDef | assignment | identifier)
module_body = OneOrMore(stmt)
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
backup_stack = indentStack[:]
def reset_stack():
indentStack[:] = backup_stack
def checkPeerIndent(s, l, t):
if l >= len(s):
return
curCol = col(l, s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseException(s, l, "illegal nesting")
raise ParseException(s, l, "not a peer entry")
def checkSubIndent(s, l, t):
curCol = col(l, s)
if curCol > indentStack[-1]:
indentStack.append(curCol)
else:
raise ParseException(s, l, "not a subentry")
def checkUnindent(s, l, t):
if l >= len(s):
return
curCol = col(l, s)
if not (indentStack and curCol in indentStack):
raise ParseException(s, l, "not an unindent")
if curCol < indentStack[-1]:
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress(), stopOn=StringEnd())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName("INDENT")
PEER = Empty().setParseAction(checkPeerIndent).setName("")
UNDENT = Empty().setParseAction(checkUnindent).setName("UNINDENT")
if indent:
smExpr = Group(
Optional(NL)
+ INDENT
+ OneOrMore(
PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd()
)
+ UNDENT
)
else:
smExpr = Group(
Optional(NL)
+ OneOrMore(
PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd()
)
+ UNDENT
)
smExpr.setFailAction(lambda a, b, c, d: reset_stack())
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName("indented block")
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag, anyCloseTag = makeHTMLTags(
Word(alphas, alphanums + "_:").setName("any tag")
)
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), "><& \"'"))
commonHTMLEntity = Regex(
"&(?P<entity>" + "|".join(_htmlEntityMap.keys()) + ");"
).setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
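# Minimal usage sketch for replaceHTMLEntity (the sample text is illustrative):
#   entity_converter = commonHTMLEntity.setParseAction(replaceHTMLEntity)
#   entity_converter.transformString("Dogs & cats")  # -> "Dogs & cats"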
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").setName(
"C style comment"
)
"Comment of the form ``/* ... */``"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form ``<!-- ... -->``"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form ``// ... (to end of line)``"
cppStyleComment = Combine(
Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dblSlashComment
).setName("C++ style comment")
"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"
javaStyleComment = cppStyleComment
"Same as :class:`cppStyleComment`"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form ``# ... (to end of line)``"
_commasepitem = (
Combine(
OneOrMore(
Word(printables, excludeChars=",")
+ Optional(Word(" \t") + ~Literal(",") + ~LineEnd())
)
)
.streamline()
.setName("commaItem")
)
commaSeparatedList = delimitedList(
Optional(quotedString.copy() | _commasepitem, default="")
).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or
quoted strings, separated by commas.
This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.
"""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""Here are some common low-level expressions that may be useful in
jump-starting parser development:
- numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
:class:`scientific notation<sci_real>`)
- common :class:`programming identifiers<identifier>`
- network addresses (:class:`MAC<mac_address>`,
:class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
- ISO8601 :class:`dates<iso8601_date>` and
:class:`datetime<iso8601_datetime>`
- :class:`UUID<uuid>`
- :class:`comma-separated list<comma_separated_list>`
Parse actions:
- :class:`convertToInteger`
- :class:`convertToFloat`
- :class:`convertToDate`
- :class:`convertToDatetime`
- :class:`stripHTMLTags`
- :class:`upcaseTokens`
- :class:`downcaseTokens`
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convertToInteger = tokenMap(int)
"""
Parse action for converting parsed integers to Python int
"""
convertToFloat = tokenMap(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).setName("integer").setParseAction(convertToInteger)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16))
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = (
Regex(r"[+-]?\d+").setName("signed integer").setParseAction(convertToInteger)
)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (
signed_integer().setParseAction(convertToFloat)
+ "/"
+ signed_integer().setParseAction(convertToFloat)
).setName("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.addParseAction(lambda t: t[0] / t[-1])
mixed_integer = (
fraction | signed_integer + Optional(Optional("-").suppress() + fraction)
).setName("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.addParseAction(sum)
real = (
Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
.setName("real number")
.setParseAction(convertToFloat)
)
"""expression that parses a floating point number and returns a float"""
sci_real = (
Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
.setName("real number with scientific notation")
.setParseAction(convertToFloat)
)
"""expression that parses a floating point number with optional
scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = (
Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
.setName("fnumber")
.setParseAction(convertToFloat)
)
"""any int or real number, returned as float"""
identifier = Word(alphas + "_", alphanums + "_").setName("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(
r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
).setName("IPv4 address")
"IPv4 address (``0.0.0.0 - 255.255.255.255``)"
_ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").setName("hex_integer")
_full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).setName(
"full IPv6 address"
)
_short_ipv6_address = (
Optional(_ipv6_part + (":" + _ipv6_part) * (0, 6))
+ "::"
+ Optional(_ipv6_part + (":" + _ipv6_part) * (0, 6))
).setName("short IPv6 address")
_short_ipv6_address.addCondition(
lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
ipv6_address = Combine(
(_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName(
"IPv6 address"
)
).setName("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(
r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
).setName("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convertToDate(fmt="%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.setParseAction(pyparsing_common.convertToDate())
print(date_expr.parseString("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(s, l, t):
try:
return datetime.strptime(t[0], fmt).date()
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
@staticmethod
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
"""Helper to create a parse action for converting parsed
datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.setParseAction(pyparsing_common.convertToDatetime())
print(dt_expr.parseString("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s, l, t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(
r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
).setName("ISO8601 date")
"ISO8601 date (``yyyy-mm-dd``)"
iso8601_datetime = Regex(
r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
).setName("ISO8601 datetime")
"ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").setName("UUID")
"UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
_html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
@staticmethod
def stripHTMLTags(s, l, tokens):
"""Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
td, td_end = makeHTMLTags("TD")
table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
print(table_text.parseString(text).body)
Prints::
More info at the pyparsing wiki page
"""
return pyparsing_common._html_stripper.transformString(tokens[0])
_commasepitem = (
Combine(
OneOrMore(
~Literal(",")
+ ~LineEnd()
+ Word(printables, excludeChars=",")
+ Optional(White(" \t"))
)
)
.streamline()
.setName("commaItem")
)
comma_separated_list = delimitedList(
Optional(quotedString.copy() | _commasepitem, default="")
).setName("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
"""Parse action to convert tokens to upper case."""
downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
"""Parse action to convert tokens to lower case."""
class _lazyclassproperty(object):
def __init__(self, fn):
self.fn = fn
self.__doc__ = fn.__doc__
self.__name__ = fn.__name__
def __get__(self, obj, cls):
if cls is None:
cls = type(obj)
if not hasattr(cls, "_intern") or any(
cls._intern is getattr(superclass, "_intern", [])
for superclass in cls.__mro__[1:]
):
cls._intern = {}
attrname = self.fn.__name__
if attrname not in cls._intern:
cls._intern[attrname] = self.fn(cls)
return cls._intern[attrname]
class unicode_set(object):
"""
A set of Unicode characters, for language-specific strings for
``alphas``, ``nums``, ``alphanums``, and ``printables``.
A unicode_set is defined by a list of ranges in the Unicode character
set, in a class attribute ``_ranges``, such as::
_ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]
A unicode set can also be defined using multiple inheritance of other unicode sets::
class CJK(Chinese, Japanese, Korean):
pass
"""
_ranges = []
@classmethod
def _get_chars_for_ranges(cls):
ret = []
for cc in cls.__mro__:
if cc is unicode_set:
break
for rr in cc._ranges:
ret.extend(range(rr[0], rr[-1] + 1))
return [unichr(c) for c in sorted(set(ret))]
@_lazyclassproperty
def printables(cls):
"all non-whitespace characters in this range"
return u"".join(filterfalse(unicode.isspace, cls._get_chars_for_ranges()))
@_lazyclassproperty
def alphas(cls):
"all alphabetic characters in this range"
return u"".join(filter(unicode.isalpha, cls._get_chars_for_ranges()))
@_lazyclassproperty
def nums(cls):
"all numeric digit characters in this range"
return u"".join(filter(unicode.isdigit, cls._get_chars_for_ranges()))
@_lazyclassproperty
def alphanums(cls):
"all alphanumeric characters in this range"
return cls.alphas + cls.nums
class pyparsing_unicode(unicode_set):
"""
A namespace class for defining common language unicode_sets.
"""
_ranges = [(32, sys.maxunicode)]
class Latin1(unicode_set):
"Unicode set for Latin-1 Unicode Character Range"
_ranges = [
(0x0020, 0x007e),
(0x00a0, 0x00ff),
]
class LatinA(unicode_set):
"Unicode set for Latin-A Unicode Character Range"
_ranges = [
(0x0100, 0x017f),
]
class LatinB(unicode_set):
"Unicode set for Latin-B Unicode Character Range"
_ranges = [
(0x0180, 0x024f),
]
class Greek(unicode_set):
"Unicode set for Greek Unicode Character Ranges"
_ranges = [
(0x0370, 0x03ff),
(0x1f00, 0x1f15),
(0x1f18, 0x1f1d),
(0x1f20, 0x1f45),
(0x1f48, 0x1f4d),
(0x1f50, 0x1f57),
(0x1f59,),
(0x1f5b,),
(0x1f5d,),
(0x1f5f, 0x1f7d),
(0x1f80, 0x1fb4),
(0x1fb6, 0x1fc4),
(0x1fc6, 0x1fd3),
(0x1fd6, 0x1fdb),
(0x1fdd, 0x1fef),
(0x1ff2, 0x1ff4),
(0x1ff6, 0x1ffe),
]
class Cyrillic(unicode_set):
"Unicode set for Cyrillic Unicode Character Range"
_ranges = [(0x0400, 0x04ff)]
class Chinese(unicode_set):
"Unicode set for Chinese Unicode Character Range"
_ranges = [
(0x4e00, 0x9fff),
(0x3000, 0x303f),
]
class Japanese(unicode_set):
"Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
_ranges = []
class Kanji(unicode_set):
"Unicode set for Kanji Unicode Character Range"
_ranges = [
(0x4e00, 0x9fbf),
(0x3000, 0x303f),
]
class Hiragana(unicode_set):
"Unicode set for Hiragana Unicode Character Range"
_ranges = [
(0x3040, 0x309f),
]
class Katakana(unicode_set):
"Unicode set for Katakana Unicode Character Range"
_ranges = [
(0x30a0, 0x30ff),
]
class Korean(unicode_set):
"Unicode set for Korean Unicode Character Range"
_ranges = [
(0xac00, 0xd7af),
(0x1100, 0x11ff),
(0x3130, 0x318f),
(0xa960, 0xa97f),
(0xd7b0, 0xd7ff),
(0x3000, 0x303f),
]
class CJK(Chinese, Japanese, Korean):
"Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
pass
class Thai(unicode_set):
"Unicode set for Thai Unicode Character Range"
_ranges = [
(0x0e01, 0x0e3a),
(0x0e3f, 0x0e5b),
]
class Arabic(unicode_set):
"Unicode set for Arabic Unicode Character Range"
_ranges = [
(0x0600, 0x061b),
(0x061e, 0x06ff),
(0x0700, 0x077f),
]
class Hebrew(unicode_set):
"Unicode set for Hebrew Unicode Character Range"
_ranges = [
(0x0590, 0x05ff),
]
class Devanagari(unicode_set):
"Unicode set for Devanagari Unicode Character Range"
_ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)]
pyparsing_unicode.Japanese._ranges = (
pyparsing_unicode.Japanese.Kanji._ranges
+ pyparsing_unicode.Japanese.Hiragana._ranges
+ pyparsing_unicode.Japanese.Katakana._ranges
)
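# Minimal usage sketch for the language-specific sets (expression names are illustrative):
#   greek_word = Word(pyparsing_unicode.Greek.alphas)
#   cjk_word = Word(pyparsing_unicode.CJK.printables)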
# define ranges in language character sets
if PY_3:
setattr(pyparsing_unicode, u"العربية", pyparsing_unicode.Arabic)
setattr(pyparsing_unicode, u"中文", pyparsing_unicode.Chinese)
setattr(pyparsing_unicode, u"кириллица", pyparsing_unicode.Cyrillic)
setattr(pyparsing_unicode, u"Ελληνικά", pyparsing_unicode.Greek)
setattr(pyparsing_unicode, u"עִברִית", pyparsing_unicode.Hebrew)
setattr(pyparsing_unicode, u"日本語", pyparsing_unicode.Japanese)
setattr(pyparsing_unicode.Japanese, u"漢字", pyparsing_unicode.Japanese.Kanji)
setattr(pyparsing_unicode.Japanese, u"カタカナ", pyparsing_unicode.Japanese.Katakana)
setattr(pyparsing_unicode.Japanese, u"ひらがな", pyparsing_unicode.Japanese.Hiragana)
setattr(pyparsing_unicode, u"한국어", pyparsing_unicode.Korean)
setattr(pyparsing_unicode, u"ไทย", pyparsing_unicode.Thai)
setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari)
class pyparsing_test:
"""
namespace class for classes useful in writing unit tests
"""
class reset_pyparsing_context:
"""
Context manager to be used when writing unit tests that modify pyparsing config values:
- packrat parsing
- default whitespace characters.
- default keyword characters
- literal string auto-conversion class
- __diag__ settings
Example:
with reset_pyparsing_context():
# test that literals used to construct a grammar are automatically suppressed
ParserElement.inlineLiteralsUsing(Suppress)
term = Word(alphas) | Word(nums)
group = Group('(' + term[...] + ')')
# assert that the '()' characters are not included in the parsed tokens
                self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
# after exiting context manager, literals are converted to Literal expressions again
"""
def __init__(self):
self._save_context = {}
def save(self):
self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
self._save_context[
"literal_string_class"
] = ParserElement._literalStringClass
self._save_context["packrat_enabled"] = ParserElement._packratEnabled
self._save_context["packrat_parse"] = ParserElement._parse
self._save_context["__diag__"] = {
name: getattr(__diag__, name) for name in __diag__._all_names
}
self._save_context["__compat__"] = {
"collect_all_And_tokens": __compat__.collect_all_And_tokens
}
return self
def restore(self):
# reset pyparsing global state
if (
ParserElement.DEFAULT_WHITE_CHARS
!= self._save_context["default_whitespace"]
):
ParserElement.setDefaultWhitespaceChars(
self._save_context["default_whitespace"]
)
Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
ParserElement.inlineLiteralsUsing(
self._save_context["literal_string_class"]
)
for name, value in self._save_context["__diag__"].items():
setattr(__diag__, name, value)
ParserElement._packratEnabled = self._save_context["packrat_enabled"]
ParserElement._parse = self._save_context["packrat_parse"]
            __compat__.collect_all_And_tokens = self._save_context["__compat__"]["collect_all_And_tokens"]
def __enter__(self):
return self.save()
def __exit__(self, *args):
return self.restore()
class TestParseResultsAsserts:
"""
A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
"""
def assertParseResultsEquals(
self, result, expected_list=None, expected_dict=None, msg=None
):
"""
Unit test assertion to compare a ParseResults object with an optional expected_list,
and compare any defined results names with an optional expected_dict.
"""
if expected_list is not None:
self.assertEqual(expected_list, result.asList(), msg=msg)
if expected_dict is not None:
self.assertEqual(expected_dict, result.asDict(), msg=msg)
def assertParseAndCheckList(
self, expr, test_string, expected_list, msg=None, verbose=True
):
"""
Convenience wrapper assert to test a parser element and input string, and assert that
the resulting ParseResults.asList() is equal to the expected_list.
"""
result = expr.parseString(test_string, parseAll=True)
if verbose:
print(result.dump())
self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
def assertParseAndCheckDict(
self, expr, test_string, expected_dict, msg=None, verbose=True
):
"""
Convenience wrapper assert to test a parser element and input string, and assert that
the resulting ParseResults.asDict() is equal to the expected_dict.
"""
result = expr.parseString(test_string, parseAll=True)
if verbose:
print(result.dump())
self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
def assertRunTestResults(
self, run_tests_report, expected_parse_results=None, msg=None
):
"""
Unit test assertion to evaluate output of ParserElement.runTests(). If a list of
list-dict tuples is given as the expected_parse_results argument, then these are zipped
with the report tuples returned by runTests and evaluated using assertParseResultsEquals.
Finally, asserts that the overall runTests() success value is True.
:param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
:param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
"""
run_test_success, run_test_results = run_tests_report
if expected_parse_results is not None:
merged = [
(rpt[0], rpt[1], expected)
for rpt, expected in zip(run_test_results, expected_parse_results)
]
for test_string, result, expected in merged:
# expected should be a tuple containing a list and/or a dict or an exception,
# and optional failure message string
# an empty tuple will skip any result validation
fail_msg = next(
(exp for exp in expected if isinstance(exp, str)), None
)
expected_exception = next(
(
exp
for exp in expected
if isinstance(exp, type) and issubclass(exp, Exception)
),
None,
)
if expected_exception is not None:
with self.assertRaises(
expected_exception=expected_exception, msg=fail_msg or msg
):
if isinstance(result, Exception):
raise result
else:
expected_list = next(
(exp for exp in expected if isinstance(exp, list)), None
)
expected_dict = next(
(exp for exp in expected if isinstance(exp, dict)), None
)
if (expected_list, expected_dict) != (None, None):
self.assertParseResultsEquals(
result,
expected_list=expected_list,
expected_dict=expected_dict,
msg=fail_msg or msg,
)
else:
# warning here maybe?
print("no validation for {!r}".format(test_string))
# do this last, in case some specific test results can be reported instead
self.assertTrue(
run_test_success, msg=msg if msg is not None else "failed runTests"
)
@contextmanager
def assertRaisesParseException(self, exc_type=ParseException, msg=None):
with self.assertRaises(exc_type, msg=msg):
yield
if __name__ == "__main__":
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = "*" | columnNameList
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = (
selectToken("command")
+ columnSpec("columns")
+ fromToken
+ tableNameList("tables")
)
# demo runTests method, including embedded comments in test string
simpleSQL.runTests(
"""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
"""
)
pyparsing_common.number.runTests(
"""
100
-100
+100
3.14159
6.02e23
1e-12
"""
)
# any int or real number, returned as float
pyparsing_common.fnumber.runTests(
"""
100
-100
+100
3.14159
6.02e23
1e-12
"""
)
pyparsing_common.hex_integer.runTests(
"""
100
FF
"""
)
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests(
"""
12345678-1234-5678-1234-567812345678
"""
)
| [
"[email protected]"
] | |
71422348907630e0d2bf271ab6308a48ea7cc115 | 6ddcb131e5f2806acde46a525ff8d46bfbe0990e | /enaml/core/parse_tab/lextab.py | 4fef6763304b572db5738b3ca643facb224ae474 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agrawalprash/enaml | 5ce1823188eb51e5b83117ebee6c3655f53e5157 | 96828b254ac9fdfa2e5b6b31eff93a4933cbc0aa | refs/heads/master | 2021-01-15T23:35:21.351626 | 2012-09-05T03:40:07 | 2012-09-05T03:40:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,273 | py | # enaml.core.parse_tab.lextab.py. This file automatically created by PLY (version 3.4). Don't edit!
_tabversion = '3.4'
_lextokens = {'LPAR': 1, 'ENDMARKER': 1, 'LESS': 1, 'AMPEREQUAL': 1, 'CIRCUMFLEX': 1, 'WS': 1, 'WITH': 1, 'MINUS': 1, 'NEWLINE': 1, 'EXCEPT': 1, 'PLUS': 1, 'PERCENTEQUAL': 1, 'ELLIPSIS': 1, 'EQEQUAL': 1, 'RIGHTSHIFTEQUAL': 1, 'EXEC': 1, 'STRING_START_SINGLE': 1, 'SLASH': 1, 'PASS': 1, 'NOTEQUAL': 1, 'NAME': 1, 'INDENT': 1, 'MINUSEQUAL': 1, 'ENAMLDEF': 1, 'DEDENT': 1, 'STRING_START_TRIPLE': 1, 'STAR': 1, 'DEL': 1, 'PRINT': 1, 'DOUBLESTAR': 1, 'DEF': 1, 'CIRCUMFLEXEQUAL': 1, 'COLON': 1, 'DOUBLECOLON': 1, 'FOR': 1, 'DOUBLESTAREQUAL': 1, 'ELSE': 1, 'TRY': 1, 'AND': 1, 'LBRACE': 1, 'AS': 1, 'OR': 1, 'LEFTSHIFT': 1, 'CONTINUE': 1, 'NOT': 1, 'LAMBDA': 1, 'RAISE': 1, 'GLOBAL': 1, 'WHILE': 1, 'VBAR': 1, 'RETURN': 1, 'DOT': 1, 'LEFTSHIFTEQUAL': 1, 'TILDE': 1, 'RSQB': 1, 'PERCENT': 1, 'DOUBLESLASH': 1, 'RBRACE': 1, 'EQUAL': 1, 'PLUSEQUAL': 1, 'IMPORT': 1, 'LESSEQUAL': 1, 'LSQB': 1, 'GREATER': 1, 'VBAREQUAL': 1, 'BREAK': 1, 'STRING_CONTINUE': 1, 'STAREQUAL': 1, 'ELIF': 1, 'SLASHEQUAL': 1, 'NUMBER': 1, 'RPAR': 1, 'ASSERT': 1, 'STRING_END': 1, 'GREATEREQUAL': 1, 'SEMI': 1, 'DOUBLESLASHEQUAL': 1, 'COMMA': 1, 'CLASS': 1, 'RIGHTSHIFT': 1, 'STRING': 1, 'COLONEQUAL': 1, 'IS': 1, 'YIELD': 1, 'FINALLY': 1, 'AT': 1, 'AMPER': 1, 'IN': 1, 'IF': 1, 'FROM': 1}
_lexreflags = 0
_lexliterals = ''
_lexstateinfo = {'TRIPLEQ2': 'exclusive', 'TRIPLEQ1': 'exclusive', 'INITIAL': 'inclusive', 'SINGLEQ2': 'exclusive', 'SINGLEQ1': 'exclusive'}
_lexstatere = {'TRIPLEQ2': [('(?P<t_SINGLEQ1_SINGLEQ2_TRIPLEQ1_TRIPLEQ2_escaped>\\\\(.|\\n))|(?P<t_TRIPLEQ2_simple>[^"\\\\]+)|(?P<t_TRIPLEQ2_q2_but_not_triple>"(?!""))|(?P<t_TRIPLEQ2_end>""")', [None, ('t_SINGLEQ1_SINGLEQ2_TRIPLEQ1_TRIPLEQ2_escaped', 'escaped'), None, ('t_TRIPLEQ2_simple', 'simple'), ('t_TRIPLEQ2_q2_but_not_triple', 'q2_but_not_triple'), ('t_TRIPLEQ2_end', 'end')])], 'TRIPLEQ1': [("(?P<t_SINGLEQ1_SINGLEQ2_TRIPLEQ1_TRIPLEQ2_escaped>\\\\(.|\\n))|(?P<t_TRIPLEQ1_simple>[^'\\\\]+)|(?P<t_TRIPLEQ1_q1_but_not_triple>'(?!''))|(?P<t_TRIPLEQ1_end>''')", [None, ('t_SINGLEQ1_SINGLEQ2_TRIPLEQ1_TRIPLEQ2_escaped', 'escaped'), None, ('t_TRIPLEQ1_simple', 'simple'), ('t_TRIPLEQ1_q1_but_not_triple', 'q1_but_not_triple'), ('t_TRIPLEQ1_end', 'end')])], 'INITIAL': [('(?P<t_comment>[ ]*\\#[^\\r\\n]*)|(?P<t_WS> [ \\t\\f]+ )|(?P<t_escaped_newline>\\\\\\n)|(?P<t_newline>\\n+)|(?P<t_LPAR>\\()|(?P<t_RPAR>\\))|(?P<t_LBRACE>\\{)|(?P<t_RBRACE>\\})|(?P<t_LSQB>\\[)|(?P<t_RSQB>\\])|(?P<t_start_triple_quoted_q1_string>[uU]?[rR]?\'\'\')|(?P<t_start_triple_quoted_q2_string>[uU]?[rR]?""")|(?P<t_start_single_quoted_q1_string>[uU]?[rR]?\')|(?P<t_start_single_quoted_q2_string>[uU]?[rR]?")|(?P<t_start_raw_python>::[\\t\\ ]*python[\\t\\ ]*::[\\t\\ ]*)|(?P<t_end_raw_python>::[\\t\\ ]*end[\\t\\ ]*::[\\t\\ ]*)|(?P<t_NAME>[a-zA-Z_][a-zA-Z0-9_]*)|(?P<t_NUMBER>((\\d+[jJ]|((\\d+\\.\\d*|\\.\\d+)([eE][-+]?\\d+)?|\\d+[eE][-+]?\\d+)[jJ])|((\\d+\\.\\d*|\\.\\d+)([eE][-+]?\\d+)?|\\d+[eE][-+]?\\d+)|(0[xX][\\da-fA-F]+[lL]?|0[bB][01]+[lL]?|(0[oO][0-7]+)|(0[0-7]*)[lL]?|[1-9]\\d*[lL]?)))|(?P<t_ELLIPSIS>\\.\\.\\.)|(?P<t_DOUBLESTAREQUAL>\\*\\*=)|(?P<t_DOUBLESTAR>\\*\\*)|(?P<t_LEFTSHIFTEQUAL><<=)|(?P<t_RIGHTSHIFTEQUAL>>>=)|(?P<t_VBAREQUAL>\\|=)|(?P<t_STAREQUAL>\\*=)|(?P<t_CIRCUMFLEXEQUAL>\\^=)|(?P<t_DOUBLESLASHEQUAL>//=)|(?P<t_PLUSEQUAL>\\+=)|(?P<t_VBAR>\\|)|(?P<t_LEFTSHIFT><<)|(?P<t_EQEQUAL>==)|(?P<t_PLUS>\\+)|(?P<t_PERCENTEQUAL>%=)|(?P<t_SLASHEQUAL>/=)|(?P<t_COLONEQUAL>:=)|(?P<t_NOTEQUAL>!=)|(?P<t_STAR>\\*)|(?P<t_GREATEREQUAL>>=)|(?P<t_CIRCUMFLEX>\\^)|(?P<t_DOUBLESLASH>//)|(?P<t_DOT>\\.)|(?P<t_MINUSEQUAL>-=)|(?P<t_DOUBLECOLON>::)|(?P<t_AMPEREQUAL>&=)|(?P<t_RIGHTSHIFT>>>)|(?P<t_LESSEQUAL><=)|(?P<t_EQUAL>=)|(?P<t_AMPER>&)|(?P<t_SLASH>/)|(?P<t_GREATER>>)|(?P<t_LESS><)|(?P<t_COMMA>,)|(?P<t_PERCENT>%)|(?P<t_TILDE>~)|(?P<t_SEMI>;)|(?P<t_MINUS>-)|(?P<t_COLON>:)|(?P<t_AT>@)', [None, ('t_comment', 'comment'), ('t_WS', 'WS'), ('t_escaped_newline', 'escaped_newline'), ('t_newline', 'newline'), ('t_LPAR', 'LPAR'), ('t_RPAR', 'RPAR'), ('t_LBRACE', 'LBRACE'), ('t_RBRACE', 'RBRACE'), ('t_LSQB', 'LSQB'), ('t_RSQB', 'RSQB'), ('t_start_triple_quoted_q1_string', 'start_triple_quoted_q1_string'), ('t_start_triple_quoted_q2_string', 'start_triple_quoted_q2_string'), ('t_start_single_quoted_q1_string', 'start_single_quoted_q1_string'), ('t_start_single_quoted_q2_string', 'start_single_quoted_q2_string'), ('t_start_raw_python', 'start_raw_python'), ('t_end_raw_python', 'end_raw_python'), ('t_NAME', 'NAME'), (None, 'NUMBER'), None, None, None, None, None, None, None, None, None, None, None, (None, 'ELLIPSIS'), (None, 'DOUBLESTAREQUAL'), (None, 'DOUBLESTAR'), (None, 'LEFTSHIFTEQUAL'), (None, 'RIGHTSHIFTEQUAL'), (None, 'VBAREQUAL'), (None, 'STAREQUAL'), (None, 'CIRCUMFLEXEQUAL'), (None, 'DOUBLESLASHEQUAL'), (None, 'PLUSEQUAL'), (None, 'VBAR'), (None, 'LEFTSHIFT'), (None, 'EQEQUAL'), (None, 'PLUS'), (None, 'PERCENTEQUAL'), (None, 'SLASHEQUAL'), (None, 'COLONEQUAL'), (None, 'NOTEQUAL'), (None, 'STAR'), (None, 'GREATEREQUAL'), (None, 'CIRCUMFLEX'), (None, 
'DOUBLESLASH'), (None, 'DOT'), (None, 'MINUSEQUAL'), (None, 'DOUBLECOLON'), (None, 'AMPEREQUAL'), (None, 'RIGHTSHIFT'), (None, 'LESSEQUAL'), (None, 'EQUAL'), (None, 'AMPER'), (None, 'SLASH'), (None, 'GREATER'), (None, 'LESS'), (None, 'COMMA'), (None, 'PERCENT'), (None, 'TILDE'), (None, 'SEMI'), (None, 'MINUS'), (None, 'COLON'), (None, 'AT')])], 'SINGLEQ2': [('(?P<t_SINGLEQ1_SINGLEQ2_TRIPLEQ1_TRIPLEQ2_escaped>\\\\(.|\\n))|(?P<t_SINGLEQ2_simple>[^"\\\\\\n]+)|(?P<t_SINGLEQ2_end>")', [None, ('t_SINGLEQ1_SINGLEQ2_TRIPLEQ1_TRIPLEQ2_escaped', 'escaped'), None, ('t_SINGLEQ2_simple', 'simple'), ('t_SINGLEQ2_end', 'end')])], 'SINGLEQ1': [("(?P<t_SINGLEQ1_SINGLEQ2_TRIPLEQ1_TRIPLEQ2_escaped>\\\\(.|\\n))|(?P<t_SINGLEQ1_simple>[^'\\\\\\n]+)|(?P<t_SINGLEQ1_end>')", [None, ('t_SINGLEQ1_SINGLEQ2_TRIPLEQ1_TRIPLEQ2_escaped', 'escaped'), None, ('t_SINGLEQ1_simple', 'simple'), ('t_SINGLEQ1_end', 'end')])]}
_lexstateignore = {'TRIPLEQ2': '', 'TRIPLEQ1': '', 'INITIAL': '', 'SINGLEQ2': '', 'SINGLEQ1': ''}
_lexstateerrorf = {'TRIPLEQ2': 't_TRIPLEQ2_error', 'TRIPLEQ1': 't_TRIPLEQ1_error', 'INITIAL': 't_error', 'SINGLEQ2': 't_SINGLEQ2_error', 'SINGLEQ1': 't_SINGLEQ1_error'}
| [
"[email protected]"
] | |
288e76ce905f639a74ba721fd38b41fc04c40f45 | 16e498600e8e9f1c0f3f0e2c4b2df2dcb56b9adc | /registration/admin.py | ea137c1ff68e3fdf9cd248567b622cc64d6f5948 | [
"BSD-3-Clause"
] | permissive | kklimonda/django-registration | b220338f0775ea7313a481438cade9341ef078c3 | 9d4099011c64d0e9d1a502a7b230fa2547d7f771 | refs/heads/master | 2021-01-17T07:28:05.795437 | 2013-04-14T03:06:20 | 2013-04-14T03:06:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,611 | py | from django.contrib import admin
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from .models import RegistrationProfile
class RegistrationAdmin(admin.ModelAdmin):
actions = ['activate_users', 'resend_activation_email']
list_display = ('user', 'activation_key_expired')
raw_id_fields = ['user']
search_fields = ('user__username', 'user__first_name', 'user__last_name', 'user__email')
def activate_users(self, request, queryset):
"""
        Activates the selected users, if they are not already
activated.
"""
for profile in queryset:
RegistrationProfile.objects.activate_user(profile.activation_key)
activate_users.short_description = _("Activate users")
def resend_activation_email(self, request, queryset):
"""
Re-sends activation emails for the selected users.
Note that this will *only* send activation emails for users
who are eligible to activate; emails will not be sent to users
whose activation keys have expired or who have already
activated.
"""
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
for profile in queryset:
if not profile.activation_key_expired():
profile.send_activation_email(site)
resend_activation_email.short_description = _("Re-send activation emails")
admin.site.register(RegistrationProfile, RegistrationAdmin)
| [
"[email protected]"
] | |
64b4dc3e651ef1a0377081e5697aad87c4789d60 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/73/usersdata/214/39782/submittedfiles/triangulo.py | fc7eb86f1197185aca4df06deba4e940fa9bee35 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # -*- coding: utf-8 -*-
# Classificação de triângulos: lê a >= b >= c > 0, verifica se os lados
# formam um triângulo e, em caso afirmativo, classifica quanto aos ângulos
# (Re/Ob/Ac) e quanto aos lados (Eq/Is/Es).
a = int(input('Digite a:'))
b = int(input('Digite b:'))
c = int(input('Digite c:'))
if a < b + c:
    print('S')
    # classificação quanto aos ângulos (a é o maior lado)
    if a**2 == b**2 + c**2:
        print('Re')
    elif a**2 > b**2 + c**2:
        print('Ob')
    else:
        print('Ac')
    # classificação quanto aos lados
    if a == b == c:
        print('Eq')
    elif a != b and b != c and a != c:
        print('Es')
    else:
        print('Is')
else:
    print('N')
| [
"[email protected]"
] | |
99df5d6a98c97acee2ab5bf8db8f1a542093aa9b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/195/61824/submittedfiles/testes.py | 569ee6ed3f4b3b4a00c2e1c54c88be2f1a92f096 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # -*- coding: utf-8 -*-
import numpy as np
def transposta(a):
    # devolve a transposta de a: t[i, j] = a[j, i]
    t = np.zeros((a.shape[1], a.shape[0]))
    for i in range(t.shape[0]):
        for j in range(t.shape[1]):
            t[i, j] = a[j, i]
    return t
n = int(input('linhas:'))
m = int(input('colunas:'))
a = np.zeros((n, m))
for i in range(n):
    for j in range(m):
        a[i, j] = float(input('elemento:'))
print(transposta(a)) | [
"[email protected]"
] | |
5bae0b9fbbefb10205ba1adbbadbfbd3ca07d7f8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_funnelled.py | 1b5788bd13ce44f929c9be42befa4a4c2f4fd558 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#class header
class _FUNNELLED():
    def __init__(self):
        self.name = "FUNNELLED"
        self.definitions = ['funnel']  # definitions default to the base form
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['funnel']
| [
"[email protected]"
] | |
f94c61f57bdbdc5ec8ae4d562de764d1a624b3f0 | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/python/util/tf_inspect_test.py | d8ddf34a5d3a75abc3f3553698994a227450044c | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 24,023 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tf_inspect."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def test_decorator(decorator_name, decorator_doc=None):
def make_tf_decorator(target):
return tf_decorator.TFDecorator(decorator_name, target, decorator_doc)
return make_tf_decorator
def test_undecorated_function():
pass
@test_decorator('decorator 1')
@test_decorator('decorator 2')
@test_decorator('decorator 3')
def test_decorated_function(x):
"""Test Decorated Function Docstring."""
return x * 2
@test_decorator('decorator')
def test_decorated_function_with_defaults(a, b=2, c='Hello'):
"""Test Decorated Function With Defaults Docstring."""
return [a, b, c]
@test_decorator('decorator')
class TestDecoratedClass(object):
"""Test Decorated Class."""
def __init__(self):
pass
def two(self):
return 2
class TfInspectTest(test.TestCase):
def testCurrentFrame(self):
self.assertEqual(inspect.currentframe(), tf_inspect.currentframe())
def testGetArgSpecOnDecoratorsThatDontProvideArgspec(self):
argspec = tf_inspect.getargspec(test_decorated_function_with_defaults)
self.assertEqual(['a', 'b', 'c'], argspec.args)
self.assertEqual((2, 'Hello'), argspec.defaults)
def testGetArgSpecOnDecoratorThatChangesArgspec(self):
argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
decorator = tf_decorator.TFDecorator('', test_undecorated_function, '',
argspec)
self.assertEqual(argspec, tf_inspect.getargspec(decorator))
def testGetArgSpecIgnoresDecoratorsThatDontProvideArgspec(self):
argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
inner_decorator = tf_decorator.TFDecorator('', test_undecorated_function,
'', argspec)
outer_decorator = tf_decorator.TFDecorator('', inner_decorator)
self.assertEqual(argspec, tf_inspect.getargspec(outer_decorator))
def testGetArgSpecReturnsOutermostDecoratorThatChangesArgspec(self):
outer_argspec = tf_inspect.ArgSpec(
args=['a'], varargs=None, keywords=None, defaults=None)
inner_argspec = tf_inspect.ArgSpec(
args=['b'], varargs=None, keywords=None, defaults=None)
inner_decorator = tf_decorator.TFDecorator('', test_undecorated_function,
'', inner_argspec)
outer_decorator = tf_decorator.TFDecorator('', inner_decorator, '',
outer_argspec)
self.assertEqual(outer_argspec, tf_inspect.getargspec(outer_decorator))
def testGetArgSpecOnPartialPositionalArgumentOnly(self):
"""Tests getargspec on partial function with only positional arguments."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, 7)
argspec = tf_inspect.ArgSpec(
args=['n'], varargs=None, keywords=None, defaults=None)
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialArgumentWithConvertibleToFalse(self):
"""Tests getargspec on partial function with args that convert to False."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, m=0)
exception_message = (r"Some arguments \['n'\] do not have default value, "
"but they are positioned after those with default "
"values. This can not be expressed with ArgSpec.")
with self.assertRaisesRegexp(ValueError, exception_message):
tf_inspect.getargspec(partial_func)
def testGetArgSpecOnPartialInvalidArgspec(self):
"""Tests getargspec on partial function that doesn't have valid argspec."""
def func(m, n, l, k=4):
return 2 * m + l + n * k
partial_func = functools.partial(func, n=7)
exception_message = (r"Some arguments \['l'\] do not have default value, "
"but they are positioned after those with default "
"values. This can not be expressed with ArgSpec.")
with self.assertRaisesRegexp(ValueError, exception_message):
tf_inspect.getargspec(partial_func)
def testGetArgSpecOnPartialValidArgspec(self):
"""Tests getargspec on partial function with valid argspec."""
def func(m, n, l, k=4):
return 2 * m + l + n * k
partial_func = functools.partial(func, n=7, l=2)
argspec = tf_inspect.ArgSpec(
args=['m', 'n', 'l', 'k'],
varargs=None,
keywords=None,
defaults=(7, 2, 4))
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialNoArgumentsLeft(self):
"""Tests getargspec on partial function that prunes all arguments."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, 7, 10)
argspec = tf_inspect.ArgSpec(
args=[], varargs=None, keywords=None, defaults=None)
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialKeywordArgument(self):
"""Tests getargspec on partial function that prunes some arguments."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, n=7)
argspec = tf_inspect.ArgSpec(
args=['m', 'n'], varargs=None, keywords=None, defaults=(7,))
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialKeywordArgumentWithDefaultValue(self):
"""Tests getargspec on partial function that prunes argument by keyword."""
def func(m=1, n=2):
return 2 * m + n
partial_func = functools.partial(func, n=7)
argspec = tf_inspect.ArgSpec(
args=['m', 'n'], varargs=None, keywords=None, defaults=(1, 7))
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialWithVarargs(self):
"""Tests getargspec on partial function with variable arguments."""
def func(m, *arg):
return m + len(arg)
partial_func = functools.partial(func, 7, 8)
argspec = tf_inspect.ArgSpec(
args=[], varargs='arg', keywords=None, defaults=None)
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialWithVarkwargs(self):
"""Tests getargspec on partial function with variable keyword arguments."""
def func(m, n, **kwarg):
return m * n + len(kwarg)
partial_func = functools.partial(func, 7)
argspec = tf_inspect.ArgSpec(
args=['n'], varargs=None, keywords='kwarg', defaults=None)
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialWithDecorator(self):
"""Tests getargspec on decorated partial function."""
@test_decorator('decorator')
def func(m=1, n=2):
return 2 * m + n
partial_func = functools.partial(func, n=7)
argspec = tf_inspect.ArgSpec(
args=['m', 'n'], varargs=None, keywords=None, defaults=(1, 7))
self.assertEqual(argspec, tf_inspect.getargspec(partial_func))
def testGetArgSpecOnPartialWithDecoratorThatChangesArgspec(self):
"""Tests getargspec on partial function with decorated argspec."""
argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
decorator = tf_decorator.TFDecorator('', test_undecorated_function, '',
argspec)
partial_argspec = tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(2, 1, 'hello'))
partial_with_decorator = functools.partial(decorator, a=2)
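    # Binding `a` by keyword gives it a default; `b` and `c` already have
    # defaults, so every remaining argument has one and the partial's argspec
    # is still expressible as a plain ArgSpec.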
self.assertEqual(argspec, tf_inspect.getargspec(decorator))
self.assertEqual(partial_argspec,
tf_inspect.getargspec(partial_with_decorator))
def testGetArgSpecOnCallableObject(self):
class Callable(object):
def __call__(self, a, b=1, c='hello'):
pass
argspec = tf_inspect.ArgSpec(
args=['self', 'a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
test_obj = Callable()
self.assertEqual(argspec, tf_inspect.getargspec(test_obj))
def testGetArgSpecOnInitClass(self):
class InitClass(object):
def __init__(self, a, b=1, c='hello'):
pass
argspec = tf_inspect.ArgSpec(
args=['self', 'a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
self.assertEqual(argspec, tf_inspect.getargspec(InitClass))
def testGetArgSpecOnNewClass(self):
class NewClass(object):
def __new__(cls, a, b=1, c='hello'):
pass
argspec = tf_inspect.ArgSpec(
args=['cls', 'a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(1, 'hello'))
self.assertEqual(argspec, tf_inspect.getargspec(NewClass))
def testGetFullArgSpecOnDecoratorsThatDontProvideFullArgSpec(self):
argspec = tf_inspect.getfullargspec(test_decorated_function_with_defaults)
self.assertEqual(['a', 'b', 'c'], argspec.args)
self.assertEqual((2, 'Hello'), argspec.defaults)
def testGetFullArgSpecOnDecoratorThatChangesFullArgSpec(self):
argspec = tf_inspect.FullArgSpec(
args=['a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
decorator = tf_decorator.TFDecorator('', test_undecorated_function, '',
argspec)
self.assertEqual(argspec, tf_inspect.getfullargspec(decorator))
def testGetFullArgSpecIgnoresDecoratorsThatDontProvideFullArgSpec(self):
argspec = tf_inspect.FullArgSpec(
args=['a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
inner_decorator = tf_decorator.TFDecorator('', test_undecorated_function,
'', argspec)
outer_decorator = tf_decorator.TFDecorator('', inner_decorator)
self.assertEqual(argspec, tf_inspect.getfullargspec(outer_decorator))
def testGetFullArgSpecReturnsOutermostDecoratorThatChangesFullArgSpec(self):
outer_argspec = tf_inspect.FullArgSpec(
args=['a'],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
inner_argspec = tf_inspect.FullArgSpec(
args=['b'],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
inner_decorator = tf_decorator.TFDecorator('', test_undecorated_function,
'', inner_argspec)
outer_decorator = tf_decorator.TFDecorator('', inner_decorator, '',
outer_argspec)
self.assertEqual(outer_argspec, tf_inspect.getfullargspec(outer_decorator))
def testGetFullArgsSpecForPartial(self):
def func(a, b):
del a, b
partial_function = functools.partial(func, 1)
argspec = tf_inspect.FullArgSpec(
args=['b'],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(partial_function))
def testGetFullArgSpecOnPartialNoArgumentsLeft(self):
"""Tests getfullargspec on partial function that prunes all arguments."""
def func(m, n):
return 2 * m + n
partial_func = functools.partial(func, 7, 10)
argspec = tf_inspect.FullArgSpec(
args=[],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(partial_func))
def testGetFullArgSpecOnPartialWithVarargs(self):
"""Tests getfullargspec on partial function with variable arguments."""
def func(m, *arg):
return m + len(arg)
partial_func = functools.partial(func, 7, 8)
argspec = tf_inspect.FullArgSpec(
args=[],
varargs='arg',
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(partial_func))
def testGetFullArgSpecOnPartialWithVarkwargs(self):
"""Tests getfullargspec.
Tests on partial function with variable keyword arguments.
"""
def func(m, n, **kwarg):
return m * n + len(kwarg)
partial_func = functools.partial(func, 7)
argspec = tf_inspect.FullArgSpec(
args=['n'],
varargs=None,
varkw='kwarg',
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(partial_func))
def testGetFullArgSpecOnCallableObject(self):
class Callable(object):
def __call__(self, a, b=1, c='hello'):
pass
argspec = tf_inspect.FullArgSpec(
args=['self', 'a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
test_obj = Callable()
self.assertEqual(argspec, tf_inspect.getfullargspec(test_obj))
def testGetFullArgSpecOnInitClass(self):
class InitClass(object):
def __init__(self, a, b=1, c='hello'):
pass
argspec = tf_inspect.FullArgSpec(
args=['self', 'a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(InitClass))
def testGetFullArgSpecOnNewClass(self):
class NewClass(object):
def __new__(cls, a, b=1, c='hello'):
pass
argspec = tf_inspect.FullArgSpec(
args=['cls', 'a', 'b', 'c'],
varargs=None,
varkw=None,
defaults=(1, 'hello'),
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
self.assertEqual(argspec, tf_inspect.getfullargspec(NewClass))
def testGetDoc(self):
self.assertEqual('Test Decorated Function With Defaults Docstring.',
tf_inspect.getdoc(test_decorated_function_with_defaults))
def testGetFile(self):
self.assertTrue('tf_inspect_test.py' in tf_inspect.getfile(
test_decorated_function_with_defaults))
self.assertTrue('tf_decorator.py' in tf_inspect.getfile(
test_decorator('decorator')(tf_decorator.unwrap)))
def testGetMembers(self):
self.assertEqual(
inspect.getmembers(TestDecoratedClass),
tf_inspect.getmembers(TestDecoratedClass))
def testGetModule(self):
self.assertEqual(
inspect.getmodule(TestDecoratedClass),
tf_inspect.getmodule(TestDecoratedClass))
self.assertEqual(
inspect.getmodule(test_decorated_function),
tf_inspect.getmodule(test_decorated_function))
self.assertEqual(
inspect.getmodule(test_undecorated_function),
tf_inspect.getmodule(test_undecorated_function))
def testGetSource(self):
expected = '''@test_decorator('decorator')
def test_decorated_function_with_defaults(a, b=2, c='Hello'):
"""Test Decorated Function With Defaults Docstring."""
return [a, b, c]
'''
self.assertEqual(
expected, tf_inspect.getsource(test_decorated_function_with_defaults))
def testGetSourceFile(self):
self.assertEqual(
__file__,
tf_inspect.getsourcefile(test_decorated_function_with_defaults))
def testGetSourceLines(self):
expected = inspect.getsourcelines(
test_decorated_function_with_defaults.decorated_target)
self.assertEqual(
expected,
tf_inspect.getsourcelines(test_decorated_function_with_defaults))
def testIsBuiltin(self):
self.assertEqual(
tf_inspect.isbuiltin(TestDecoratedClass),
inspect.isbuiltin(TestDecoratedClass))
self.assertEqual(
tf_inspect.isbuiltin(test_decorated_function),
inspect.isbuiltin(test_decorated_function))
self.assertEqual(
tf_inspect.isbuiltin(test_undecorated_function),
inspect.isbuiltin(test_undecorated_function))
self.assertEqual(tf_inspect.isbuiltin(range), inspect.isbuiltin(range))
self.assertEqual(tf_inspect.isbuiltin(max), inspect.isbuiltin(max))
def testIsClass(self):
self.assertTrue(tf_inspect.isclass(TestDecoratedClass))
self.assertFalse(tf_inspect.isclass(test_decorated_function))
def testIsFunction(self):
self.assertTrue(tf_inspect.isfunction(test_decorated_function))
self.assertFalse(tf_inspect.isfunction(TestDecoratedClass))
def testIsMethod(self):
self.assertTrue(tf_inspect.ismethod(TestDecoratedClass().two))
self.assertFalse(tf_inspect.ismethod(test_decorated_function))
def testIsModule(self):
self.assertTrue(
tf_inspect.ismodule(inspect.getmodule(inspect.currentframe())))
self.assertFalse(tf_inspect.ismodule(test_decorated_function))
def testIsRoutine(self):
self.assertTrue(tf_inspect.isroutine(len))
self.assertFalse(tf_inspect.isroutine(TestDecoratedClass))
def testStack(self):
expected_stack = inspect.stack()
actual_stack = tf_inspect.stack()
self.assertEqual(len(expected_stack), len(actual_stack))
self.assertEqual(expected_stack[0][0], actual_stack[0][0]) # Frame object
self.assertEqual(expected_stack[0][1], actual_stack[0][1]) # Filename
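    # tf_inspect.stack() is invoked one source line below inspect.stack(), so
    # the line number reported for the current frame is expected to differ by
    # exactly one.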
self.assertEqual(expected_stack[0][2],
actual_stack[0][2] - 1) # Line number
self.assertEqual(expected_stack[0][3], actual_stack[0][3]) # Function name
self.assertEqual(expected_stack[1:], actual_stack[1:])
class TfInspectGetCallArgsTest(test.TestCase):
def testReturnsEmptyWhenUnboundFuncHasNoParameters(self):
def empty():
pass
self.assertEqual({}, tf_inspect.getcallargs(empty))
def testClashingParameterNames(self):
def func(positional, func=1, func_and_positional=2, kwargs=3):
return positional, func, func_and_positional, kwargs
kwargs = {}
self.assertEqual(
tf_inspect.getcallargs(func, 0, **kwargs), {
'positional': 0,
'func': 1,
'func_and_positional': 2,
'kwargs': 3
})
kwargs = dict(func=4, func_and_positional=5, kwargs=6)
self.assertEqual(
tf_inspect.getcallargs(func, 0, **kwargs), {
'positional': 0,
'func': 4,
'func_and_positional': 5,
'kwargs': 6
})
def testUnboundFuncWithOneParamPositional(self):
def func(a):
return a
self.assertEqual({'a': 5}, tf_inspect.getcallargs(func, 5))
def testUnboundFuncWithTwoParamsPositional(self):
def func(a, b):
return (a, b)
self.assertEqual({'a': 10, 'b': 20}, tf_inspect.getcallargs(func, 10, 20))
def testUnboundFuncWithOneParamKeyword(self):
def func(a):
return a
self.assertEqual({'a': 5}, tf_inspect.getcallargs(func, a=5))
def testUnboundFuncWithTwoParamsKeyword(self):
def func(a, b):
return (a, b)
self.assertEqual({'a': 6, 'b': 7}, tf_inspect.getcallargs(func, a=6, b=7))
def testUnboundFuncWithOneParamDefault(self):
def func(a=13):
return a
self.assertEqual({'a': 13}, tf_inspect.getcallargs(func))
def testUnboundFuncWithOneParamDefaultOnePositional(self):
def func(a=0):
return a
self.assertEqual({'a': 1}, tf_inspect.getcallargs(func, 1))
def testUnboundFuncWithTwoParamsDefaultOnePositional(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 5, 'b': 2}, tf_inspect.getcallargs(func, 5))
def testUnboundFuncWithTwoParamsDefaultTwoPositional(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 3, 'b': 4}, tf_inspect.getcallargs(func, 3, 4))
def testUnboundFuncWithOneParamDefaultOneKeyword(self):
def func(a=1):
return a
self.assertEqual({'a': 3}, tf_inspect.getcallargs(func, a=3))
def testUnboundFuncWithTwoParamsDefaultOneKeywordFirst(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 3, 'b': 2}, tf_inspect.getcallargs(func, a=3))
def testUnboundFuncWithTwoParamsDefaultOneKeywordSecond(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 1, 'b': 4}, tf_inspect.getcallargs(func, b=4))
def testUnboundFuncWithTwoParamsDefaultTwoKeywords(self):
def func(a=1, b=2):
return (a, b)
self.assertEqual({'a': 3, 'b': 4}, tf_inspect.getcallargs(func, a=3, b=4))
def testBoundFuncWithOneParam(self):
class Test(object):
def bound(self):
pass
t = Test()
self.assertEqual({'self': t}, tf_inspect.getcallargs(t.bound))
def testBoundFuncWithManyParamsAndDefaults(self):
class Test(object):
def bound(self, a, b=2, c='Hello'):
return (a, b, c)
t = Test()
self.assertEqual({
'self': t,
'a': 3,
'b': 2,
'c': 'Goodbye'
}, tf_inspect.getcallargs(t.bound, 3, c='Goodbye'))
def testClassMethod(self):
class Test(object):
@classmethod
def test(cls, a, b=3, c='hello'):
return (a, b, c)
self.assertEqual({
'cls': Test,
'a': 5,
'b': 3,
'c': 'goodbye'
}, tf_inspect.getcallargs(Test.test, 5, c='goodbye'))
def testUsesOutermostDecoratorsArgSpec(self):
def func():
pass
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
decorated = tf_decorator.make_decorator(
func,
wrapper,
decorator_argspec=tf_inspect.ArgSpec(
args=['a', 'b', 'c'],
varargs=None,
keywords=None,
defaults=(3, 'hello')))
self.assertEqual({
'a': 4,
'b': 3,
'c': 'goodbye'
}, tf_inspect.getcallargs(decorated, 4, c='goodbye'))
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
fdbc362014380cedc9ec820a8c28b657237f2444 | dd8511e1209646823f7ec2a2ce669171f9b0a5cc | /plato/tools/common/test_symbolic_predictors.py | 3dfd88b375367eee00f840f7afbef08a991bd643 | [] | no_license | codeaudit/plato | 9e8df28e589d6c43aef2271e9f940076ef4a143d | ba19f92a42729e9d3cf5da05746dead83db3f41c | refs/heads/master | 2021-01-12T11:32:34.796675 | 2016-10-07T14:05:00 | 2016-10-07T14:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py | from plato.tools.mlp.mlp import MultiLayerPerceptron
from plato.tools.optimization.cost import negative_log_likelihood_dangerous
from plato.tools.common.online_predictors import GradientBasedPredictor
from plato.tools.optimization.optimizers import SimpleGradientDescent
from utils.benchmarks.train_and_test import percent_argmax_correct
from utils.tools.iteration import zip_minibatch_iterate
from utils.datasets.synthetic_clusters import get_synthetic_clusters_dataset
__author__ = 'peter'
def test_symbolic_predictors():
"""
    This test is meant to serve as both a test and a tutorial for how to use a symbolic predictor.
It shows how to construct a symbolic predictor using a function, cost function, and optimizer.
It then trains this predictor on a synthetic toy dataset and demonstrates that it has learned.
"""
dataset = get_synthetic_clusters_dataset()
symbolic_predictor = GradientBasedPredictor(
function = MultiLayerPerceptron.from_init(
layer_sizes = [dataset.input_size, 100, dataset.n_categories],
output_activation='softmax',
w_init = 0.1,
rng = 3252
),
cost_function=negative_log_likelihood_dangerous,
optimizer=SimpleGradientDescent(eta = 0.1),
)
predictor = symbolic_predictor.compile()
# .compile() turns the symbolic predictor into an IPredictor object, which can be called with numpy arrays.
init_score = percent_argmax_correct(predictor.predict(dataset.test_set.input), dataset.test_set.target)
for x_m, y_m in zip_minibatch_iterate([dataset.training_set.input, dataset.training_set.target], minibatch_size=10, n_epochs=20):
predictor.train(x_m, y_m)
final_score = percent_argmax_correct(predictor.predict(dataset.test_set.input), dataset.test_set.target)
print 'Initial score: %s%%. Final score: %s%%' % (init_score, final_score)
assert init_score < 30
assert final_score > 98
if __name__ == '__main__':
    test_symbolic_predictors()
| [
"[email protected]"
] | |
e8d66db9f808801439a592d691e8ab8563e3eb6b | 1eb7fa8b1745d4e51cefb4eceb44621862516aa6 | /Company Interview/FB/SerializeAndDeserialize.py | 33ca3c4b67d0cd6cb974244469491f275d5f25f1 | [] | no_license | geniousisme/CodingInterview | bd93961d728f1fe266ad5edf91adc5d024e5ca48 | a64bca9c07a7be8d4060c4b96e89d8d429a7f1a3 | refs/heads/master | 2021-01-10T11:15:31.305787 | 2017-03-06T00:03:13 | 2017-03-06T00:03:13 | 43,990,453 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Codec(object):
def serialize(self, root):
def serializer(root):
if root is None:
res.append('#')
return
res.append(str(root.val))
serializer(root.left)
serializer(root.right)
res = []
serializer(root)
return ' '.join(res)
def deserialize(self, data):
def deserializer():
node_val = next(node_vals)
if node_val == '#':
return None
node = TreeNode(int(node_val))
node.left = deserializer()
node.right = deserializer()
return node
node_vals = iter(data.split())
return deserializer()
if __name__ == "__main__":
codec = Codec()
t1 = TreeNode(1)
t2 = TreeNode(2)
t3 = TreeNode(3)
t4 = TreeNode(4)
t5 = TreeNode(5)
t6 = TreeNode(6)
t7 = TreeNode(7)
t1.left = t2
t1.right = t3
t2.left = t4
t2.right = t5
t3.left = t6
t3.right = t7
print codec.serialize(t1)
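    # Illustrative round-trip check: the pre-order string with '#' sentinels
    # should survive a deserialize/serialize cycle.
    data = codec.serialize(t1)
    assert codec.serialize(codec.deserialize(data)) == data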
| [
"[email protected]"
] | |
3ea21ddb2ae9b03c9181387ce1af7ce6b652d1cb | e18b3cb22c09cb6b2fff7555eeaeddba1513ac1f | /python_stack/flask_fundamentals/whats_my_name/app.py | 4bafe3b4c5f7d1bd350de8f8e61c352ad859b72d | [] | no_license | LawerenceLee/coding_dojo_projects | e71760850f3164fbd217004d0ea2f38c5bddd2d8 | 099b1f862ec520bab93c58235151680bb74c0bf6 | refs/heads/master | 2021-05-10T16:11:40.466412 | 2018-05-09T02:24:04 | 2018-05-09T02:24:04 | 118,569,970 | 1 | 1 | null | 2018-03-24T22:37:58 | 2018-01-23T06:48:41 | Python | UTF-8 | Python | false | false | 417 | py | from flask import (Flask, render_template, redirect, request, url_for)
DEBUG = True
PORT = 8000
HOST = "0.0.0.0"
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route("/process", methods=['POST'])
def process():
print(request.form['your-name'])
return redirect(url_for("index"))
if __name__ == "__main__":
app.run(debug=DEBUG, host=HOST, port=PORT) | [
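
# Example request against /process (assuming the development server above is
# running locally on port 8000; the form field name matches the handler):
#   curl -X POST -d "your-name=Ada" http://localhost:8000/process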
"[email protected]"
] | |
8b648420b85b3333e48cd30d97adff051fcc4d67 | 2a1969afe3818412140efb25921f35610dd9023d | /python/pythonGUI/PyQt5/my note/examples/3_3_box_layout_vbox_hbox.py | dedee22f54e3d8a2e9f2906c1cc1aad0231bdc2f | [] | no_license | Light2077/LightNote | 149cf42089f15bbebd62e27fe5aa6afe67f25779 | cd733014f8be44207d624a5fd02dfddcd776aad1 | refs/heads/master | 2023-09-01T07:49:05.494481 | 2023-08-24T10:00:09 | 2023-08-24T10:00:09 | 224,410,710 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | import sys
from PyQt5.QtWidgets import (
QApplication,
QMainWindow,
QWidget,
QVBoxLayout,
QHBoxLayout,
QPushButton,
)
class Example(QMainWindow):
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setGeometry(200, 200, 300, 150)
self.setWindowTitle("VBox+HBox")
vbox = QVBoxLayout()
btn1 = QPushButton("A", self)
btn2 = QPushButton("B", self)
btn3 = QPushButton("C", self)
vbox.addWidget(btn1)
vbox.addWidget(btn2)
vbox.addWidget(btn3)
hbox = QHBoxLayout()
btn4 = QPushButton("D", self)
btn5 = QPushButton("E", self)
btn6 = QPushButton("F", self)
hbox.addWidget(btn4)
hbox.addWidget(btn5)
hbox.addWidget(btn6)
vbox.addLayout(hbox)
w = QWidget()
self.setCentralWidget(w)
w.setLayout(vbox)
self.show()
if __name__ == "__main__":
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
fc7df984c9bb03b366acaf1ce8f789b720508ac0 | e0045eec29aab56212c00f9293a21eb3b4b9fe53 | /purchase_stock/models/purchase.py | 9489f3eb572c7eb8f7d435338ab2b54fb00278bf | [] | no_license | tamam001/ALWAFI_P1 | a3a9268081b9befc668a5f51c29ce5119434cc21 | 402ea8687c607fbcb5ba762c2020ebc4ee98e705 | refs/heads/master | 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,123 | py | # -*- coding: utf-8 -*-
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.tools.float_utils import float_compare
from odoo.exceptions import UserError
from odoo.addons.purchase.models.purchase import PurchaseOrder as Purchase
class PurchaseOrder(models.Model):
_inherit = 'purchase.order'
@api.model
def _default_picking_type(self):
type_obj = self.env['stock.picking.type']
company_id = self.env.context.get('company_id') or self.env.user.company_id.id
types = type_obj.search([('code', '=', 'incoming'), ('warehouse_id.company_id', '=', company_id)])
if not types:
types = type_obj.search([('code', '=', 'incoming'), ('warehouse_id', '=', False)])
return types[:1]
incoterm_id = fields.Many2one('account.incoterms', 'Incoterm', states={'done': [('readonly', True)]}, help="International Commercial Terms are a series of predefined commercial terms used in international transactions.")
picking_count = fields.Integer(compute='_compute_picking', string='Picking count', default=0, store=True)
picking_ids = fields.Many2many('stock.picking', compute='_compute_picking', string='Receptions', copy=False, store=True)
picking_type_id = fields.Many2one('stock.picking.type', 'Deliver To', states=Purchase.READONLY_STATES, required=True, default=_default_picking_type,
help="This will determine operation type of incoming shipment")
default_location_dest_id_usage = fields.Selection(related='picking_type_id.default_location_dest_id.usage', string='Destination Location Type',
help="Technical field used to display the Drop Ship Address", readonly=True)
group_id = fields.Many2one('procurement.group', string="Procurement Group", copy=False)
is_shipped = fields.Boolean(compute="_compute_is_shipped")
@api.depends('order_line.move_ids.returned_move_ids',
'order_line.move_ids.state',
'order_line.move_ids.picking_id')
def _compute_picking(self):
for order in self:
pickings = self.env['stock.picking']
for line in order.order_line:
# We keep a limited scope on purpose. Ideally, we should also use move_orig_ids and
# do some recursive search, but that could be prohibitive if not done correctly.
moves = line.move_ids | line.move_ids.mapped('returned_move_ids')
pickings |= moves.mapped('picking_id')
order.picking_ids = pickings
order.picking_count = len(pickings)
@api.depends('picking_ids', 'picking_ids.state')
def _compute_is_shipped(self):
for order in self:
if order.picking_ids and all([x.state in ['done', 'cancel'] for x in order.picking_ids]):
order.is_shipped = True
@api.onchange('picking_type_id')
def _onchange_picking_type_id(self):
if self.picking_type_id.default_location_dest_id.usage != 'customer':
self.dest_address_id = False
# --------------------------------------------------
# CRUD
# --------------------------------------------------
def write(self, vals):
if vals.get('order_line') and self.state == 'purchase':
for order in self:
pre_order_line_qty = {order_line: order_line.product_qty for order_line in order.mapped('order_line')}
res = super(PurchaseOrder, self).write(vals)
if vals.get('order_line') and self.state == 'purchase':
for order in self:
to_log = {}
for order_line in order.order_line:
if pre_order_line_qty.get(order_line, False) and float_compare(pre_order_line_qty[order_line], order_line.product_qty, precision_rounding=order_line.product_uom.rounding) > 0:
to_log[order_line] = (order_line.product_qty, pre_order_line_qty[order_line])
if to_log:
order._log_decrease_ordered_quantity(to_log)
return res
# --------------------------------------------------
# Actions
# --------------------------------------------------
@api.multi
def button_approve(self, force=False):
result = super(PurchaseOrder, self).button_approve(force=force)
self._create_picking()
return result
@api.multi
def button_cancel(self):
for order in self:
for pick in order.picking_ids:
if pick.state == 'done':
raise UserError(_('Unable to cancel purchase order %s as some receptions have already been done.') % (order.name))
            # If the product is MTO, change the procure_method of the closest move to the purchase to MTS.
            # The purpose is to link the PO that the user will manually generate to the existing moves' chain.
if order.state in ('draft', 'sent', 'to approve'):
for order_line in order.order_line:
if order_line.move_dest_ids:
move_dest_ids = order_line.move_dest_ids.filtered(lambda m: m.state not in ('done', 'cancel'))
siblings_states = (move_dest_ids.mapped('move_orig_ids')).mapped('state')
if all(state in ('done', 'cancel') for state in siblings_states):
move_dest_ids.write({'procure_method': 'make_to_stock'})
move_dest_ids._recompute_state()
for pick in order.picking_ids.filtered(lambda r: r.state != 'cancel'):
pick.action_cancel()
order.order_line.write({'move_dest_ids':[(5,0,0)]})
return super(PurchaseOrder, self).button_cancel()
@api.multi
def action_view_picking(self):
""" This function returns an action that display existing picking orders of given purchase order ids. When only one found, show the picking immediately.
"""
action = self.env.ref('stock.action_picking_tree_all')
result = action.read()[0]
# override the context to get rid of the default filtering on operation type
result['context'] = {}
pick_ids = self.mapped('picking_ids')
# choose the view_mode accordingly
if not pick_ids or len(pick_ids) > 1:
result['domain'] = "[('id','in',%s)]" % (pick_ids.ids)
elif len(pick_ids) == 1:
res = self.env.ref('stock.view_picking_form', False)
result['views'] = [(res and res.id or False, 'form')]
result['res_id'] = pick_ids.id
return result
# --------------------------------------------------
# Business methods
# --------------------------------------------------
def _log_decrease_ordered_quantity(self, purchase_order_lines_quantities):
def _keys_in_sorted(move):
""" sort by picking and the responsible for the product the
move.
"""
return (move.picking_id.id, move.product_id.responsible_id.id)
def _keys_in_groupby(move):
""" group by picking and the responsible for the product the
move.
"""
return (move.picking_id, move.product_id.responsible_id)
def _render_note_exception_quantity_po(order_exceptions):
order_line_ids = self.env['purchase.order.line'].browse([order_line.id for order in order_exceptions.values() for order_line in order[0]])
purchase_order_ids = order_line_ids.mapped('order_id')
move_ids = self.env['stock.move'].concat(*rendering_context.keys())
impacted_pickings = move_ids.mapped('picking_id')._get_impacted_pickings(move_ids) - move_ids.mapped('picking_id')
values = {
'purchase_order_ids': purchase_order_ids,
'order_exceptions': order_exceptions.values(),
'impacted_pickings': impacted_pickings,
}
return self.env.ref('purchase_stock.exception_on_po').render(values=values)
documents = self.env['stock.picking']._log_activity_get_documents(purchase_order_lines_quantities, 'move_ids', 'DOWN', _keys_in_sorted, _keys_in_groupby)
filtered_documents = {}
for (parent, responsible), rendering_context in documents.items():
if parent._name == 'stock.picking':
if parent.state == 'cancel':
continue
filtered_documents[(parent, responsible)] = rendering_context
self.env['stock.picking']._log_activity(_render_note_exception_quantity_po, filtered_documents)
@api.multi
def _get_destination_location(self):
self.ensure_one()
if self.dest_address_id:
return self.dest_address_id.property_stock_customer.id
return self.picking_type_id.default_location_dest_id.id
@api.model
def _prepare_picking(self):
if not self.group_id:
self.group_id = self.group_id.create({
'name': self.name,
'partner_id': self.partner_id.id
})
if not self.partner_id.property_stock_supplier.id:
raise UserError(_("You must set a Vendor Location for this partner %s") % self.partner_id.name)
return {
'picking_type_id': self.picking_type_id.id,
'partner_id': self.partner_id.id,
'date': self.date_order,
'origin': self.name,
'location_dest_id': self._get_destination_location(),
'location_id': self.partner_id.property_stock_supplier.id,
'company_id': self.company_id.id,
}
@api.multi
def _create_picking(self):
StockPicking = self.env['stock.picking']
for order in self:
if any([ptype in ['product', 'consu'] for ptype in order.order_line.mapped('product_id.type')]):
pickings = order.picking_ids.filtered(lambda x: x.state not in ('done', 'cancel'))
if not pickings:
res = order._prepare_picking()
picking = StockPicking.create(res)
else:
picking = pickings[0]
moves = order.order_line._create_stock_moves(picking)
moves = moves.filtered(lambda x: x.state not in ('done', 'cancel'))._action_confirm()
seq = 0
for move in sorted(moves, key=lambda move: move.date_expected):
seq += 5
move.sequence = seq
moves._action_assign()
picking.message_post_with_view('mail.message_origin_link',
values={'self': picking, 'origin': order},
subtype_id=self.env.ref('mail.mt_note').id)
return True
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
move_ids = fields.One2many('stock.move', 'purchase_line_id', string='Reservation', readonly=True, ondelete='set null', copy=False)
orderpoint_id = fields.Many2one('stock.warehouse.orderpoint', 'Orderpoint')
move_dest_ids = fields.One2many('stock.move', 'created_purchase_line_id', 'Downstream Moves')
@api.model
def create(self, values):
line = super(PurchaseOrderLine, self).create(values)
if line.order_id.state == 'purchase':
line._create_or_update_picking()
return line
@api.multi
def write(self, values):
result = super(PurchaseOrderLine, self).write(values)
# Update expected date of corresponding moves
if 'date_planned' in values:
self.env['stock.move'].search([
('purchase_line_id', 'in', self.ids), ('state', '!=', 'done')
]).write({'date_expected': values['date_planned']})
if 'product_qty' in values:
self.filtered(lambda l: l.order_id.state == 'purchase')._create_or_update_picking()
return result
# --------------------------------------------------
# Business methods
# --------------------------------------------------
@api.multi
def _create_or_update_picking(self):
for line in self:
if line.product_id.type in ('product', 'consu'):
# Prevent decreasing below received quantity
if float_compare(line.product_qty, line.qty_received, line.product_uom.rounding) < 0:
raise UserError(_('You cannot decrease the ordered quantity below the received quantity.\n'
'Create a return first.'))
if float_compare(line.product_qty, line.qty_invoiced, line.product_uom.rounding) == -1:
# If the quantity is now below the invoiced quantity, create an activity on the vendor bill
# inviting the user to create a refund.
activity = self.env['mail.activity'].sudo().create({
'activity_type_id': self.env.ref('mail.mail_activity_data_todo').id,
                        'note': _('The ordered quantity on your purchase order is now lower than the billed quantity. You should ask for a refund. '),
'res_id': line.invoice_lines[0].invoice_id.id,
'res_model_id': self.env.ref('account.model_account_invoice').id,
})
activity._onchange_activity_type_id()
# If the user increased quantity of existing line or created a new line
pickings = line.order_id.picking_ids.filtered(lambda x: x.state not in ('done', 'cancel') and x.location_dest_id.usage in ('internal', 'transit'))
picking = pickings and pickings[0] or False
if not picking:
res = line.order_id._prepare_picking()
picking = self.env['stock.picking'].create(res)
move_vals = line._prepare_stock_moves(picking)
for move_val in move_vals:
self.env['stock.move']\
.create(move_val)\
._action_confirm()\
._action_assign()
@api.multi
def _get_stock_move_price_unit(self):
self.ensure_one()
line = self[0]
order = line.order_id
price_unit = line.price_unit
if line.taxes_id:
price_unit = line.taxes_id.with_context(round=False).compute_all(
price_unit, currency=line.order_id.currency_id, quantity=1.0, product=line.product_id, partner=line.order_id.partner_id
)['total_excluded']
if line.product_uom.id != line.product_id.uom_id.id:
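            # Rescale from price per purchase UoM to price per product (stock)
            # UoM, e.g. a per-dozen purchase price becomes a per-unit price
            # when the product's stock UoM is Units.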
price_unit *= line.product_uom.factor / line.product_id.uom_id.factor
if order.currency_id != order.company_id.currency_id:
price_unit = order.currency_id._convert(
price_unit, order.company_id.currency_id, self.company_id, self.date_order or fields.Date.today(), round=False)
return price_unit
@api.multi
def _prepare_stock_moves(self, picking):
""" Prepare the stock moves data for one order line. This function returns a list of
dictionary ready to be used in stock.move's create()
"""
self.ensure_one()
res = []
if self.product_id.type not in ['product', 'consu']:
return res
qty = 0.0
price_unit = self._get_stock_move_price_unit()
for move in self.move_ids.filtered(lambda x: x.state != 'cancel' and not x.location_dest_id.usage == "supplier"):
qty += move.product_uom._compute_quantity(move.product_uom_qty, self.product_uom, rounding_method='HALF-UP')
template = {
'name': self.name or '',
'product_id': self.product_id.id,
'product_uom': self.product_uom.id,
'date': self.order_id.date_order,
'date_expected': self.date_planned,
'location_id': self.order_id.partner_id.property_stock_supplier.id,
'location_dest_id': self.order_id._get_destination_location(),
'picking_id': picking.id,
'partner_id': self.order_id.dest_address_id.id,
'move_dest_ids': [(4, x) for x in self.move_dest_ids.ids],
'state': 'draft',
'purchase_line_id': self.id,
'company_id': self.order_id.company_id.id,
'price_unit': price_unit,
'picking_type_id': self.order_id.picking_type_id.id,
'group_id': self.order_id.group_id.id,
'origin': self.order_id.name,
'route_ids': self.order_id.picking_type_id.warehouse_id and [(6, 0, [x.id for x in self.order_id.picking_type_id.warehouse_id.route_ids])] or [],
'warehouse_id': self.order_id.picking_type_id.warehouse_id.id,
}
diff_quantity = self.product_qty - qty
if float_compare(diff_quantity, 0.0, precision_rounding=self.product_uom.rounding) > 0:
quant_uom = self.product_id.uom_id
get_param = self.env['ir.config_parameter'].sudo().get_param
if self.product_uom.id != quant_uom.id and get_param('stock.propagate_uom') != '1':
product_qty = self.product_uom._compute_quantity(diff_quantity, quant_uom, rounding_method='HALF-UP')
template['product_uom'] = quant_uom.id
template['product_uom_qty'] = product_qty
else:
template['product_uom_qty'] = diff_quantity
res.append(template)
return res
@api.multi
def _create_stock_moves(self, picking):
values = []
for line in self:
for val in line._prepare_stock_moves(picking):
values.append(val)
return self.env['stock.move'].create(values)
def _update_received_qty(self):
for line in self:
total = 0.0
for move in line.move_ids:
if move.state == 'done':
if move.location_dest_id.usage == "supplier":
if move.to_refund:
total -= move.product_uom._compute_quantity(move.product_uom_qty, line.product_uom)
elif move.origin_returned_move_id._is_dropshipped() and not move._is_dropshipped_returned():
                        # Edge case: the dropship is returned to the stock, not to the supplier.
# In this case, the received quantity on the PO is set although we didn't
# receive the product physically in our stock. To avoid counting the
# quantity twice, we do nothing.
pass
else:
total += move.product_uom._compute_quantity(move.product_uom_qty, line.product_uom)
line.qty_received = total
def _merge_in_existing_line(self, product_id, product_qty, product_uom, location_id, name, origin, values):
""" This function purpose is to be override with the purpose to forbide _run_buy method
to merge a new po line in an existing one.
"""
return True
| [
"[email protected]"
] | |
223995c1aa28ee01b0a3621450a20580925c9a85 | f8ff84f02d6dfa66d003890c4f51ea575232ba93 | /cinder/cinder/tests/unit/test_huawei_drivers.py | 52bf49c2f41a37016e480d8a8f05c0c4cacec09b | [
"Apache-2.0"
] | permissive | zarson/stack | 8d341463bdf0136447bf1ada5be943df8ba55a4b | 827003bc566ed992f754618063a771694e51cfca | refs/heads/master | 2021-06-03T00:49:19.075199 | 2016-05-12T07:45:35 | 2016-05-12T07:45:35 | 58,616,957 | 0 | 1 | null | 2020-07-24T01:59:08 | 2016-05-12T07:08:17 | Python | UTF-8 | Python | false | false | 148,572 | py | # Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for huawei drivers."""
import copy
import ddt
import json
import mock
import re
import tempfile
import time
from xml.dom import minidom
from oslo_log import log as logging
from cinder import exception
from cinder import test
from cinder.tests.unit import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import fc_zone_helper
from cinder.volume.drivers.huawei import huawei_conf
from cinder.volume.drivers.huawei import huawei_driver
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import hypermetro
from cinder.volume.drivers.huawei import replication
from cinder.volume.drivers.huawei import rest_client
from cinder.volume.drivers.huawei import smartx
LOG = logging.getLogger(__name__)
hypermetro_devices = """{
"remote_device": {
"RestURL": "http://192.0.2.69:8082/deviceManager/rest",
"UserName": "admin",
"UserPassword": "Admin@storage1",
"StoragePool": "StoragePool001",
"domain_name": "hypermetro-domain",
"remote_target_ip": "192.0.2.241"
}
}
"""
test_volume = {
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'host': 'ubuntu001@backend001#OpenStack_Pool',
'provider_location': '11',
'status': 'available',
'admin_metadata': {'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'},
}
fake_smartx_value = {'smarttier': 'true',
'smartcache': 'true',
'smartpartition': 'true',
'thin_provisioning_support': 'true',
'thick_provisioning_support': False,
'policy': '2',
'cachename': 'cache-test',
'partitionname': 'partition-test',
}
fake_hypermetro_opts = {'hypermetro': 'true',
'smarttier': False,
'smartcache': False,
'smartpartition': False,
'thin_provisioning_support': False,
'thick_provisioning_support': False,
}
hyper_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'host': 'ubuntu@huawei#OpenStack_Pool',
'provider_location': '11',
'volume_metadata': [{'key': 'hypermetro_id',
'value': '1'},
{'key': 'remote_lun_id',
'value': '11'}],
'admin_metadata': {},
}
sync_replica_specs = {'replication_enabled': '<is> True',
'replication_type': '<in> sync'}
async_replica_specs = {'replication_enabled': '<is> True',
'replication_type': '<in> async'}
TEST_PAIR_ID = "3400a30d844d0004"
replication_volume = {
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'host': 'ubuntu@huawei#OpenStack_Pool',
'provider_location': '11',
'admin_metadata': {'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'},
'replication_status': 'disabled',
'replication_driver_data':
'{"pair_id": "%s", "rmt_lun_id": "1"}' % TEST_PAIR_ID,
}
test_snap = {
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 1,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'provider_location': '11',
'volume': {'provider_location': '12',
'admin_metadata': {
'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'}},
}
test_host = {'host': 'ubuntu001@backend001#OpenStack_Pool',
'capabilities': {'smartcache': True,
'location_info': '210235G7J20000000000',
'QoS_support': True,
'pool_name': 'OpenStack_Pool',
'timestamp': '2015-07-13T11:41:00.513549',
'smartpartition': True,
'allocated_capacity_gb': 0,
'volume_backend_name': 'HuaweiFCDriver',
'free_capacity_gb': 20.0,
'driver_version': '1.1.0',
'total_capacity_gb': 20.0,
'smarttier': True,
'hypermetro': True,
'reserved_percentage': 0,
'vendor_name': None,
'thick_provisioning_support': False,
'thin_provisioning_support': True,
'storage_protocol': 'FC',
}
}
test_new_type = {
'name': u'new_type',
'qos_specs_id': None,
'deleted': False,
'created_at': None,
'updated_at': None,
'extra_specs': {
'smarttier': '<is> true',
'smartcache': '<is> true',
'smartpartition': '<is> true',
'thin_provisioning_support': '<is> true',
'thick_provisioning_support': '<is> False',
'policy': '2',
'smartcache:cachename': 'cache-test',
'smartpartition:partitionname': 'partition-test',
},
'is_public': True,
'deleted_at': None,
'id': u'530a56e1-a1a4-49f3-ab6c-779a6e5d999f',
'description': None,
}
test_new_replication_type = {
'name': u'new_type',
'qos_specs_id': None,
'deleted': False,
'created_at': None,
'updated_at': None,
'extra_specs': {
'replication_enabled': '<is> True',
'replication_type': '<in> sync',
},
'is_public': True,
'deleted_at': None,
'id': u'530a56e1-a1a4-49f3-ab6c-779a6e5d999f',
'description': None,
}
hypermetro_devices = """
{
"remote_device": {
"RestURL": "http://192.0.2.69:8082/deviceManager/rest",
"UserName":"admin",
"UserPassword":"Admin@storage2",
"StoragePool":"StoragePool001",
"domain_name":"hypermetro_test"}
}
"""
FAKE_FIND_POOL_RESPONSE = {'CAPACITY': '985661440',
'ID': '0',
'TOTALCAPACITY': '985661440'}
FAKE_CREATE_VOLUME_RESPONSE = {"ID": "1",
"NAME": "5mFHcBv4RkCcD+JyrWc0SA",
"WWN": '6643e8c1004c5f6723e9f454003'}
FakeConnector = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3',
'wwpns': ['10000090fa0d6754'],
'wwnns': ['10000090fa0d6755'],
'host': 'ubuntuc',
}
smarttier_opts = {'smarttier': 'true',
'smartpartition': False,
'smartcache': False,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
'policy': '3',
'readcachepolicy': '1',
'writecachepolicy': None,
}
fake_fabric_mapping = {
'swd1': {
'target_port_wwn_list': ['2000643e8c4c5f66'],
'initiator_port_wwn_list': ['10000090fa0d6754']
}
}
CHANGE_OPTS = {'policy': ('1', '2'),
'partitionid': (['1', 'partition001'], ['2', 'partition002']),
'cacheid': (['1', 'cache001'], ['2', 'cache002']),
'qos': (['11', {'MAXIOPS': '100', 'IOType': '1'}],
{'MAXIOPS': '100', 'IOType': '2',
'MIN': 1, 'LATENCY': 1}),
'host': ('ubuntu@huawei#OpenStack_Pool',
'ubuntu@huawei#OpenStack_Pool'),
'LUNType': ('0', '1'),
}
# A fake response of create a host
FAKE_CREATE_HOST_RESPONSE = """
{
"error": {
"code": 0
},
"data":{"NAME": "ubuntuc001",
"ID": "1"}
}
"""
# A fake response of success response storage
FAKE_COMMON_SUCCESS_RESPONSE = """
{
"error": {
"code": 0
},
"data":{}
}
"""
# A fake response of login huawei storage
FAKE_GET_LOGIN_STORAGE_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"username": "admin",
"iBaseToken": "2001031430",
"deviceid": "210235G7J20000000000"
}
}
"""
# A fake response of login out huawei storage
FAKE_LOGIN_OUT_STORAGE_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": 11
}
}
"""
# A fake response of mock storage pool info
FAKE_STORAGE_POOL_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"USERFREECAPACITY": "985661440",
"ID": "0",
"NAME": "OpenStack_Pool",
"USERTOTALCAPACITY": "985661440"
}]
}
"""
# A fake response of lun or lungroup response
FAKE_LUN_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": "1",
"NAME": "5mFHcBv4RkCcD+JyrWc0SA",
"WWN": "6643e8c1004c5f6723e9f454003",
"DESCRIPTION": "21ec7341-9256-497b-97d9-ef48edcf0635",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "27",
"ALLOCTYPE": "1",
"CAPACITY": "2097152"
}
}
"""
FAKE_LUN_GET_SUCCESS_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": "11",
"IOCLASSID": "11",
"NAME": "5mFHcBv4RkCcD+JyrWc0SA",
"DESCRIPTION": "21ec7341-9256-497b-97d9-ef48edcf0635",
"RUNNINGSTATUS": "10",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "27",
"LUNLIST": "",
"ALLOCTYPE": "1",
"CAPACITY": "2097152",
"WRITEPOLICY": "1",
"MIRRORPOLICY": "0",
"PREFETCHPOLICY": "1",
"PREFETCHVALUE": "20",
"DATATRANSFERPOLICY": "1",
"READCACHEPOLICY": "2",
"WRITECACHEPOLICY": "5",
"OWNINGCONTROLLER": "0B",
"SMARTCACHEPARTITIONID": "",
"CACHEPARTITIONID": "",
"WWN": "6643e8c1004c5f6723e9f454003",
"PARENTNAME": "OpenStack_Pool"
}
}
"""
FAKE_QUERY_ALL_LUN_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"ID": "1",
"NAME": "IexzQZJWSXuX2e9I7c8GNQ"
}]
}
"""
FAKE_LUN_ASSOCIATE_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"ID":"11"
}]
}
"""
FAKE_QUERY_LUN_GROUP_INFO_RESPONSE = """
{
"error": {
"code":0
},
"data":[{
"NAME":"OpenStack_LunGroup_1",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256
}]
}
"""
FAKE_QUERY_LUN_GROUP_RESPONSE = """
{
"error": {
"code":0
},
"data":{
"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256
}
}
"""
FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
"DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA",
"ID":"11",
"TYPE":256
}
}
"""
FAKE_LUN_COUNT_RESPONSE = """
{
"data":{
"COUNT":"0"
},
"error":{
"code":0,
"description":"0"
}
}
"""
# A fake response of snapshot list response
FAKE_SNAPSHOT_LIST_INFO_RESPONSE = """
{
"error": {
"code": 0,
"description": "0"
},
"data": [{
"ID": 11,
"NAME": "wr_LMKAjS7O_VtsEIREGYw"
},
{
"ID": 12,
"NAME": "SDFAJSDFLKJ"
},
{
"ID": 13,
"NAME": "s1Ew5v36To-hR2txJitX5Q"
}]
}
"""
# A fake response of create snapshot response
FAKE_CREATE_SNAPSHOT_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"ID": 11,
"NAME": "YheUoRwbSX2BxN7"
}
}
"""
# A fake response of get snapshot response
FAKE_GET_SNAPSHOT_INFO_RESPONSE = """
{
"error": {
"code": 0,
"description": "0"
},
"data": {
"ID": 11,
"NAME": "YheUoRwbSX2BxN7"
}
}
"""
# A fake response of get iscsi response
FAKE_GET_ISCSI_INFO_RESPONSE = """
{
"data": [{
"ETHPORTID": "139267",
"ID": "iqn.oceanstor:21004846fb8ca15f::22003:192.0.2.244",
"TPGT": "8196",
"TYPE": 249
},
{
"ETHPORTID": "139268",
"ID": "iqn.oceanstor:21004846fb8ca15f::22003:192.0.2.244",
"TPGT": "8196",
"TYPE": 249
}
],
"error": {
"code": 0,
"description": "0"
}
}
"""
# A fake response of get eth info response
FAKE_GET_ETH_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"PARENTTYPE": 209,
"MACADDRESS": "00:22:a1:0a:79:57",
"ETHNEGOTIATE": "-1",
"ERRORPACKETS": "0",
"IPV4ADDR": "192.0.2.2",
"IPV6GATEWAY": "",
"IPV6MASK": "0",
"OVERFLOWEDPACKETS": "0",
"ISCSINAME": "P0",
"HEALTHSTATUS": "1",
"ETHDUPLEX": "2",
"ID": "16909568",
"LOSTPACKETS": "0",
"TYPE": 213,
"NAME": "P0",
"INIORTGT": "4",
"RUNNINGSTATUS": "10",
"IPV4GATEWAY": "",
"BONDNAME": "",
"STARTTIME": "1371684218",
"SPEED": "1000",
"ISCSITCPPORT": "0",
"IPV4MASK": "255.255.0.0",
"IPV6ADDR": "",
"LOGICTYPE": "0",
"LOCATION": "ENG0.A5.P0",
"MTU": "1500",
"PARENTID": "1.5"
},
{
"PARENTTYPE": 209,
"MACADDRESS": "00:22:a1:0a:79:57",
"ETHNEGOTIATE": "-1",
"ERRORPACKETS": "0",
"IPV4ADDR": "192.0.2.1",
"IPV6GATEWAY": "",
"IPV6MASK": "0",
"OVERFLOWEDPACKETS": "0",
"ISCSINAME": "P0",
"HEALTHSTATUS": "1",
"ETHDUPLEX": "2",
"ID": "16909568",
"LOSTPACKETS": "0",
"TYPE": 213,
"NAME": "P0",
"INIORTGT": "4",
"RUNNINGSTATUS": "10",
"IPV4GATEWAY": "",
"BONDNAME": "",
"STARTTIME": "1371684218",
"SPEED": "1000",
"ISCSITCPPORT": "0",
"IPV4MASK": "255.255.0.0",
"IPV6ADDR": "",
"LOGICTYPE": "0",
"LOCATION": "ENG0.A5.P3",
"MTU": "1500",
"PARENTID": "1.5"
}]
}
"""
FAKE_GET_ETH_ASSOCIATE_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"IPV4ADDR": "192.0.2.1",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "10"
},
{
"IPV4ADDR": "192.0.2.2",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "10"
}
]
}
"""
# A fake response of get iscsi device info response
FAKE_GET_ISCSI_DEVICE_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"CMO_ISCSI_DEVICE_NAME": "iqn.2006-08.com.huawei:oceanstor:21000022a:"
}]
}
"""
# A fake response of get iscsi device info response
FAKE_GET_ALL_HOST_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"PARENTTYPE": 245,
"NAME": "ubuntuc",
"DESCRIPTION": "",
"RUNNINGSTATUS": "1",
"IP": "",
"PARENTNAME": "",
"OPERATIONSYSTEM": "0",
"LOCATION": "",
"HEALTHSTATUS": "1",
"MODEL": "",
"ID": "1",
"PARENTID": "",
"NETWORKNAME": "",
"TYPE": 21
},
{
"PARENTTYPE": 245,
"NAME": "ubuntu",
"DESCRIPTION": "",
"RUNNINGSTATUS": "1",
"IP": "",
"PARENTNAME": "",
"OPERATIONSYSTEM": "0",
"LOCATION": "",
"HEALTHSTATUS": "1",
"MODEL": "",
"ID": "2",
"PARENTID": "",
"NETWORKNAME": "",
"TYPE": 21
}]
}
"""
# A fake response of get host or hostgroup info response
FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"NAME":"ubuntuc",
"DESCRIPTION":"",
"ID":"0",
"TYPE":14
},
{"NAME":"OpenStack_HostGroup_1",
"DESCRIPTION":"",
"ID":"0",
"TYPE":14
}
]
}
"""
FAKE_GET_HOST_GROUP_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data":{
"NAME":"ubuntuc",
"DESCRIPTION":"",
"ID":"0",
"TYPE":14
}
}
"""
# A fake response of lun copy info response
FAKE_GET_LUN_COPY_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": {
"COPYSTOPTIME": "-1",
"HEALTHSTATUS": "1",
"NAME": "w1PSNvu6RumcZMmSh4/l+Q==",
"RUNNINGSTATUS": "36",
"DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==",
"ID": "0",
"LUNCOPYTYPE": "1",
"COPYPROGRESS": "0",
"COPYSPEED": "2",
"TYPE": 219,
"COPYSTARTTIME": "-1"
}
}
"""
# A fake response of lun copy list info response
FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE = """
{
"error": {
"code": 0
},
"data": [{
"COPYSTOPTIME": "1372209335",
"HEALTHSTATUS": "1",
"NAME": "w1PSNvu6RumcZMmSh4/l+Q==",
"RUNNINGSTATUS": "40",
"DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==",
"ID": "0",
"LUNCOPYTYPE": "1",
"COPYPROGRESS": "100",
"COPYSPEED": "2",
"TYPE": 219,
"COPYSTARTTIME": "1372209329"
}]
}
"""
# A fake response of mappingview info response
FAKE_GET_MAPPING_VIEW_INFO_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"OpenStack_Mapping_View_1",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"1",
"INBANDLUNWWN":"",
"TYPE":245
},
{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"YheUoRwbSX2BxN767nvLSw",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"2",
"INBANDLUNWWN": "",
"TYPE": 245
}]
}
"""
FAKE_GET_MAPPING_VIEW_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"mOWtSXnaQKi3hpB3tdFRIQ",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"11",
"INBANDLUNWWN":"",
"TYPE": 245,
"AVAILABLEHOSTLUNIDLIST": ""
}]
}
"""
FAKE_GET_SPEC_MAPPING_VIEW_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"WORKMODE":"255",
"HEALTHSTATUS":"1",
"NAME":"mOWtSXnaQKi3hpB3tdFRIQ",
"RUNNINGSTATUS":"27",
"DESCRIPTION":"",
"ENABLEINBANDCOMMAND":"true",
"ID":"1",
"INBANDLUNWWN":"",
"TYPE":245,
"AVAILABLEHOSTLUNIDLIST": "[1]"
}
}
"""
FAKE_FC_INFO_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"HEALTHSTATUS":"1",
"NAME":"",
"MULTIPATHTYPE":"1",
"ISFREE":"true",
"RUNNINGSTATUS":"27",
"ID":"10000090fa0d6754",
"OPERATIONSYSTEM":"255",
"TYPE":223
},
{
"HEALTHSTATUS":"1",
"NAME":"",
"MULTIPATHTYPE":"1",
"ISFREE":"true",
"RUNNINGSTATUS":"27",
"ID":"10000090fa0d6755",
"OPERATIONSYSTEM":"255",
"TYPE":223
}]
}
"""
FAKE_ISCSI_INITIATOR_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"CHAPNAME":"mm-user",
"HEALTHSTATUS":"1",
"ID":"iqn.1993-08.org.debian:01:9073aba6c6f",
"ISFREE":"true",
"MULTIPATHTYPE":"1",
"NAME":"",
"OPERATIONSYSTEM":"255",
"RUNNINGSTATUS":"28",
"TYPE":222,
"USECHAP":"true"
}]
}
"""
FAKE_HOST_LINK_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"PARENTTYPE":21,
"TARGET_ID":"0000000000000000",
"INITIATOR_NODE_WWN":"20000090fa0d6754",
"INITIATOR_TYPE":"223",
"RUNNINGSTATUS":"27",
"PARENTNAME":"ubuntuc",
"INITIATOR_ID":"10000090fa0d6754",
"TARGET_PORT_WWN":"24000022a10a2a39",
"HEALTHSTATUS":"1",
"INITIATOR_PORT_WWN":"10000090fa0d6754",
"ID":"010000090fa0d675-0000000000110400",
"TARGET_NODE_WWN":"21000022a10a2a39",
"PARENTID":"1",
"CTRL_ID":"0",
"TYPE":255,
"TARGET_TYPE":"212"
}]
}
"""
FAKE_PORT_GROUP_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"ID":11,
"NAME": "portgroup-test"
}]
}
"""
FAKE_ERROR_INFO_RESPONSE = """
{
"error":{
"code":31755596
}
}
"""
FAKE_ERROR_CONNECT_RESPONSE = """
{
"error":{
"code":-403
}
}
"""
FAKE_ERROR_LUN_INFO_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"ID":"11",
"IOCLASSID":"11",
"NAME":"5mFHcBv4RkCcD+JyrWc0SA",
"ALLOCTYPE": "0",
"DATATRANSFERPOLICY": "0",
"SMARTCACHEPARTITIONID": "0",
"CACHEPARTITIONID": "0"
}
}
"""
FAKE_GET_FC_INI_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"ID":"10000090fa0d6754",
"ISFREE":"true"
}]
}
"""
FAKE_SYSTEM_VERSION_RESPONSE = """
{
"error":{
"code": 0
},
"data":{
"PRODUCTVERSION": "V100R001C10",
"wwn": "21003400a30d844d"
}
}
"""
FAKE_GET_LUN_MIGRATION_RESPONSE = """
{
"data":[{"ENDTIME":"1436816174",
"ID":"9",
"PARENTID":"11",
"PARENTNAME":"xmRBHMlVRruql5vwthpPXQ",
"PROCESS":"-1",
"RUNNINGSTATUS":"76",
"SPEED":"2",
"STARTTIME":"1436816111",
"TARGETLUNID":"1",
"TARGETLUNNAME":"4924891454902893639",
"TYPE":253,
"WORKMODE":"0"
}],
"error":{"code":0,
"description":"0"}
}
"""
FAKE_HYPERMETRODOMAIN_RESPONSE = """
{
"error":{
"code": 0
},
"data":{
"PRODUCTVERSION": "V100R001C10",
"ID": "11",
"NAME": "hypermetro_test",
"RUNNINGSTATUS": "42"
}
}
"""
FAKE_HYPERMETRO_RESPONSE = """
{
"error":{
"code": 0
},
"data":{
"PRODUCTVERSION": "V100R001C10",
"ID": "11",
"NAME": "hypermetro_test",
"RUNNINGSTATUS": "1",
"HEALTHSTATUS": "1"
}
}
"""
FAKE_QOS_INFO_RESPONSE = """
{
"error":{
"code": 0
},
"data":{
"ID": "11"
}
}
"""
FAKE_GET_FC_PORT_RESPONSE = """
{
"error":{
"code":0
},
"data":[{
"RUNNINGSTATUS":"10",
"WWN":"2000643e8c4c5f66",
"PARENTID":"0A.1",
"ID": "1114368",
"RUNSPEED": "16000"
},
{
"RUNNINGSTATUS":"10",
"WWN":"2009643e8c4c5f67",
"PARENTID":"0A.1",
"ID": "1114369",
"RUNSPEED": "16000"
}]
}
"""
FAKE_SMARTCACHEPARTITION_RESPONSE = """
{
"error":{
"code":0
},
"data":{
"ID":"11",
"NAME":"cache-name"
}
}
"""
FAKE_CONNECT_FC_RESPONCE = {
"driver_volume_type": 'fibre_channel',
"data": {
"target_wwn": ["10000090fa0d6754"],
"target_lun": "1",
"volume_id": "21ec7341-9256-497b-97d9-ef48edcf0635"
}
}
FAKE_METRO_INFO_RESPONCE = {
"error": {
"code": 0
},
"data": {
"PRODUCTVERSION": "V100R001C10",
"ID": "11",
"NAME": "hypermetro_test",
"RUNNINGSTATUS": "42"
}
}
# mock login info map
MAP_COMMAND_TO_FAKE_RESPONSE = {}
MAP_COMMAND_TO_FAKE_RESPONSE['/xx/sessions'] = (
FAKE_GET_LOGIN_STORAGE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/sessions'] = (
FAKE_LOGIN_OUT_STORAGE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION/POST'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION?range=[0-256]/GET'] = (
FAKE_GET_LUN_MIGRATION_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock storage info map
MAP_COMMAND_TO_FAKE_RESPONSE['/storagepool'] = (
FAKE_STORAGE_POOL_RESPONSE)
# mock lun info map
MAP_COMMAND_TO_FAKE_RESPONSE['/lun'] = (
FAKE_LUN_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/GET'] = (
FAKE_LUN_GET_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/GET'] = (
FAKE_LUN_GET_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun?range=[0-65535]/GET'] = (
FAKE_QUERY_ALL_LUN_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256'
'&ASSOCIATEOBJID=11/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256'
'&ASSOCIATEOBJID=12/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?ID=1&TYPE=11&ASSOCIATEOBJTYPE=21'
'&ASSOCIATEOBJID=0/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21'
'&ASSOCIATEOBJID=1/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate/cachepartition?ID=1'
'&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=11'
'/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup?range=[0-8191]/GET'] = (
FAKE_QUERY_LUN_GROUP_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup'] = (
FAKE_QUERY_LUN_GROUP_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate'] = (
FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUNGroup/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=1/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=11/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=1/GET'] = (
FAKE_LUN_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/count?TYPE=11&ASSOCIATEOBJTYPE=256'
'&ASSOCIATEOBJID=11/GET'] = (
FAKE_LUN_COUNT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/expand/PUT'] = (
FAKE_LUN_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=12&ASSOCIATEOBJTYPE=11'
'&ASSOCIATEOBJID=12/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock snapshot info map
MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot'] = (
FAKE_CREATE_SNAPSHOT_INFO_RESPONSE)
# mock snapshot info map
MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/11/GET'] = (
FAKE_GET_SNAPSHOT_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/activate'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/stop/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot?range=[0-32767]/GET'] = (
FAKE_SNAPSHOT_LIST_INFO_RESPONSE)
# mock QoS info map
MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/11/GET'] = (
FAKE_LUN_GET_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/active/11/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/'] = (
FAKE_QOS_INFO_RESPONSE)
# mock iscsi info map
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_tgt_port/GET'] = (
FAKE_GET_ISCSI_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/eth_port/GET'] = (
FAKE_GET_ETH_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/eth_port/associate?TYPE=213&ASSOCIATEOBJTYPE'
'=257&ASSOCIATEOBJID=11/GET'] = (
FAKE_GET_ETH_ASSOCIATE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsidevicename'] = (
FAKE_GET_ISCSI_DEVICE_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator?range=[0-256]/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/'] = (
FAKE_ISCSI_INITIATOR_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/POST'] = (
FAKE_ISCSI_INITIATOR_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/PUT'] = (
FAKE_ISCSI_INITIATOR_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/remove_iscsi_from_host/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/'
'iqn.1993-08.debian:01:ec2bff7ac3a3/PUT'] = (
FAKE_ISCSI_INITIATOR_RESPONSE)
# mock host info map
MAP_COMMAND_TO_FAKE_RESPONSE['/host?range=[0-65535]/GET'] = (
FAKE_GET_ALL_HOST_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host/1/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host'] = (
FAKE_CREATE_HOST_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup?range=[0-8191]/GET'] = (
FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup'] = (
FAKE_GET_HOST_GROUP_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=14&ID=0'
'&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=1'
'/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=14&ID=0'
'&ASSOCIATEOBJID=0/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=21&'
'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup/0/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=21&'
'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup/associate'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock copy info map
MAP_COMMAND_TO_FAKE_RESPONSE['/luncopy'] = (
FAKE_GET_LUN_COPY_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY?range=[0-1023]/GET'] = (
FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY/start/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY/0/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock mapping view info map
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview?range=[0-8191]/GET'] = (
FAKE_GET_MAPPING_VIEW_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/PUT'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/1/GET'] = (
FAKE_GET_SPEC_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/1/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/lungroup?TYPE=256&'
'ASSOCIATEOBJTYPE=245&ASSOCIATEOBJID=1/GET'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&'
'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&'
'ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=11/GET'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&'
'ASSOCIATEOBJTYPE=257&ASSOCIATEOBJID=11/GET'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate?ASSOCIATEOBJTYPE=245&'
'ASSOCIATEOBJID=1&range=[0-8191]/GET'] = (
FAKE_GET_MAPPING_VIEW_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/CREATE_ASSOCIATE/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
# mock FC info map
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?ISFREE=true&'
'range=[0-8191]/GET'] = (
FAKE_FC_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/10000090fa0d6754/GET'] = (
FAKE_FC_INFO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/10000090fa0d6754/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/host_link?INITIATOR_TYPE=223'
'&INITIATOR_PORT_WWN=10000090fa0d6754/GET'] = (
FAKE_HOST_LINK_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup?range=[0-8191]&TYPE=257/GET'] = (
FAKE_PORT_GROUP_RESPONSE)
# mock system info map
MAP_COMMAND_TO_FAKE_RESPONSE['/system//GET'] = (
FAKE_SYSTEM_VERSION_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?range=[0-256]/GET'] = (
FAKE_GET_FC_INI_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/GET'] = (
FAKE_GET_FC_PORT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/GET'] = (
FAKE_GET_FC_PORT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['fc_initiator?range=[0-256]/GET'] = (
FAKE_GET_FC_PORT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?PARENTTYPE=21&PARENTID=1/GET'] = (
FAKE_GET_FC_PORT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate/cachepartition/POST'] = (
FAKE_SYSTEM_VERSION_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?range=[0-256]&PARENTID=1/GET'] = (
FAKE_GET_FC_PORT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?PARENTTYPE=21&PARENTID=1/GET'] = (
FAKE_GET_FC_PORT_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/SMARTCACHEPARTITION/0/GET'] = (
FAKE_SMARTCACHEPARTITION_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/SMARTCACHEPARTITION/REMOVE_ASSOCIATE/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/cachepartition/0/GET'] = (
FAKE_SMARTCACHEPARTITION_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroDomain?range=[0-32]/GET'] = (
FAKE_HYPERMETRODOMAIN_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/POST'] = (
FAKE_HYPERMETRODOMAIN_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/11/GET'] = (
FAKE_HYPERMETRODOMAIN_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/disable_hcpair/PUT'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/11/DELETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/1/GET'] = (
FAKE_HYPERMETRO_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair?range=[0-65535]/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
MAP_COMMAND_TO_FAKE_RESPONSE['/splitmirror?range=[0-512]/GET'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
FAKE_GET_PORTG_BY_VIEW = """
{
"data": [{
"DESCRIPTION": "Please do NOT modify this. Engine ID: 0",
"ID": "0",
"NAME": "OpenStack_PortGroup_1",
"TYPE": 257
}],
"error": {
"code": 0
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate/mappingview?TYPE=257&AS'
'SOCIATEOBJTYPE=245&ASSOCIATEOBJID=1/GET'] = (
FAKE_GET_PORTG_BY_VIEW)
FAKE_GET_PORT_BY_PORTG = """
{
"data":[{
"CONFSPEED":"0","FCCONFMODE":"3",
"FCRUNMODE":"0","HEALTHSTATUS":"1","ID":"2000643e8c4c5f66",
"MAXSUPPORTSPEED":"16000","NAME":"P0","PARENTID":"0B.1",
"PARENTTYPE":209,"RUNNINGSTATUS":"10","RUNSPEED":"8000",
"WWN":"2000643e8c4c5f66"
}],
"error":{
"code":0,"description":"0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/associate/portgroup?TYPE=212&ASSOCI'
'ATEOBJTYPE=257&ASSOCIATEOBJID=0/GET'] = (
FAKE_GET_PORT_BY_PORTG)
FAKE_GET_PORTG = """
{
"data": {
"TYPE": 257,
"NAME": "OpenStack_PortGroup_1",
"DESCRIPTION": "Please DO NOT change thefollowing message: 0",
"ID": "0"
},
"error": {
"code": 0,
"description": "0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/0/GET'] = FAKE_GET_PORTG
MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/0/PUT'] = FAKE_GET_PORTG
MAP_COMMAND_TO_FAKE_RESPONSE['/port/associate/portgroup/POST'] = (
FAKE_GET_PORT_BY_PORTG)
MAP_COMMAND_TO_FAKE_RESPONSE['/port/associate/portgroup?ID=0&TYPE=257&ASSOCIA'
'TEOBJTYPE=212&ASSOCIATEOBJID=2000643e8c4c5f66/DE'
'LETE'] = (
FAKE_COMMON_SUCCESS_RESPONSE)
FAKE_CREATE_PORTG = """
{
"data": {
"DESCRIPTION": "Please DO NOT change the following message: 0",
"ID": "0",
"NAME": "OpenStack_PortGroup_1",
"TYPE": 257
},
"error": {
"code": 0,
"description": "0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/PortGroup/POST'] = FAKE_CREATE_PORTG
FAKE_GET_PORTG_FROM_PORT = """
{
"data": [{
"TYPE": 257,
"NAME": "OpenStack_PortGroup_1",
"DESCRIPTION": "PleaseDONOTchangethefollowingmessage: 0",
"ID": "0"
}],
"error": {
"code": 0,
"description": "0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate/fc_port?TYPE=257&ASSOCIA'
'TEOBJTYPE=212&ASSOCIATEOBJID=1114368/GET'] = (
FAKE_GET_PORTG_FROM_PORT)
FAKE_GET_VIEW_BY_PORTG = """
{
"data": [{
"ASSOCIATEOBJID": "0",
"COUNT": "0",
"ASSOCIATEOBJTYPE": "0",
"INBANDLUNWWN": "",
"FORFILESYSTEM": "false",
"ID": "2",
"ENABLEINBANDCOMMAND": "false",
"NAME": "OpenStack_Mapping_View_1",
"WORKMODE": "0",
"TYPE": 245,
"HOSTLUNID": "0",
"DESCRIPTION": ""
}],
"error": {
"code": 0,
"description": "0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/portgroup?TYPE=245&ASS'
'OCIATEOBJTYPE=257&ASSOCIATEOBJID=0/GET'] = (
FAKE_GET_VIEW_BY_PORTG)
FAKE_GET_LUNG_BY_VIEW = """
{
"data": [{
"TYPE": 256,
"NAME": "OpenStack_LunGroup_1",
"DESCRIPTION": "OpenStack_LunGroup_1",
"ID": "1"
}],
"error": {
"code": 0,
"description": "0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate/mappingview?TYPE=256&ASSO'
'CIATEOBJTYPE=245&ASSOCIATEOBJID=2/GET'] = (
FAKE_GET_LUNG_BY_VIEW)
FAKE_LUN_COUNT_RESPONSE_1 = """
{
"data":{
"COUNT":"2"
},
"error":{
"code":0,
"description":"0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/lun/count?TYPE=11&ASSOCIATEOB'
'JTYPE=256&ASSOCIATEOBJID=1/GET'] = (
FAKE_LUN_COUNT_RESPONSE_1)
FAKE_PORTS_IN_PG_RESPONSE = """
{
"data": [{
"ID": "1114114",
"WWN": "2002643e8c4c5f66"
},
{
"ID": "1114113",
"WWN": "2001643e8c4c5f66"
}],
"error": {
"code": 0,
"description": "0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/associate?TYPE=213&ASSOCIATEOBJTYPE='
'257&ASSOCIATEOBJID=0/GET'] = (
FAKE_PORTS_IN_PG_RESPONSE)
# Replication response
FAKE_GET_REMOTEDEV_RESPONSE = """
{
"data":[{
"ARRAYTYPE":"1",
"HEALTHSTATUS":"1",
"ID":"0",
"NAME":"Huawei.Storage",
"RUNNINGSTATUS":"1",
"WWN":"21003400a30d844d"
}],
"error":{
"code":0,
"description":"0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/remote_device/GET'] = (
FAKE_GET_REMOTEDEV_RESPONSE)
FAKE_CREATE_PAIR_RESPONSE = """
{
"data":{
"ID":"%s"
},
"error":{
"code":0,
"description":"0"
}
}
""" % TEST_PAIR_ID
MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/POST'] = (
FAKE_CREATE_PAIR_RESPONSE)
FAKE_DELETE_PAIR_RESPONSE = """
{
"data":{},
"error":{
"code":0,
"description":"0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/%s/DELETE' % TEST_PAIR_ID] = (
FAKE_DELETE_PAIR_RESPONSE)
FAKE_SET_PAIR_ACCESS_RESPONSE = """
{
"data":{},
"error":{
"code":0,
"description":"0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/%s/PUT' % TEST_PAIR_ID] = (
FAKE_SET_PAIR_ACCESS_RESPONSE)
FAKE_GET_PAIR_NORMAL_RESPONSE = """
{
"data":{
"REPLICATIONMODEL": "1",
"RUNNINGSTATUS": "1",
"SECRESACCESS": "2",
"HEALTHSTATUS": "1",
"ISPRIMARY": "true"
},
"error":{
"code":0,
"description":"0"
}
}
"""
FAKE_GET_PAIR_SPLIT_RESPONSE = """
{
"data":{
"REPLICATIONMODEL": "1",
"RUNNINGSTATUS": "26",
"SECRESACCESS": "2",
"ISPRIMARY": "true"
},
"error":{
"code":0,
"description":"0"
}
}
"""
FAKE_GET_PAIR_SYNC_RESPONSE = """
{
"data":{
"REPLICATIONMODEL": "1",
"RUNNINGSTATUS": "23",
"SECRESACCESS": "2"
},
"error":{
"code":0,
"description":"0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/%s/GET' % TEST_PAIR_ID] = (
FAKE_GET_PAIR_NORMAL_RESPONSE)
FAKE_SYNC_PAIR_RESPONSE = """
{
"data":{},
"error":{
"code":0,
"description":"0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/sync/PUT'] = (
FAKE_SYNC_PAIR_RESPONSE)
FAKE_SPLIT_PAIR_RESPONSE = """
{
"data":{},
"error":{
"code":0,
"description":"0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/split/PUT'] = (
FAKE_SPLIT_PAIR_RESPONSE)
FAKE_SWITCH_PAIR_RESPONSE = """
{
"data":{},
"error":{
"code":0,
"description":"0"
}
}
"""
MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/switch/PUT'] = (
FAKE_SWITCH_PAIR_RESPONSE)
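# No-op replacement for time.sleep() so the tests do not actually wait.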
def Fake_sleep(time):
pass
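# Backend id of the fake replication target used by the replication and
# failover tests below.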
REPLICA_BACKEND_ID = 'huawei-replica-1'
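# Fake configuration loader: update_config_value() fills the driver
# configuration with canned test values instead of reading a real backend
# configuration.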
class FakeHuaweiConf(huawei_conf.HuaweiConf):
def __init__(self, conf, protocol):
self.conf = conf
self.protocol = protocol
def safe_get(self, key):
try:
return getattr(self.conf, key)
except Exception:
return
def update_config_value(self):
setattr(self.conf, 'volume_backend_name', 'huawei_storage')
setattr(self.conf, 'san_address',
['http://192.0.2.69:8082/deviceManager/rest/'])
setattr(self.conf, 'san_user', 'admin')
setattr(self.conf, 'san_password', 'Admin@storage')
setattr(self.conf, 'san_product', 'V3')
setattr(self.conf, 'san_protocol', self.protocol)
setattr(self.conf, 'lun_type', constants.THICK_LUNTYPE)
setattr(self.conf, 'lun_ready_wait_interval', 2)
setattr(self.conf, 'lun_copy_wait_interval', 2)
setattr(self.conf, 'lun_timeout', 43200)
setattr(self.conf, 'lun_write_type', '1')
setattr(self.conf, 'lun_mirror_switch', '1')
setattr(self.conf, 'lun_prefetch_type', '1')
setattr(self.conf, 'lun_prefetch_value', '0')
setattr(self.conf, 'lun_policy', '0')
setattr(self.conf, 'lun_read_cache_policy', '2')
setattr(self.conf, 'lun_write_cache_policy', '5')
setattr(self.conf, 'storage_pools', ['OpenStack_Pool'])
setattr(self.conf, 'iscsi_default_target_ip', ['192.0.2.68'])
setattr(self.conf, 'metro_san_address',
['https://192.0.2.240:8088/deviceManager/rest/'])
setattr(self.conf, 'metro_storage_pools', 'StoragePool001')
setattr(self.conf, 'metro_san_user', 'admin')
setattr(self.conf, 'metro_san_password', 'Admin@storage1')
setattr(self.conf, 'metro_domain_name', 'hypermetro_test')
iscsi_info = {'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3',
'TargetIP': '192.0.2.2',
'CHAPinfo': 'mm-user;mm-user@storage',
'ALUA': '1',
'TargetPortGroup': 'portgroup-test', }
setattr(self.conf, 'iscsi_info', [iscsi_info])
targets = [{'backend_id': REPLICA_BACKEND_ID,
'storage_pool': 'OpenStack_Pool',
'san_address':
'https://192.0.2.69:8088/deviceManager/rest/',
'san_user': 'admin',
'san_password': 'Admin@storage1'}]
setattr(self.conf, 'replication_device', targets)
setattr(self.conf, 'safe_get', self.safe_get)
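# Fake REST client: do_call() answers from MAP_COMMAND_TO_FAKE_RESPONSE
# instead of issuing HTTP requests.  The test_fail/test_multi_url_flag and
# *_not_exist flags let individual tests force error or "object missing"
# responses.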
class FakeClient(rest_client.RestClient):
def __init__(self, configuration):
san_address = configuration.san_address
san_user = configuration.san_user
san_password = configuration.san_password
rest_client.RestClient.__init__(self, configuration,
san_address,
san_user,
san_password)
self.test_fail = False
self.test_multi_url_flag = False
self.cache_not_exist = False
self.partition_not_exist = False
def _get_snapshotid_by_name(self, snapshot_name):
return "11"
def _check_snapshot_exist(self, snapshot_id):
return True
def get_partition_id_by_name(self, name):
if self.partition_not_exist:
return None
return "11"
def get_cache_id_by_name(self, name):
if self.cache_not_exist:
return None
return "11"
def add_lun_to_cache(self, lunid, cache_id):
pass
def do_call(self, url=False, data=None, method=None, calltimeout=4):
url = url.replace('http://192.0.2.69:8082/deviceManager/rest', '')
command = url.replace('/210235G7J20000000000/', '')
data = json.dumps(data) if data else None
if method:
command = command + "/" + method
        if command in MAP_COMMAND_TO_FAKE_RESPONSE:
            data = MAP_COMMAND_TO_FAKE_RESPONSE[command]
if self.test_fail:
data = FAKE_ERROR_INFO_RESPONSE
if command == 'lun/11/GET':
data = FAKE_ERROR_LUN_INFO_RESPONSE
self.test_fail = False
if self.test_multi_url_flag:
data = FAKE_ERROR_CONNECT_RESPONSE
self.test_multi_url_flag = False
return json.loads(data)
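# Replica pair manager whose remote client is also a FakeClient.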
class FakeReplicaPairManager(replication.ReplicaPairManager):
def _init_rmt_client(self):
self.rmt_client = FakeClient(self.conf)
class FakeISCSIStorage(huawei_driver.HuaweiISCSIDriver):
"""Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver."""
def __init__(self, configuration):
self.configuration = configuration
self.huawei_conf = FakeHuaweiConf(self.configuration, 'iSCSI')
self.active_backend_id = None
self.replica = None
def do_setup(self):
self.metro_flag = True
self.huawei_conf.update_config_value()
self.get_local_and_remote_dev_conf()
self.client = FakeClient(configuration=self.configuration)
self.rmt_client = FakeClient(configuration=self.configuration)
self.replica_client = FakeClient(configuration=self.configuration)
self.metro = hypermetro.HuaweiHyperMetro(self.client,
self.rmt_client,
self.configuration)
self.replica = FakeReplicaPairManager(self.client,
self.replica_client,
self.configuration)
class FakeFCStorage(huawei_driver.HuaweiFCDriver):
"""Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver."""
def __init__(self, configuration):
self.configuration = configuration
self.fcsan = None
self.huawei_conf = FakeHuaweiConf(self.configuration, 'iSCSI')
self.active_backend_id = None
self.replica = None
def do_setup(self):
self.metro_flag = True
self.huawei_conf.update_config_value()
self.get_local_and_remote_dev_conf()
self.client = FakeClient(configuration=self.configuration)
self.rmt_client = FakeClient(configuration=self.configuration)
self.replica_client = FakeClient(configuration=self.configuration)
self.metro = hypermetro.HuaweiHyperMetro(self.client,
self.rmt_client,
self.configuration)
self.replica = FakeReplicaPairManager(self.client,
self.replica_client,
self.configuration)
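# Test cases for the iSCSI driver, run entirely against the fake storage
# backend defined above.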
@ddt.ddt
class HuaweiISCSIDriverTestCase(test.TestCase):
def setUp(self):
super(HuaweiISCSIDriverTestCase, self).setUp()
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.hypermetro_devices = hypermetro_devices
self.stubs.Set(time, 'sleep', Fake_sleep)
self.driver = FakeISCSIStorage(configuration=self.configuration)
self.driver.do_setup()
self.portgroup = 'portgroup-test'
self.iscsi_iqns = ['iqn.2006-08.com.huawei:oceanstor:21000022a:'
':20503:192.0.2.1',
'iqn.2006-08.com.huawei:oceanstor:21000022a:'
':20500:192.0.2.2']
self.target_ips = ['192.0.2.1',
'192.0.2.2']
self.portgroup_id = 11
self.driver.client.login()
def test_login_success(self):
device_id = self.driver.client.login()
self.assertEqual('210235G7J20000000000', device_id)
def test_check_volume_exist_on_array(self):
test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'host': 'ubuntu001@backend001#OpenStack_Pool',
'provider_location': None,
}
self.mock_object(rest_client.RestClient, 'get_lun_id_by_name',
mock.Mock(return_value=None))
self.driver._check_volume_exist_on_array(
test_volume, constants.VOLUME_NOT_EXISTS_WARN)
def test_create_volume_success(self):
# Have pool info in the volume.
test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'host': 'ubuntu001@backend001#OpenStack_Pool',
'provider_location': '11',
'admin_metadata': {},
}
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
# No pool info in the volume.
test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'host': 'ubuntu001@backend001',
'provider_location': '11',
'admin_metadata': {},
}
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_delete_volume_success(self):
self.driver.delete_volume(test_volume)
def test_create_snapshot_success(self):
lun_info = self.driver.create_snapshot(test_snap)
self.assertEqual(11, lun_info['provider_location'])
test_snap['volume']['provider_location'] = ''
lun_info = self.driver.create_snapshot(test_snap)
self.assertEqual(11, lun_info['provider_location'])
test_snap['volume']['provider_location'] = None
lun_info = self.driver.create_snapshot(test_snap)
self.assertEqual(11, lun_info['provider_location'])
def test_delete_snapshot_success(self):
self.driver.delete_snapshot(test_snap)
def test_create_volume_from_snapsuccess(self):
self.mock_object(
huawei_driver.HuaweiBaseDriver,
'_get_volume_type',
mock.Mock(return_value={'extra_specs': sync_replica_specs}))
self.mock_object(replication.ReplicaCommonDriver, 'sync')
model_update = self.driver.create_volume_from_snapshot(test_volume,
test_volume)
self.assertEqual('1', model_update['provider_location'])
driver_data = {'pair_id': TEST_PAIR_ID,
'rmt_lun_id': '1'}
driver_data = replication.to_string(driver_data)
self.assertEqual(driver_data, model_update['replication_driver_data'])
self.assertEqual('available', model_update['replication_status'])
def test_initialize_connection_success(self):
iscsi_properties = self.driver.initialize_connection(test_volume,
FakeConnector)
self.assertEqual(1, iscsi_properties['data']['target_lun'])
def test_terminate_connection_success(self):
self.driver.terminate_connection(test_volume, FakeConnector)
def test_get_volume_status(self):
data = self.driver.get_volume_stats()
self.assertEqual('2.0.5', data['driver_version'])
@mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={"CAPACITY": 6291456})
@mock.patch.object(rest_client.RestClient, 'extend_lun')
def test_extend_volume_size_equal(self, mock_extend, mock_lun_info):
self.driver.extend_volume(test_volume, 3)
self.assertEqual(0, mock_extend.call_count)
@mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={"CAPACITY": 5291456})
@mock.patch.object(rest_client.RestClient, 'extend_lun')
def test_extend_volume_success(self, mock_extend, mock_lun_info):
self.driver.extend_volume(test_volume, 3)
self.assertEqual(1, mock_extend.call_count)
@mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={"CAPACITY": 7291456})
def test_extend_volume_fail(self, mock_lun_info):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume, test_volume, 3)
def test_extend_nonexistent_volume(self):
test_volume = {
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635'
}
self.mock_object(rest_client.RestClient,
'get_lun_id_by_name',
mock.Mock(return_value=None))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
test_volume, 3)
@ddt.data({'admin_metadata': {'huawei_lun_wwn': '1'},
'id': '21ec7341-9256-497b-97d9-ef48edcf0635'},
{'volume_admin_metadata': [{'key': 'huawei_lun_wwn',
'value': '1'}],
'id': '21ec7341-9256-497b-97d9-ef48edcf0635'})
def test_get_admin_metadata(self, volume_data):
expected_value = {'huawei_lun_wwn': '1'}
admin_metadata = huawei_utils.get_admin_metadata(volume_data)
self.assertEqual(expected_value, admin_metadata)
def test_login_fail(self):
self.driver.client.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.client.login)
def test_create_snapshot_fail(self):
self.driver.client.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, test_snap)
def test_create_volume_fail(self):
self.driver.client.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, test_volume)
def test_delete_volume_fail(self):
self.driver.client.test_fail = True
self.driver.delete_volume(test_volume)
def test_delete_snapshot_fail(self):
self.driver.client.test_fail = True
self.driver.delete_snapshot(test_snap)
def test_delete_snapshot_with_snapshot_nonexistent(self):
fake_snap = {
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 1,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'provider_location': None, }
self.driver.delete_snapshot(fake_snap)
def test_initialize_connection_fail(self):
self.driver.client.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
test_volume, FakeConnector)
def test_lun_is_associated_to_lungroup(self):
self.driver.client.associate_lun_to_lungroup('11', '11')
result = self.driver.client._is_lun_associated_to_lungroup('11',
'11')
self.assertTrue(result)
def test_lun_is_not_associated_to_lun_group(self):
self.driver.client.associate_lun_to_lungroup('12', '12')
self.driver.client.remove_lun_from_lungroup('12', '12')
result = self.driver.client._is_lun_associated_to_lungroup('12', '12')
self.assertFalse(result)
def test_get_tgtip(self):
portg_id = self.driver.client.get_tgt_port_group(self.portgroup)
target_ip = self.driver.client._get_tgt_ip_from_portgroup(portg_id)
self.assertEqual(self.target_ips, target_ip)
def test_find_chap_info(self):
tmp_dict = {}
tmp_dict['Name'] = 'iqn.1993-08.debian:01:ec2bff7ac3a3'
tmp_dict['CHAPinfo'] = 'mm-user;mm-user@storage'
iscsi_info = [tmp_dict]
initiator_name = FakeConnector['initiator']
chapinfo = self.driver.client.find_chap_info(iscsi_info,
initiator_name)
chap_username, chap_password = chapinfo.split(';')
self.assertEqual('mm-user', chap_username)
self.assertEqual('mm-user@storage', chap_password)
def test_find_alua_info(self):
tmp_dict = {}
tmp_dict['Name'] = 'iqn.1993-08.debian:01:ec2bff7ac3a3'
tmp_dict['ALUA'] = '1'
iscsi_info = [tmp_dict]
initiator_name = FakeConnector['initiator']
type = self.driver.client._find_alua_info(iscsi_info,
initiator_name)
self.assertEqual('1', type)
def test_get_pool_info(self):
pools = [{"NAME": "test001",
"ID": "0",
"USERFREECAPACITY": "36",
"USERTOTALCAPACITY": "48",
"USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE},
{"NAME": "test002",
"ID": "1",
"USERFREECAPACITY": "37",
"USERTOTALCAPACITY": "49",
"USAGETYPE": constants.FILE_SYSTEM_POOL_TYPE},
{"NAME": "test003",
"ID": "0",
"USERFREECAPACITY": "36",
"DATASPACE": "35",
"USERTOTALCAPACITY": "48",
"USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE}]
pool_name = 'test001'
test_info = {'CAPACITY': '36', 'ID': '0', 'TOTALCAPACITY': '48'}
pool_info = self.driver.client.get_pool_info(pool_name, pools)
self.assertEqual(test_info, pool_info)
pool_name = 'test002'
test_info = {}
pool_info = self.driver.client.get_pool_info(pool_name, pools)
self.assertEqual(test_info, pool_info)
pool_name = 'test000'
test_info = {}
pool_info = self.driver.client.get_pool_info(pool_name, pools)
self.assertEqual(test_info, pool_info)
pool_name = 'test003'
test_info = {'CAPACITY': '35', 'ID': '0', 'TOTALCAPACITY': '48'}
pool_info = self.driver.client.get_pool_info(pool_name, pools)
self.assertEqual(test_info, pool_info)
def test_get_smartx_specs_opts(self):
smartx_opts = smartx.SmartX().get_smartx_specs_opts(smarttier_opts)
self.assertEqual('3', smartx_opts['policy'])
@mock.patch.object(smartx.SmartQos, 'get_qos_by_volume_type',
return_value={'MAXIOPS': '100',
'IOType': '2'})
def test_create_smartqos(self, mock_qos_value):
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
@mock.patch.object(rest_client.RestClient, 'add_lun_to_partition')
@mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
return_value={'smarttier': 'true',
'smartcache': 'true',
'smartpartition': 'true',
'thin_provisioning_support': 'true',
'thick_provisioning_support': 'false',
'policy': '2',
'cachename': 'cache-test',
'partitionname': 'partition-test'})
def test_create_smartx(self, mock_volume_types, mock_add_lun_to_partition):
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_find_available_qos(self):
qos = {'MAXIOPS': '100', 'IOType': '2'}
fake_qos_info_response_equal = {
"error": {
"code": 0
},
"data": [{
"ID": "11",
"MAXIOPS": "100",
"LATENCY": "0",
"IOType": "2",
"FSLIST": u'[""]',
'RUNNINGSTATUS': "2",
"NAME": "OpenStack_57_20151225102851",
"LUNLIST": u'["1", "2", "3", "4", "5", "6", "7", "8", "9",\
"10", ,"11", "12", "13", "14", "15", "16", "17", "18", "19",\
"20", ,"21", "22", "23", "24", "25", "26", "27", "28", "29",\
"30", ,"31", "32", "33", "34", "35", "36", "37", "38", "39",\
"40", ,"41", "42", "43", "44", "45", "46", "47", "48", "49",\
"50", ,"51", "52", "53", "54", "55", "56", "57", "58", "59",\
"60", ,"61", "62", "63", "64"]'
}]
}
# Number of LUNs in QoS is equal to 64
with mock.patch.object(rest_client.RestClient, 'get_qos',
return_value=fake_qos_info_response_equal):
(qos_id, lun_list) = self.driver.client.find_available_qos(qos)
self.assertEqual((None, []), (qos_id, lun_list))
# Number of LUNs in QoS is less than 64
fake_qos_info_response_less = {
"error": {
"code": 0
},
"data": [{
"ID": "11",
"MAXIOPS": "100",
"LATENCY": "0",
"IOType": "2",
"FSLIST": u'[""]',
'RUNNINGSTATUS': "2",
"NAME": "OpenStack_57_20151225102851",
"LUNLIST": u'["0", "1", "2"]'
}]
}
with mock.patch.object(rest_client.RestClient, 'get_qos',
return_value=fake_qos_info_response_less):
(qos_id, lun_list) = self.driver.client.find_available_qos(qos)
self.assertEqual(("11", u'["0", "1", "2"]'), (qos_id, lun_list))
@mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
return_value=fake_hypermetro_opts)
@mock.patch.object(rest_client.RestClient, 'get_all_pools',
return_value=FAKE_STORAGE_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'get_pool_info',
return_value=FAKE_FIND_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id',
return_value='11')
@mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready',
return_value=True)
def test_create_hypermetro_success(self,
mock_volume_ready,
mock_hyper_domain,
mock_pool_info,
mock_all_pool_info,
mock_login_return):
metadata = {"hypermetro_id": '11',
"remote_lun_id": '1'}
lun_info = self.driver.create_volume(hyper_volume)
self.assertEqual(metadata, lun_info['metadata'])
@mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
return_value=fake_hypermetro_opts)
@mock.patch.object(rest_client.RestClient, 'get_all_pools',
return_value=FAKE_STORAGE_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'get_pool_info',
return_value=FAKE_FIND_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id',
return_value='11')
@mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready',
return_value=True)
@mock.patch.object(hypermetro.HuaweiHyperMetro,
'_create_hypermetro_pair')
@mock.patch.object(rest_client.RestClient, 'delete_lun')
def test_create_hypermetro_fail(self,
mock_delete_lun,
mock_hyper_pair_info,
mock_volume_ready,
mock_hyper_domain,
mock_pool_info,
mock_all_pool_info,
mock_hypermetro_opts):
self.driver.client.login()
mock_hyper_pair_info.side_effect = exception.VolumeBackendAPIException(
data='Create hypermetro error.')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, hyper_volume)
mock_delete_lun.assert_called_with('1')
@mock.patch.object(rest_client.RestClient, 'get_all_pools',
return_value=FAKE_STORAGE_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'get_pool_info',
return_value={})
def test_create_hypermetro_remote_pool_none_fail(self,
mock_pool_info,
mock_all_pool_info):
param = {'TYPE': '11',
'PARENTID': ''}
self.driver.client.login()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.metro.create_hypermetro,
'2', param)
@mock.patch.object(rest_client.RestClient, 'get_all_pools',
return_value=FAKE_STORAGE_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'get_pool_info',
return_value=FAKE_FIND_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'create_lun',
return_value={'CAPACITY': '2097152',
'DESCRIPTION': '2f0635',
'HEALTHSTATUS': '1',
'ALLOCTYPE': '1',
'WWN': '6643e8c1004c5f6723e9f454003',
'ID': '1',
'RUNNINGSTATUS': '27',
'NAME': '5mFHcBv4RkCcD'})
@mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id',
return_value='11')
@mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready',
return_value=True)
def test_create_hypermetro_remote_pool_parentid(self,
mock_volume_ready,
mock_hyper_domain,
mock_create_lun,
mock_pool_info,
mock_all_pool_info):
param = {'TYPE': '11',
'PARENTID': ''}
self.driver.metro.create_hypermetro('2', param)
lun_PARENTID = mock_create_lun.call_args[0][0]['PARENTID']
self.assertEqual(FAKE_FIND_POOL_RESPONSE['ID'], lun_PARENTID)
@mock.patch.object(huawei_driver.huawei_utils, 'get_volume_metadata',
return_value={'hypermetro_id': '3400a30d844d0007',
'remote_lun_id': '1'})
def test_hypermetro_none_map_info_fail(self, mock_metadata):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.metro.connect_volume_fc,
test_volume,
FakeConnector)
@mock.patch.object(rest_client.RestClient, 'check_lun_exist',
return_value=True)
@mock.patch.object(rest_client.RestClient, 'check_hypermetro_exist',
return_value=True)
@mock.patch.object(rest_client.RestClient, 'delete_hypermetro',
return_value=FAKE_COMMON_SUCCESS_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'delete_lun',
return_value=None)
def test_delete_hypermetro_success(self,
mock_delete_lun,
mock_delete_hypermetro,
mock_check_hyermetro,
mock_lun_exit):
self.driver.delete_volume(hyper_volume)
@mock.patch.object(rest_client.RestClient, 'check_lun_exist',
return_value=True)
@mock.patch.object(rest_client.RestClient, 'check_hypermetro_exist',
return_value=True)
@mock.patch.object(rest_client.RestClient, 'get_hypermetro_by_id',
return_value=FAKE_METRO_INFO_RESPONCE)
@mock.patch.object(rest_client.RestClient, 'delete_hypermetro')
@mock.patch.object(rest_client.RestClient, 'delete_lun',
return_value=None)
def test_delete_hypermetro_fail(self,
mock_delete_lun,
mock_delete_hypermetro,
mock_metro_info,
mock_check_hyermetro,
mock_lun_exit):
mock_delete_hypermetro.side_effect = (
exception.VolumeBackendAPIException(data='Delete hypermetro '
'error.'))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume, hyper_volume)
mock_delete_lun.assert_called_with('11')
def test_manage_existing_get_size_invalid_reference(self):
# Can't find LUN by source-name.
external_ref = {'source-name': 'LUN1'}
with mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name',
return_value=None):
ex = self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
test_volume, external_ref)
self.assertIsNotNone(re.search('please check the source-name '
'or source-id', ex.msg))
# Can't find LUN by source-id.
external_ref = {'source-id': 'ID1'}
with mock.patch.object(rest_client.RestClient, 'get_lun_info') as m_gt:
m_gt.side_effect = exception.VolumeBackendAPIException(
data='Error')
            # Only the exception type is checked for the source-id lookup
            # failure.
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.manage_existing_get_size,
                              test_volume, external_ref)
def test_manage_existing_get_size_improper_lunsize(self):
# LUN size is not multiple of 1 GB.
external_ref = {'source-id': 'ID1'}
with mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={'CAPACITY': 2097150}):
ex = self.assertRaises(exception.VolumeBackendAPIException,
self.driver.manage_existing_get_size,
test_volume, external_ref)
self.assertIsNotNone(
re.search('Volume size must be multiple of 1 GB', ex.msg))
@ddt.data({'source-id': 'ID1'}, {'source-name': 'LUN1'},
{'source-name': 'LUN1', 'source-id': 'ID1'})
@mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={'CAPACITY': 2097152})
@mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name',
return_value='ID1')
def test_manage_existing_get_size_success(self, mock_get_lun_id_by_name,
mock_get_lun_info,
external_ref):
size = self.driver.manage_existing_get_size(test_volume,
external_ref)
self.assertEqual(1, size)
@mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={'CAPACITY': 2097152,
'ID': 'ID1',
'PARENTNAME': 'StoragePool001'})
@mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name',
return_value='ID1')
def test_manage_existing_pool_mismatch(self, mock_get_by_name,
mock_get_info):
# LUN does not belong to the specified pool.
with mock.patch.object(huawei_driver.HuaweiBaseDriver,
'_get_lun_info_by_ref',
return_value={'PARENTNAME': 'StoragePool001'}):
test_volume = {'host': 'ubuntu-204@v3r3#StoragePool002',
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf'}
external_ref = {'source-name': 'LUN1'}
ex = self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
test_volume, external_ref)
self.assertIsNotNone(re.search('The specified LUN does not belong'
' to the given pool', ex.msg))
@mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={'CAPACITY': 2097152,
'ID': 'ID1',
'PARENTNAME': 'StoragePool001'})
@mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name',
return_value='ID1')
def test_manage_existing_lun_abnormal(self, mock_get_by_name,
mock_get_info):
# Status is not normal.
ret = {'PARENTNAME': "StoragePool001",
'HEALTHSTATUS': '2'}
with mock.patch.object(huawei_driver.HuaweiBaseDriver,
'_get_lun_info_by_ref',
return_value=ret):
test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001',
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf'}
external_ref = {'source-name': 'LUN1'}
ex = self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
test_volume, external_ref)
self.assertIsNotNone(re.search('LUN status is not normal', ex.msg))
@mock.patch.object(rest_client.RestClient, 'get_hypermetro_pairs',
return_value=[{'LOCALOBJID': 'ID1'}])
@mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={'CAPACITY': 2097152,
'ID': 'ID1',
'PARENTNAME': 'StoragePool001',
'HEALTHSTATUS': constants.STATUS_HEALTH})
@mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name',
return_value='ID1')
def test_manage_existing_with_hypermetro(self, mock_get_by_name,
mock_get_info,
mock_get_hyper_pairs):
test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001',
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf'}
# Exists in a HyperMetroPair.
with mock.patch.object(rest_client.RestClient,
'get_hypermetro_pairs',
return_value=[{'LOCALOBJID': 'ID1'}]):
external_ref = {'source-name': 'LUN1'}
ex = self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
test_volume, external_ref)
self.assertIsNotNone(re.search('HyperMetroPair', ex.msg))
@mock.patch.object(rest_client.RestClient, 'get_hypermetro_pairs')
@mock.patch.object(rest_client.RestClient, 'rename_lun')
@mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={'CAPACITY': 2097152,
'ID': 'ID1',
'PARENTNAME': 'StoragePool001',
'HEALTHSTATUS': constants.STATUS_HEALTH,
'WWN': '6643e8c1004c5f6723e9f454003'})
@mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name',
return_value='ID1')
def test_manage_existing_with_lower_version(self, mock_get_by_name,
mock_get_info, mock_rename,
mock_get_hyper_pairs):
test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf',
'admin_metadata': {
'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'}}
mock_get_hyper_pairs.side_effect = (
exception.VolumeBackendAPIException(data='err'))
external_ref = {'source-name': 'LUN1'}
model_update = self.driver.manage_existing(test_volume,
external_ref)
expected_val = {
'admin_metadata': {
'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'
},
'provider_location': 'ID1'}
self.assertEqual(expected_val, model_update)
@ddt.data([[{'PRILUNID': 'ID1'}], []],
[[{'PRILUNID': 'ID2'}], ['ID1', 'ID2']])
@mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={'CAPACITY': 2097152,
'ID': 'ID1',
'PARENTNAME': 'StoragePool001',
'HEALTHSTATUS': constants.STATUS_HEALTH})
@mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name',
return_value='ID1')
def test_manage_existing_with_splitmirror(self, ddt_data, mock_get_by_name,
mock_get_info):
test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001',
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf',
'id': '21ec7341-9256-497b-97d9-ef48edcf'}
# Exists in a SplitMirror.
with mock.patch.object(rest_client.RestClient, 'get_split_mirrors',
return_value=ddt_data[0]), \
mock.patch.object(rest_client.RestClient, 'get_target_luns',
return_value=ddt_data[1]):
external_ref = {'source-name': 'LUN1'}
ex = self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
test_volume, external_ref)
self.assertIsNotNone(re.search('SplitMirror', ex.msg))
@ddt.data([{'PARENTID': 'ID1'}], [{'TARGETLUNID': 'ID1'}])
@mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={'CAPACITY': 2097152,
'ID': 'ID1',
'PARENTNAME': 'StoragePool001',
'HEALTHSTATUS': constants.STATUS_HEALTH})
@mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name',
return_value='ID1')
def test_manage_existing_under_migration(self, ddt_data, mock_get_by_name,
mock_get_info):
test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001',
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf',
'id': '21ec7341-9256-497b-97d9-ef48edcf'}
# Exists in a migration task.
with mock.patch.object(rest_client.RestClient, 'get_migration_task',
return_value=ddt_data):
external_ref = {'source-name': 'LUN1'}
ex = self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
test_volume, external_ref)
self.assertIsNotNone(re.search('migration', ex.msg))
@mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={'CAPACITY': 2097152,
'ID': 'ID1',
'PARENTNAME': 'StoragePool001',
'SNAPSHOTIDS': [],
'ISADD2LUNGROUP': 'true',
'HEALTHSTATUS': constants.STATUS_HEALTH})
@mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name',
return_value='ID1')
def test_manage_existing_with_lungroup(self, mock_get_by_name,
mock_get_info):
# Already in LUN group.
test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001',
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf'}
external_ref = {'source-name': 'LUN1'}
ex = self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
test_volume, external_ref)
self.assertIsNotNone(re.search('Already exists in a LUN group',
ex.msg))
@ddt.data({'source-name': 'LUN1'}, {'source-id': 'ID1'})
@mock.patch.object(rest_client.RestClient, 'rename_lun')
@mock.patch.object(huawei_driver.HuaweiBaseDriver,
'_get_lun_info_by_ref',
return_value={'PARENTNAME': 'StoragePool001',
'SNAPSHOTIDS': [],
'ID': 'ID1',
'HEALTHSTATUS': constants.STATUS_HEALTH,
'WWN': '6643e8c1004c5f6723e9f454003'})
@mock.patch.object(rest_client.RestClient, 'get_lun_info',
return_value={'CAPACITY': 2097152,
'ALLOCTYPE': 1})
@mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name',
return_value='ID1')
def test_manage_existing_success(self, mock_get_by_name, mock_get_info,
mock_check_lun, mock_rename,
external_ref):
test_volume = {
'host': 'ubuntu-204@v3r3#StoragePool001',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf',
'admin_metadata': {
'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'
}
}
model_update = self.driver.manage_existing(test_volume,
external_ref)
expected_val = {
'admin_metadata': {
'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'
},
'provider_location': 'ID1'}
self.assertEqual(expected_val, model_update)
@ddt.data([None, 0], ['ID1', 1])
@mock.patch.object(rest_client.RestClient, 'rename_lun')
def test_unmanage(self, ddt_data, mock_rename):
test_volume = {'host': 'ubuntu-204@v3r3#StoragePool001',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635'}
with mock.patch.object(huawei_driver.HuaweiBaseDriver,
'_check_volume_exist_on_array',
return_value=ddt_data[0]):
self.driver.unmanage(test_volume)
self.assertEqual(ddt_data[1], mock_rename.call_count)
@mock.patch.object(rest_client.RestClient, 'get_snapshot_info',
return_value={'ID': 'ID1',
'NAME': 'test1',
'PARENTID': '12',
'USERCAPACITY': 2097152,
'HEALTHSTATUS': '2'})
@mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name',
return_value='ID1')
def test_manage_existing_snapshot_abnormal(self, mock_get_by_name,
mock_get_info):
with mock.patch.object(huawei_driver.HuaweiBaseDriver,
'_get_snapshot_info_by_ref',
return_value={'HEALTHSTATUS': '2',
'PARENTID': '12'}):
test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume': {'provider_location': '12'}}
external_ref = {'source-name': 'test1'}
ex = self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_snapshot,
test_snapshot, external_ref)
self.assertIsNotNone(re.search('Snapshot status is not normal',
ex.msg))
@mock.patch.object(rest_client.RestClient, 'get_snapshot_info',
return_value={'ID': 'ID1',
'EXPOSEDTOINITIATOR': 'true',
'NAME': 'test1',
'PARENTID': '12',
'USERCAPACITY': 2097152,
'HEALTHSTATUS': constants.STATUS_HEALTH})
@mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name',
return_value='ID1')
def test_manage_existing_snapshot_with_lungroup(self, mock_get_by_name,
mock_get_info):
# Already in LUN group.
test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume': {'provider_location': '12'}}
external_ref = {'source-name': 'test1'}
ex = self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_snapshot,
test_snapshot, external_ref)
self.assertIsNotNone(re.search('Snapshot is exposed to initiator',
ex.msg))
@mock.patch.object(rest_client.RestClient, 'rename_snapshot')
@mock.patch.object(huawei_driver.HuaweiBaseDriver,
'_get_snapshot_info_by_ref',
return_value={'ID': 'ID1',
'EXPOSEDTOINITIATOR': 'false',
'NAME': 'test1',
'PARENTID': '12',
'USERCAPACITY': 2097152,
'HEALTHSTATUS': constants.STATUS_HEALTH})
@mock.patch.object(rest_client.RestClient, 'get_snapshot_info',
return_value={'ID': 'ID1',
'EXPOSEDTOINITIATOR': 'false',
'NAME': 'test1',
'PARENTID': '12',
'USERCAPACITY': 2097152,
'HEALTHSTATUS': constants.STATUS_HEALTH})
@mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name',
return_value='ID1')
def test_manage_existing_snapshot_success(self, mock_get_by_name,
mock_get_info,
mock_check_snapshot,
mock_rename):
test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume': {'provider_location': '12'}}
external_ref = {'source-name': 'test1'}
model_update = self.driver.manage_existing_snapshot(test_snapshot,
external_ref)
self.assertEqual({'provider_location': 'ID1'}, model_update)
test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume': {'provider_location': '12'}}
external_ref = {'source-id': 'ID1'}
model_update = self.driver.manage_existing_snapshot(test_snapshot,
external_ref)
self.assertEqual({'provider_location': 'ID1'}, model_update)
@mock.patch.object(rest_client.RestClient, 'get_snapshot_info',
return_value={'ID': 'ID1',
'EXPOSEDTOINITIATOR': 'false',
'NAME': 'test1',
'USERCAPACITY': 2097152,
'PARENTID': '11',
'HEALTHSTATUS': constants.STATUS_HEALTH})
@mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name',
return_value='ID1')
def test_manage_existing_snapshot_mismatch_lun(self, mock_get_by_name,
mock_get_info):
external_ref = {'source-name': 'test1'}
test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume': {'provider_location': '12'}}
ex = self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_snapshot,
test_snapshot, external_ref)
self.assertIsNotNone(re.search("Snapshot doesn't belong to volume",
ex.msg))
@mock.patch.object(rest_client.RestClient, 'get_snapshot_info',
return_value={'USERCAPACITY': 2097152})
@mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name',
return_value='ID1')
def test_manage_existing_snapshot_get_size_success(self,
mock_get_id_by_name,
mock_get_info):
external_ref = {'source-name': 'test1',
'source-id': 'ID1'}
test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume': {'provider_location': '12'}}
size = self.driver.manage_existing_snapshot_get_size(test_snapshot,
external_ref)
self.assertEqual(1, size)
external_ref = {'source-name': 'test1'}
test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume': {'provider_location': '12'}}
size = self.driver.manage_existing_snapshot_get_size(test_snapshot,
external_ref)
self.assertEqual(1, size)
external_ref = {'source-id': 'ID1'}
test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume': {'provider_location': '12'}}
size = self.driver.manage_existing_snapshot_get_size(test_snapshot,
external_ref)
self.assertEqual(1, size)
@mock.patch.object(rest_client.RestClient, 'rename_snapshot')
def test_unmanage_snapshot(self, mock_rename):
test_snapshot = {'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635'}
with mock.patch.object(rest_client.RestClient,
'get_snapshot_id_by_name',
return_value=None):
self.driver.unmanage_snapshot(test_snapshot)
self.assertEqual(0, mock_rename.call_count)
with mock.patch.object(rest_client.RestClient,
'get_snapshot_id_by_name',
return_value='ID1'):
self.driver.unmanage_snapshot(test_snapshot)
self.assertEqual(1, mock_rename.call_count)
@ddt.data(sync_replica_specs, async_replica_specs)
def test_create_replication_success(self, mock_type):
self.mock_object(replication.ReplicaCommonDriver, 'sync')
self.mock_object(
huawei_driver.HuaweiBaseDriver,
'_get_volume_type',
mock.Mock(return_value={'extra_specs': mock_type}))
model_update = self.driver.create_volume(replication_volume)
driver_data = {'pair_id': TEST_PAIR_ID,
'rmt_lun_id': '1'}
driver_data = replication.to_string(driver_data)
self.assertEqual(driver_data, model_update['replication_driver_data'])
self.assertEqual('available', model_update['replication_status'])
@ddt.data(
[
rest_client.RestClient,
'get_array_info',
mock.Mock(
side_effect=exception.VolumeBackendAPIException(data='err'))
],
[
rest_client.RestClient,
'get_remote_devices',
mock.Mock(
side_effect=exception.VolumeBackendAPIException(data='err'))
],
[
rest_client.RestClient,
'get_remote_devices',
mock.Mock(return_value={})
],
[
replication.ReplicaPairManager,
'wait_volume_online',
mock.Mock(side_effect=[
None,
exception.VolumeBackendAPIException(data='err')])
],
[
rest_client.RestClient,
'create_pair',
mock.Mock(
side_effect=exception.VolumeBackendAPIException(data='err'))
],
[
replication.ReplicaCommonDriver,
'sync',
mock.Mock(
side_effect=exception.VolumeBackendAPIException(data='err'))
],
)
@ddt.unpack
def test_create_replication_fail(self, mock_module, mock_func, mock_value):
self.mock_object(
huawei_driver.HuaweiBaseDriver,
'_get_volume_type',
mock.Mock(return_value={'extra_specs': sync_replica_specs}))
self.mock_object(replication.ReplicaPairManager, '_delete_pair')
self.mock_object(mock_module, mock_func, mock_value)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume, replication_volume)
def test_delete_replication_success(self):
self.mock_object(replication.ReplicaCommonDriver, 'split')
self.mock_object(
huawei_driver.HuaweiBaseDriver,
'_get_volume_type',
mock.Mock(return_value={'extra_specs': sync_replica_specs}))
self.driver.delete_volume(replication_volume)
self.mock_object(rest_client.RestClient, 'check_lun_exist',
mock.Mock(return_value=False))
self.driver.delete_volume(replication_volume)
def test_wait_volume_online(self):
replica = FakeReplicaPairManager(self.driver.client,
self.driver.replica_client,
self.configuration)
lun_info = {'ID': '11'}
replica.wait_volume_online(self.driver.client, lun_info)
offline_status = {'RUNNINGSTATUS': '28'}
replica.wait_volume_online(self.driver.client, lun_info)
with mock.patch.object(rest_client.RestClient, 'get_lun_info',
offline_status):
self.assertRaises(exception.VolumeBackendAPIException,
replica.wait_volume_online,
self.driver.client,
lun_info)
def test_wait_second_access(self):
pair_id = '1'
access_ro = constants.REPLICA_SECOND_RO
access_rw = constants.REPLICA_SECOND_RW
op = replication.PairOp(self.driver.client)
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
self.mock_object(replication.PairOp, 'get_replica_info',
mock.Mock(return_value={'SECRESACCESS': access_ro}))
self.mock_object(huawei_utils.time, 'time', mock.Mock(
            side_effect=utils.generate_timeout_series(
constants.DEFAULT_REPLICA_WAIT_TIMEOUT)))
common_driver.wait_second_access(pair_id, access_ro)
self.assertRaises(exception.VolumeBackendAPIException,
common_driver.wait_second_access, pair_id, access_rw)
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall',
new=utils.ZeroIntervalLoopingCall)
def test_wait_replica_ready(self):
normal_status = {
'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_NORMAL,
'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL
}
split_status = {
'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_SPLIT,
'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL
}
sync_status = {
'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_SYNC,
'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL
}
pair_id = '1'
op = replication.PairOp(self.driver.client)
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
with mock.patch.object(replication.PairOp, 'get_replica_info',
mock.Mock(return_value=normal_status)):
common_driver.wait_replica_ready(pair_id)
with mock.patch.object(
replication.PairOp,
'get_replica_info',
mock.Mock(side_effect=[sync_status, normal_status])):
common_driver.wait_replica_ready(pair_id)
with mock.patch.object(replication.PairOp, 'get_replica_info',
mock.Mock(return_value=split_status)):
self.assertRaises(exception.VolumeBackendAPIException,
common_driver.wait_replica_ready, pair_id)
def test_failover_to_current(self):
driver = FakeISCSIStorage(configuration=self.configuration)
driver.do_setup()
old_client = driver.client
old_replica_client = driver.replica_client
old_replica = driver.replica
secondary_id, volumes_update = driver.failover_host(
None, [test_volume], 'default')
self.assertTrue(driver.active_backend_id in ('', None))
self.assertTrue(old_client == driver.client)
self.assertTrue(old_replica_client == driver.replica_client)
self.assertTrue(old_replica == driver.replica)
self.assertEqual('default', secondary_id)
self.assertEqual(0, len(volumes_update))
def test_failover_normal_volumes(self):
driver = FakeISCSIStorage(configuration=self.configuration)
driver.do_setup()
old_client = driver.client
old_replica_client = driver.replica_client
old_replica = driver.replica
secondary_id, volumes_update = driver.failover_host(
None, [test_volume], REPLICA_BACKEND_ID)
self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id)
self.assertTrue(old_client == driver.replica_client)
self.assertTrue(old_replica_client == driver.client)
self.assertFalse(old_replica == driver.replica)
self.assertEqual(REPLICA_BACKEND_ID, secondary_id)
self.assertEqual(1, len(volumes_update))
v_id = volumes_update[0]['volume_id']
v_update = volumes_update[0]['updates']
self.assertEqual(test_volume['id'], v_id)
self.assertEqual('error', v_update['status'])
self.assertEqual(test_volume['status'],
v_update['metadata']['old_status'])
def test_failback_to_current(self):
driver = FakeISCSIStorage(configuration=self.configuration)
driver.active_backend_id = REPLICA_BACKEND_ID
driver.do_setup()
old_client = driver.client
old_replica_client = driver.replica_client
old_replica = driver.replica
secondary_id, volumes_update = driver.failover_host(
None, [test_volume], REPLICA_BACKEND_ID)
self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id)
self.assertTrue(old_client == driver.client)
self.assertTrue(old_replica_client == driver.replica_client)
self.assertTrue(old_replica == driver.replica)
self.assertEqual(REPLICA_BACKEND_ID, secondary_id)
self.assertEqual(0, len(volumes_update))
def test_failback_normal_volumes(self):
volume = copy.deepcopy(test_volume)
volume['status'] = 'error'
        volume['metadata'] = {'old_status': 'available'}
driver = FakeISCSIStorage(configuration=self.configuration)
driver.active_backend_id = REPLICA_BACKEND_ID
driver.do_setup()
old_client = driver.client
old_replica_client = driver.replica_client
old_replica = driver.replica
secondary_id, volumes_update = driver.failover_host(
None, [volume], 'default')
self.assertTrue(driver.active_backend_id in ('', None))
self.assertTrue(old_client == driver.replica_client)
self.assertTrue(old_replica_client == driver.client)
self.assertFalse(old_replica == driver.replica)
self.assertEqual('default', secondary_id)
self.assertEqual(1, len(volumes_update))
v_id = volumes_update[0]['volume_id']
v_update = volumes_update[0]['updates']
self.assertEqual(volume['id'], v_id)
self.assertEqual('available', v_update['status'])
self.assertFalse('old_status' in v_update['metadata'])
def test_failover_replica_volumes(self):
driver = FakeISCSIStorage(configuration=self.configuration)
driver.do_setup()
old_client = driver.client
old_replica_client = driver.replica_client
old_replica = driver.replica
self.mock_object(replication.ReplicaCommonDriver, 'failover')
self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
mock.Mock(
return_value={'replication_enabled': 'true'}))
secondary_id, volumes_update = driver.failover_host(
None, [replication_volume], REPLICA_BACKEND_ID)
self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id)
self.assertTrue(old_client == driver.replica_client)
self.assertTrue(old_replica_client == driver.client)
self.assertFalse(old_replica == driver.replica)
self.assertEqual(REPLICA_BACKEND_ID, secondary_id)
self.assertEqual(1, len(volumes_update))
v_id = volumes_update[0]['volume_id']
v_update = volumes_update[0]['updates']
self.assertEqual(replication_volume['id'], v_id)
self.assertEqual('1', v_update['provider_location'])
self.assertEqual('failed-over', v_update['replication_status'])
new_drv_data = {'pair_id': TEST_PAIR_ID,
'rmt_lun_id': replication_volume['provider_location']}
new_drv_data = replication.to_string(new_drv_data)
self.assertEqual(new_drv_data, v_update['replication_driver_data'])
@ddt.data({}, {'pair_id': TEST_PAIR_ID})
def test_failover_replica_volumes_invalid_drv_data(self, mock_drv_data):
volume = copy.deepcopy(replication_volume)
volume['replication_driver_data'] = replication.to_string(
mock_drv_data)
driver = FakeISCSIStorage(configuration=self.configuration)
driver.do_setup()
old_client = driver.client
old_replica_client = driver.replica_client
old_replica = driver.replica
self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
mock.Mock(
return_value={'replication_enabled': 'true'}))
secondary_id, volumes_update = driver.failover_host(
None, [volume], REPLICA_BACKEND_ID)
self.assertTrue(driver.active_backend_id == REPLICA_BACKEND_ID)
self.assertTrue(old_client == driver.replica_client)
self.assertTrue(old_replica_client == driver.client)
self.assertFalse(old_replica == driver.replica)
self.assertEqual(REPLICA_BACKEND_ID, secondary_id)
self.assertEqual(1, len(volumes_update))
v_id = volumes_update[0]['volume_id']
v_update = volumes_update[0]['updates']
self.assertEqual(volume['id'], v_id)
self.assertEqual('error', v_update['replication_status'])
def test_failback_replica_volumes(self):
self.mock_object(replication.ReplicaCommonDriver, 'enable')
self.mock_object(replication.ReplicaCommonDriver, 'wait_replica_ready')
self.mock_object(replication.ReplicaCommonDriver, 'failover')
self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
mock.Mock(
return_value={'replication_enabled': 'true'}))
volume = copy.deepcopy(replication_volume)
driver = FakeISCSIStorage(configuration=self.configuration)
driver.active_backend_id = REPLICA_BACKEND_ID
driver.do_setup()
old_client = driver.client
old_replica_client = driver.replica_client
old_replica = driver.replica
secondary_id, volumes_update = driver.failover_host(
None, [volume], 'default')
self.assertTrue(driver.active_backend_id in ('', None))
self.assertTrue(old_client == driver.replica_client)
self.assertTrue(old_replica_client == driver.client)
self.assertFalse(old_replica == driver.replica)
self.assertEqual('default', secondary_id)
self.assertEqual(1, len(volumes_update))
v_id = volumes_update[0]['volume_id']
v_update = volumes_update[0]['updates']
self.assertEqual(replication_volume['id'], v_id)
self.assertEqual('1', v_update['provider_location'])
self.assertEqual('available', v_update['replication_status'])
new_drv_data = {'pair_id': TEST_PAIR_ID,
'rmt_lun_id': replication_volume['provider_location']}
new_drv_data = replication.to_string(new_drv_data)
self.assertEqual(new_drv_data, v_update['replication_driver_data'])
@ddt.data({}, {'pair_id': TEST_PAIR_ID})
def test_failback_replica_volumes_invalid_drv_data(self, mock_drv_data):
self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
mock.Mock(
return_value={'replication_enabled': 'true'}))
volume = copy.deepcopy(replication_volume)
volume['replication_driver_data'] = replication.to_string(
mock_drv_data)
driver = FakeISCSIStorage(configuration=self.configuration)
driver.active_backend_id = REPLICA_BACKEND_ID
driver.do_setup()
old_client = driver.client
old_replica_client = driver.replica_client
old_replica = driver.replica
secondary_id, volumes_update = driver.failover_host(
None, [volume], 'default')
self.assertTrue(driver.active_backend_id in ('', None))
self.assertTrue(old_client == driver.replica_client)
self.assertTrue(old_replica_client == driver.client)
self.assertFalse(old_replica == driver.replica)
self.assertEqual('default', secondary_id)
self.assertEqual(1, len(volumes_update))
v_id = volumes_update[0]['volume_id']
v_update = volumes_update[0]['updates']
self.assertEqual(replication_volume['id'], v_id)
self.assertEqual('error', v_update['replication_status'])
@mock.patch.object(replication.PairOp, 'is_primary',
side_effect=[False, True])
@mock.patch.object(replication.ReplicaCommonDriver, 'split')
@mock.patch.object(replication.ReplicaCommonDriver, 'unprotect_second')
def test_replication_driver_enable_success(self,
mock_unprotect,
mock_split,
mock_is_primary):
replica_id = TEST_PAIR_ID
op = replication.PairOp(self.driver.client)
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
common_driver.enable(replica_id)
self.assertTrue(mock_unprotect.called)
self.assertTrue(mock_split.called)
self.assertTrue(mock_is_primary.called)
@mock.patch.object(replication.PairOp, 'is_primary', return_value=False)
@mock.patch.object(replication.ReplicaCommonDriver, 'split')
def test_replication_driver_failover_success(self,
mock_split,
mock_is_primary):
replica_id = TEST_PAIR_ID
op = replication.PairOp(self.driver.client)
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
common_driver.failover(replica_id)
self.assertTrue(mock_split.called)
self.assertTrue(mock_is_primary.called)
@mock.patch.object(replication.PairOp, 'is_primary', return_value=True)
def test_replication_driver_failover_fail(self, mock_is_primary):
replica_id = TEST_PAIR_ID
op = replication.PairOp(self.driver.client)
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
self.assertRaises(
exception.VolumeBackendAPIException,
common_driver.failover,
replica_id)
@ddt.data(constants.REPLICA_SECOND_RW, constants.REPLICA_SECOND_RO)
def test_replication_driver_protect_second(self, mock_access):
replica_id = TEST_PAIR_ID
op = replication.PairOp(self.driver.client)
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
self.mock_object(replication.ReplicaCommonDriver, 'wait_second_access')
self.mock_object(
replication.PairOp,
'get_replica_info',
mock.Mock(return_value={'SECRESACCESS': mock_access}))
common_driver.protect_second(replica_id)
common_driver.unprotect_second(replica_id)
def test_replication_driver_sync(self):
replica_id = TEST_PAIR_ID
op = replication.PairOp(self.driver.client)
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
async_normal_status = {
'REPLICATIONMODEL': constants.REPLICA_ASYNC_MODEL,
'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_NORMAL,
'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL
}
self.mock_object(replication.ReplicaCommonDriver, 'protect_second')
self.mock_object(replication.PairOp, 'get_replica_info',
mock.Mock(return_value=async_normal_status))
common_driver.sync(replica_id, True)
common_driver.sync(replica_id, False)
def test_replication_driver_split(self):
replica_id = TEST_PAIR_ID
op = replication.PairOp(self.driver.client)
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
self.mock_object(replication.ReplicaCommonDriver, 'wait_expect_state')
self.mock_object(replication.PairOp, 'split', mock.Mock(
side_effect=exception.VolumeBackendAPIException(data='err')))
common_driver.split(replica_id)
@mock.patch.object(replication.PairOp, 'split')
@ddt.data(constants.REPLICA_RUNNING_STATUS_SPLIT,
constants.REPLICA_RUNNING_STATUS_INVALID,
constants.REPLICA_RUNNING_STATUS_ERRUPTED)
def test_replication_driver_split_already_disabled(self, mock_status,
mock_op_split):
replica_id = TEST_PAIR_ID
op = replication.PairOp(self.driver.client)
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
pair_info = json.loads(FAKE_GET_PAIR_NORMAL_RESPONSE)['data']
pair_info['RUNNINGSTATUS'] = mock_status
self.mock_object(rest_client.RestClient, 'get_pair_by_id', mock.Mock(
return_value=pair_info))
common_driver.split(replica_id)
self.assertFalse(mock_op_split.called)
def test_replication_base_op(self):
replica_id = '1'
op = replication.AbsReplicaOp(None)
op.create()
op.delete(replica_id)
op.protect_second(replica_id)
op.unprotect_second(replica_id)
op.sync(replica_id)
op.split(replica_id)
op.switch(replica_id)
op.is_primary({})
op.get_replica_info(replica_id)
        op._is_status(None, {'key': 'value'}, None)
@mock.patch.object(rest_client.RestClient, 'call',
return_value={"error": {"code": 0}})
def test_get_tgt_port_group_no_portg_exist(self, mock_call):
portg = self.driver.client.get_tgt_port_group('test_portg')
self.assertIsNone(portg)
def test_get_tgt_iqn_from_rest_match(self):
match_res = {
'data': [{
'TYPE': 249,
'ID': '0+iqn.2006-08.com: 210048cee9d: 111.111.111.19,t,0x01'
}, {
'TYPE': 249,
'ID': '0+iqn.2006-08.com: 210048cee9d: 111.111.111.191,t,0x01'
}],
'error': {
'code': 0
}
}
ip = '111.111.111.19'
expected_iqn = 'iqn.2006-08.com: 210048cee9d: 111.111.111.19'
self.mock_object(rest_client.RestClient, 'call',
mock.Mock(return_value=match_res))
iqn = self.driver.client._get_tgt_iqn_from_rest(ip)
self.assertEqual(expected_iqn, iqn)
def test_get_tgt_iqn_from_rest_mismatch(self):
match_res = {
'data': [{
'TYPE': 249,
'ID': '0+iqn.2006-08.com: 210048cee9d: 192.0.2.191,t,0x01'
}, {
'TYPE': 249,
'ID': '0+iqn.2006-08.com: 210048cee9d: 192.0.2.192,t,0x01'
}],
'error': {
'code': 0
}
}
ip = '192.0.2.19'
self.mock_object(rest_client.RestClient, 'call',
mock.Mock(return_value=match_res))
iqn = self.driver.client._get_tgt_iqn_from_rest(ip)
self.assertIsNone(iqn)
class FCSanLookupService(object):
def get_device_mapping_from_network(self, initiator_list,
target_list):
return fake_fabric_mapping
class HuaweiFCDriverTestCase(test.TestCase):
def setUp(self):
super(HuaweiFCDriverTestCase, self).setUp()
self.configuration = mock.Mock(spec=conf.Configuration)
self.huawei_conf = FakeHuaweiConf(self.configuration, 'FC')
self.configuration.hypermetro_devices = hypermetro_devices
self.stubs.Set(time, 'sleep', Fake_sleep)
driver = FakeFCStorage(configuration=self.configuration)
self.driver = driver
self.driver.do_setup()
self.driver.client.login()
def test_login_success(self):
device_id = self.driver.client.login()
self.assertEqual('210235G7J20000000000', device_id)
def test_create_volume_success(self):
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_delete_volume_success(self):
self.driver.delete_volume(test_volume)
def test_create_snapshot_success(self):
lun_info = self.driver.create_snapshot(test_snap)
self.assertEqual(11, lun_info['provider_location'])
test_snap['volume']['provider_location'] = ''
lun_info = self.driver.create_snapshot(test_snap)
self.assertEqual(11, lun_info['provider_location'])
test_snap['volume']['provider_location'] = None
lun_info = self.driver.create_snapshot(test_snap)
self.assertEqual(11, lun_info['provider_location'])
def test_delete_snapshot_success(self):
self.driver.delete_snapshot(test_snap)
    def test_create_volume_from_snapshot_success(self):
lun_info = self.driver.create_volume_from_snapshot(test_volume,
test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_initialize_connection_success(self):
iscsi_properties = self.driver.initialize_connection(test_volume,
FakeConnector)
self.assertEqual(1, iscsi_properties['data']['target_lun'])
def test_hypermetro_connection_success(self):
self.mock_object(rest_client.RestClient, 'find_array_version',
mock.Mock(return_value='V300R003C00'))
fc_properties = self.driver.initialize_connection(hyper_volume,
FakeConnector)
self.assertEqual(1, fc_properties['data']['target_lun'])
def test_terminate_connection_success(self):
self.driver.client.terminateFlag = True
self.driver.terminate_connection(test_volume, FakeConnector)
self.assertTrue(self.driver.client.terminateFlag)
def test_terminate_connection_hypermetro_in_metadata(self):
self.driver.terminate_connection(hyper_volume, FakeConnector)
def test_get_volume_status(self):
remote_device_info = {"ARRAYTYPE": "1",
"HEALTHSTATUS": "1",
"RUNNINGSTATUS": "10"}
self.mock_object(
replication.ReplicaPairManager,
'get_remote_device_by_wwn',
mock.Mock(return_value=remote_device_info))
data = self.driver.get_volume_stats()
self.assertEqual('2.0.5', data['driver_version'])
self.assertTrue(data['pools'][0]['replication_enabled'])
self.assertListEqual(['sync', 'async'],
data['pools'][0]['replication_type'])
self.mock_object(
replication.ReplicaPairManager,
'get_remote_device_by_wwn',
mock.Mock(return_value={}))
data = self.driver.get_volume_stats()
self.assertNotIn('replication_enabled', data['pools'][0])
self.mock_object(
replication.ReplicaPairManager,
'try_get_remote_wwn',
mock.Mock(return_value={}))
data = self.driver.get_volume_stats()
self.assertEqual('2.0.5', data['driver_version'])
self.assertNotIn('replication_enabled', data['pools'][0])
def test_extend_volume(self):
self.driver.extend_volume(test_volume, 3)
def test_login_fail(self):
self.driver.client.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.client.login)
def test_create_snapshot_fail(self):
self.driver.client.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot, test_snap)
def test_create_volume_fail(self):
self.driver.client.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, test_volume)
def test_delete_volume_fail(self):
self.driver.client.test_fail = True
self.driver.delete_volume(test_volume)
def test_delete_snapshot_fail(self):
self.driver.client.test_fail = True
self.driver.delete_snapshot(test_snap)
def test_initialize_connection_fail(self):
self.driver.client.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
test_volume, FakeConnector)
def test_lun_is_associated_to_lungroup(self):
self.driver.client.associate_lun_to_lungroup('11', '11')
result = self.driver.client._is_lun_associated_to_lungroup('11',
'11')
self.assertTrue(result)
def test_lun_is_not_associated_to_lun_group(self):
self.driver.client.associate_lun_to_lungroup('12', '12')
self.driver.client.remove_lun_from_lungroup('12', '12')
result = self.driver.client._is_lun_associated_to_lungroup('12',
'12')
self.assertFalse(result)
@mock.patch.object(rest_client.RestClient, 'add_lun_to_partition')
def test_migrate_volume_success(self, mock_add_lun_to_partition):
# Migrate volume without new type.
empty_dict = {}
moved, model_update = self.driver.migrate_volume(None,
test_volume,
test_host,
None)
self.assertTrue(moved)
self.assertEqual(empty_dict, model_update)
# Migrate volume with new type.
empty_dict = {}
new_type = {'extra_specs':
{'smarttier': '<is> true',
'smartcache': '<is> true',
'smartpartition': '<is> true',
'thin_provisioning_support': '<is> true',
'thick_provisioning_support': '<is> False',
'policy': '2',
'smartcache:cachename': 'cache-test',
'smartpartition:partitionname': 'partition-test'}}
moved, model_update = self.driver.migrate_volume(None,
test_volume,
test_host,
new_type)
self.assertTrue(moved)
self.assertEqual(empty_dict, model_update)
def test_migrate_volume_fail(self):
self.driver.client.test_fail = True
# Migrate volume without new type.
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.migrate_volume, None,
test_volume, test_host, None)
# Migrate volume with new type.
new_type = {'extra_specs':
{'smarttier': '<is> true',
'smartcache': '<is> true',
'thin_provisioning_support': '<is> true',
'thick_provisioning_support': '<is> False',
'policy': '2',
'smartcache:cachename': 'cache-test',
'partitionname': 'partition-test'}}
self.driver.client.test_fail = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.migrate_volume, None,
test_volume, test_host, new_type)
def test_check_migration_valid(self):
is_valid = self.driver._check_migration_valid(test_host,
test_volume)
self.assertTrue(is_valid)
# No pool_name in capabilities.
invalid_host1 = {'host': 'ubuntu001@backend002#OpenStack_Pool',
'capabilities':
{'location_info': '210235G7J20000000000',
'allocated_capacity_gb': 0,
'volume_backend_name': 'HuaweiFCDriver',
'storage_protocol': 'FC'}}
is_valid = self.driver._check_migration_valid(invalid_host1,
test_volume)
self.assertFalse(is_valid)
# location_info in capabilities is not matched.
invalid_host2 = {'host': 'ubuntu001@backend002#OpenStack_Pool',
'capabilities':
{'location_info': '210235G7J20000000001',
'allocated_capacity_gb': 0,
'pool_name': 'OpenStack_Pool',
'volume_backend_name': 'HuaweiFCDriver',
'storage_protocol': 'FC'}}
is_valid = self.driver._check_migration_valid(invalid_host2,
test_volume)
self.assertFalse(is_valid)
# storage_protocol is not match current protocol and volume status is
# 'in-use'.
volume_in_use = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
'size': 2,
'volume_name': 'vol1',
'id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635',
'volume_attachment': 'in-use',
'provider_location': '11'}
invalid_host2 = {'host': 'ubuntu001@backend002#OpenStack_Pool',
'capabilities':
{'location_info': '210235G7J20000000001',
'allocated_capacity_gb': 0,
'pool_name': 'OpenStack_Pool',
'volume_backend_name': 'HuaweiFCDriver',
'storage_protocol': 'iSCSI'}}
is_valid = self.driver._check_migration_valid(invalid_host2,
volume_in_use)
self.assertFalse(is_valid)
# pool_name is empty.
invalid_host3 = {'host': 'ubuntu001@backend002#OpenStack_Pool',
'capabilities':
{'location_info': '210235G7J20000000001',
'allocated_capacity_gb': 0,
'pool_name': '',
'volume_backend_name': 'HuaweiFCDriver',
'storage_protocol': 'iSCSI'}}
is_valid = self.driver._check_migration_valid(invalid_host3,
test_volume)
self.assertFalse(is_valid)
@mock.patch.object(rest_client.RestClient, 'rename_lun')
def test_update_migrated_volume_success(self, mock_rename_lun):
original_volume = {'id': '21ec7341-9256-497b-97d9-ef48edcf0635'}
current_volume = {'id': '21ec7341-9256-497b-97d9-ef48edcf0636'}
model_update = self.driver.update_migrated_volume(None,
original_volume,
current_volume,
'available')
self.assertEqual({'_name_id': None}, model_update)
@mock.patch.object(rest_client.RestClient, 'rename_lun')
def test_update_migrated_volume_fail(self, mock_rename_lun):
mock_rename_lun.side_effect = exception.VolumeBackendAPIException(
data='Error occurred.')
original_volume = {'id': '21ec7341-9256-497b-97d9-ef48edcf0635'}
current_volume = {'id': '21ec7341-9256-497b-97d9-ef48edcf0636',
'_name_id': '21ec7341-9256-497b-97d9-ef48edcf0637'}
model_update = self.driver.update_migrated_volume(None,
original_volume,
current_volume,
'available')
self.assertEqual({'_name_id': '21ec7341-9256-497b-97d9-ef48edcf0637'},
model_update)
@mock.patch.object(rest_client.RestClient, 'add_lun_to_partition')
def test_retype_volume_success(self, mock_add_lun_to_partition):
retype = self.driver.retype(None, test_volume,
test_new_type, None, test_host)
self.assertTrue(retype)
@mock.patch.object(rest_client.RestClient, 'add_lun_to_partition')
@mock.patch.object(
huawei_driver.HuaweiBaseDriver,
'_get_volume_type',
return_value={'extra_specs': sync_replica_specs})
def test_retype_replication_volume_success(self, mock_get_type,
mock_add_lun_to_partition):
retype = self.driver.retype(None, test_volume,
test_new_replication_type, None, test_host)
self.assertTrue(retype)
def test_retype_volume_cache_fail(self):
self.driver.client.cache_not_exist = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.retype, None,
test_volume, test_new_type, None, test_host)
def test_retype_volume_partition_fail(self):
self.driver.client.partition_not_exist = True
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.retype, None,
test_volume, test_new_type, None, test_host)
@mock.patch.object(rest_client.RestClient, 'add_lun_to_partition')
def test_retype_volume_fail(self, mock_add_lun_to_partition):
mock_add_lun_to_partition.side_effect = (
exception.VolumeBackendAPIException(data='Error occurred.'))
retype = self.driver.retype(None, test_volume,
test_new_type, None, test_host)
self.assertFalse(retype)
@mock.patch.object(rest_client.RestClient, 'get_all_engines',
return_value=[{'NODELIST': '["0A","0B"]', 'ID': '0'}])
    def test_build_ini_targ_map_engine_recorded(self, mock_engines):
fake_lookup_service = FCSanLookupService()
zone_helper = fc_zone_helper.FCZoneHelper(
fake_lookup_service, self.driver.client)
(tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map(
['10000090fa0d6754'], '1', '11')
target_port_wwns = ['2000643e8c4c5f66']
self.assertEqual(target_port_wwns, tgt_wwns)
self.assertEqual({}, init_targ_map)
@mock.patch.object(rest_client.RestClient, 'get_all_engines',
return_value=[{'NODELIST': '["0A"]', 'ID': '0'},
{'NODELIST': '["0B"]', 'ID': '1'}])
    def test_build_ini_targ_map_engine_not_recorded(self, mock_engines):
fake_lookup_service = FCSanLookupService()
zone_helper = fc_zone_helper.FCZoneHelper(
fake_lookup_service, self.driver.client)
(tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map(
['10000090fa0d6754'], '1', '11')
expected_wwns = ['2000643e8c4c5f66']
expected_map = {'10000090fa0d6754': ['2000643e8c4c5f66']}
self.assertEqual(expected_wwns, tgt_wwns)
self.assertEqual(expected_map, init_targ_map)
@mock.patch.object(rest_client.RestClient, 'get_all_engines',
return_value=[{'NODELIST': '["0A", "0B"]', 'ID': '0'}])
def test_build_ini_targ_map_no_map(self, mock_engines):
fake_lookup_service = FCSanLookupService()
zone_helper = fc_zone_helper.FCZoneHelper(
fake_lookup_service, self.driver.client)
# Host with id '5' has no map on the array.
(tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map(
['10000090fa0d6754'], '5', '11')
expected_wwns = ['2000643e8c4c5f66']
expected_map = {'10000090fa0d6754': ['2000643e8c4c5f66']}
self.assertEqual(expected_wwns, tgt_wwns)
self.assertEqual(expected_map, init_targ_map)
def test_get_init_targ_map(self):
fake_lookup_service = FCSanLookupService()
zone_helper = fc_zone_helper.FCZoneHelper(
fake_lookup_service, self.driver.client)
(tgt_wwns, portg_id, init_targ_map) = zone_helper.get_init_targ_map(
['10000090fa0d6754'], '1')
expected_wwns = ['2000643e8c4c5f66']
expected_map = {'10000090fa0d6754': ['2000643e8c4c5f66']}
self.assertEqual(expected_wwns, tgt_wwns)
self.assertEqual(expected_map, init_targ_map)
def test_multi_resturls_success(self):
self.driver.client.test_multi_url_flag = True
lun_info = self.driver.create_volume(test_volume)
self.assertEqual('1', lun_info['provider_location'])
def test_get_id_from_result(self):
result = {}
name = 'test_name'
key = 'NAME'
re = self.driver.client._get_id_from_result(result, name, key)
self.assertIsNone(re)
result = {'data': {}}
re = self.driver.client._get_id_from_result(result, name, key)
self.assertIsNone(re)
result = {'data': [{'COUNT': 1, 'ID': '1'},
{'COUNT': 2, 'ID': '2'}]}
re = self.driver.client._get_id_from_result(result, name, key)
self.assertIsNone(re)
result = {'data': [{'NAME': 'test_name1', 'ID': '1'},
{'NAME': 'test_name2', 'ID': '2'}]}
re = self.driver.client._get_id_from_result(result, name, key)
self.assertIsNone(re)
result = {'data': [{'NAME': 'test_name', 'ID': '1'},
{'NAME': 'test_name2', 'ID': '2'}]}
re = self.driver.client._get_id_from_result(result, name, key)
self.assertEqual('1', re)
@mock.patch.object(rest_client.RestClient, 'get_pool_info',
return_value={'ID': 1,
'CAPACITY': 110362624,
'TOTALCAPACITY': 209715200})
def test_get_capacity(self, mock_get_pool_info):
expected_pool_capacity = {'total_capacity': 100.0,
'free_capacity': 52.625}
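        # Sanity check of the expected values (assuming the array reports
        # capacity in 512-byte sectors): 209715200 * 512 / 1024**3 = 100.0 GB
        # total and 110362624 * 512 / 1024**3 = 52.625 GB free.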
pool_capacity = self.driver.client._get_capacity(None,
None)
self.assertEqual(expected_pool_capacity, pool_capacity)
@mock.patch.object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
return_value=fake_hypermetro_opts)
@mock.patch.object(rest_client.RestClient, 'get_all_pools',
return_value=FAKE_STORAGE_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'get_pool_info',
return_value=FAKE_FIND_POOL_RESPONSE)
@mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id',
return_value='11')
@mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready',
return_value=True)
@mock.patch.object(hypermetro.HuaweiHyperMetro,
'_create_hypermetro_pair',
return_value={"ID": '11',
"NAME": 'hypermetro-pair'})
@mock.patch.object(rest_client.RestClient, 'logout',
return_value=None)
def test_create_hypermetro_success(self, mock_hypermetro_opts,
mock_login_return,
mock_all_pool_info,
mock_pool_info,
mock_hyper_domain,
mock_volume_ready,
mock_logout):
metadata = {"hypermetro_id": '11',
"remote_lun_id": '1'}
lun_info = self.driver.create_volume(hyper_volume)
self.assertEqual(metadata, lun_info['metadata'])
@mock.patch.object(rest_client.RestClient, 'call',
return_value={"data": [{"RUNNINGSTATUS": "27",
"ID": '1'},
{"RUNNINGSTATUS": "26",
"ID": '2'}],
"error": {"code": 0}})
def test_get_online_free_wwns(self, mock_call):
wwns = self.driver.client.get_online_free_wwns()
self.assertEqual(['1'], wwns)
@mock.patch.object(rest_client.RestClient, 'call',
return_value={"data": {"ID": 1}, "error": {"code": 0}})
def test_rename_lun(self, mock_call):
des = 'This LUN is renamed.'
new_name = 'test_name'
self.driver.client.rename_lun('1', new_name, des)
self.assertEqual(1, mock_call.call_count)
url = "/lun/1"
data = {"NAME": new_name, "DESCRIPTION": des}
mock_call.assert_called_once_with(url, data, "PUT")
@mock.patch.object(rest_client.RestClient, 'call',
return_value={"data": {}})
def test_is_host_associated_to_hostgroup_no_data(self, mock_call):
res = self.driver.client.is_host_associated_to_hostgroup('1')
self.assertFalse(res)
@mock.patch.object(rest_client.RestClient, 'call',
return_value={"data": {'ISADD2HOSTGROUP': 'true'}})
def test_is_host_associated_to_hostgroup_true(self, mock_call):
res = self.driver.client.is_host_associated_to_hostgroup('1')
self.assertTrue(res)
@mock.patch.object(rest_client.RestClient, 'call',
return_value={"data": {'ISADD2HOSTGROUP': 'false'}})
def test_is_host_associated_to_hostgroup_false(self, mock_call):
res = self.driver.client.is_host_associated_to_hostgroup('1')
self.assertFalse(res)
class HuaweiConfTestCase(test.TestCase):
def setUp(self):
super(HuaweiConfTestCase, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.fake_xml_file = self.tmp_dir + '/cinder_huawei_conf.xml'
self.conf = mock.Mock()
self.conf.cinder_huawei_conf_file = self.fake_xml_file
self.huawei_conf = huawei_conf.HuaweiConf(self.conf)
def _create_fake_conf_file(self):
"""Create a fake Config file.
Huawei storage customize a XML configuration file, the configuration
file is used to set the Huawei storage custom parameters, therefore,
in the UT test we need to simulate such a configuration file.
"""
doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
storage = doc.createElement('Storage')
config.appendChild(storage)
url = doc.createElement('RestURL')
url_text = doc.createTextNode('http://192.0.2.69:8082/'
'deviceManager/rest/')
url.appendChild(url_text)
storage.appendChild(url)
username = doc.createElement('UserName')
username_text = doc.createTextNode('admin')
username.appendChild(username_text)
storage.appendChild(username)
password = doc.createElement('UserPassword')
password_text = doc.createTextNode('Admin@storage')
password.appendChild(password_text)
storage.appendChild(password)
product = doc.createElement('Product')
product_text = doc.createTextNode('V3')
product.appendChild(product_text)
storage.appendChild(product)
protocol = doc.createElement('Protocol')
protocol_text = doc.createTextNode('iSCSI')
protocol.appendChild(protocol_text)
storage.appendChild(protocol)
lun = doc.createElement('LUN')
config.appendChild(lun)
luntype = doc.createElement('LUNType')
luntype_text = doc.createTextNode('Thick')
luntype.appendChild(luntype_text)
lun.appendChild(luntype)
lun_ready_wait_interval = doc.createElement('LUNReadyWaitInterval')
lun_ready_wait_interval_text = doc.createTextNode('2')
lun_ready_wait_interval.appendChild(lun_ready_wait_interval_text)
lun.appendChild(lun_ready_wait_interval)
lun_copy_wait_interval = doc.createElement('LUNcopyWaitInterval')
lun_copy_wait_interval_text = doc.createTextNode('2')
lun_copy_wait_interval.appendChild(lun_copy_wait_interval_text)
lun.appendChild(lun_copy_wait_interval)
timeout = doc.createElement('Timeout')
timeout_text = doc.createTextNode('43200')
timeout.appendChild(timeout_text)
lun.appendChild(timeout)
write_type = doc.createElement('WriteType')
write_type_text = doc.createTextNode('1')
write_type.appendChild(write_type_text)
lun.appendChild(write_type)
mirror_switch = doc.createElement('MirrorSwitch')
mirror_switch_text = doc.createTextNode('1')
mirror_switch.appendChild(mirror_switch_text)
lun.appendChild(mirror_switch)
prefetch = doc.createElement('Prefetch')
prefetch.setAttribute('Type', '1')
prefetch.setAttribute('Value', '0')
lun.appendChild(prefetch)
pool = doc.createElement('StoragePool')
pool_text = doc.createTextNode('OpenStack_Pool')
pool.appendChild(pool_text)
lun.appendChild(pool)
iscsi = doc.createElement('iSCSI')
config.appendChild(iscsi)
defaulttargetip = doc.createElement('DefaultTargetIP')
defaulttargetip_text = doc.createTextNode('192.0.2.68')
defaulttargetip.appendChild(defaulttargetip_text)
iscsi.appendChild(defaulttargetip)
initiator = doc.createElement('Initiator')
initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3')
initiator.setAttribute('TargetIP', '192.0.2.2')
initiator.setAttribute('CHAPinfo', 'mm-user;mm-user@storage')
initiator.setAttribute('ALUA', '1')
initiator.setAttribute('TargetPortGroup', 'PortGroup001')
iscsi.appendChild(initiator)
fakefile = open(self.conf.cinder_huawei_conf_file, 'w')
fakefile.write(doc.toprettyxml(indent=''))
fakefile.close()
| [
"[email protected]"
] | |
c5aed1e631b4cec0b812399ab18123634d8a5671 | c3dc08fe8319c9d71f10473d80b055ac8132530e | /challenge-165/roger-bell-west/python/ch-1.py | 460191357b2828a60f49ec73f416a12739c54f30 | [] | no_license | southpawgeek/perlweeklychallenge-club | d4b70d9d8e4314c4dfc4cf7a60ddf457bcaa7a1e | 63fb76188e132564e50feefd2d9d5b8491568948 | refs/heads/master | 2023-01-08T19:43:56.982828 | 2022-12-26T07:13:05 | 2022-12-26T07:13:05 | 241,471,631 | 1 | 0 | null | 2020-02-18T21:30:34 | 2020-02-18T21:30:33 | null | UTF-8 | Python | false | false | 1,280 | py | #! /usr/bin/python3
import fileinput
points = []
lines = []
x = []
y = []
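# Assumed input format (inferred from the parsing below): each line holds
# comma-separated integers -- "x,y" for a point, "x1,y1,x2,y2" for a line.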
for line in fileinput.input():
    line = line.rstrip()
f = [int(i) for i in line.split(',')]
for i in range(len(f)):
if i % 2 == 0:
x.append(f[i])
else:
y.append(f[i])
if len(f) == 4:
lines.append(f)
if len(f) == 2:
points.append(f)
mnx = min(x)
mxx = max(x)
mny = min(y)
mxy = max(y)
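# Pad the drawing area by 10% of the data range on each side so that shapes
# at the extremes are not clipped by the SVG viewport.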
lo = [
mnx - (mxx-mnx)/10,
mny - (mxy-mny)/10
]
hi = [
mxx + (mxx-mnx)/10,
mxy + (mxy-mny)/10
]
w = hi[0] - lo[0]
h = hi[1] - lo[1]
print('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>')
print('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">')
print(f'<svg width="{w}" height="{h}" viewBox="{lo[0]} {lo[1]} {w} {h}" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">')
if len(lines) > 0:
print(' <g id="lines" stroke="#369" stroke-width="4">')
for p in lines:
print(f' <line x1="{p[0]}" y1="{p[1]}" x2="{p[2]}" y2="{p[3]}" />')
print(' </g>')
if len(points) > 0:
print(' <g fill="#f73" id="points">')
for p in points:
print(f' <circle cx="{p[0]}" cy="{p[1]}" r="3" />')
print(' </g>')
print('</svg>')
| [
"[email protected]"
] | |
2a2eedd00c5d7719d072f84307fedc954789ea25 | 32b46b0955d1abd963077c7ed6f614c8fa1403e9 | /BT/pyobjc-core-3.0.4/build/lib.macosx-10.11-intel-2.7/PyObjCTest/test_inspect_signatures.py | 91eb53069c7804e7efe4a7ac50b850519d493a79 | [
"MIT"
] | permissive | ivanmolera/Raspberry | 3771f74ce4e4667c95081bfa38a2b39ec6375a26 | 30d16fdb88efc2aec347047eef26da213346cd1a | refs/heads/master | 2021-01-19T20:27:51.424504 | 2018-02-10T16:48:30 | 2018-02-10T16:48:30 | 88,509,424 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | from PyObjCTools.TestSupport import *
import objc
import types
import inspect
class TestInspectSignatures (TestCase):
@min_python_release("3.4")
def test_module_functions_signature(self):
for nm in dir(objc):
obj = getattr(objc, nm)
if isinstance(obj, types.BuiltinMethodType):
try:
value = inspect.signature(obj)
except ValueError:
value = None
if value is None:
self.fail("No inspect.signature for %s"%(nm,))
@min_python_release("3.4")
def test_class_signature(self):
class_list = [objc.ObjCPointer, objc.objc_meta_class, objc.objc_class,
objc.objc_object, objc.pyobjc_unicode, objc.selector, objc.FSRef,
objc.FSSpec, objc.ivar, objc.informal_protocol, objc.formal_protocol,
objc.varlist, objc.function, objc.IMP, objc.super]
if hasattr(objc, 'WeakRef'):
class_list.append(objc.WeakRef)
for cls in class_list:
for nm in dir(cls):
if nm in ('__new__', '__subclasshook__', '__abstractmethods__', '__prepare__'):
continue
obj = getattr(cls, nm)
if isinstance(obj, types.BuiltinMethodType):
try:
value = inspect.signature(obj)
except ValueError:
value = None
if value is None:
self.fail("No inspect.signature for %s.%s"%(cls.__name__, nm,))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
ce452c20e4b8d2186443529b945ac3386d21abc2 | 552320aa1aed946ac6e83a149355d495252e09f4 | /rule/rule-code/oracle/DML_SORT.py | 89a12a34f4f23ee4c2386b611357ac74181aa3cc | [] | no_license | kk71/sqlaudit | 59bab5765a67f56f1dd2f3103812051c5acbbc49 | 51b4a3b188ab31da5511bb68f617933d771a3d51 | refs/heads/main | 2023-02-04T18:38:46.125746 | 2020-06-30T06:06:51 | 2020-06-30T06:06:51 | 323,559,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | import re
def code(rule, entries, **kwargs):
single_sql: dict = kwargs.get("single_sql")
sql_text: str = single_sql["sql_text_no_comment"]
    dml_sort = re.compile(r"(\s)?((update )|(delete )).*order by", re.M | re.I)
if dml_sort.search(sql_text):
yield single_sql
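# NOTE: 'code_hole' is not defined or imported in this file; it is assumed to
# be injected into the execution scope by the rule framework that loads these
# rule-code modules.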
code_hole.append(code)
| [
"[email protected]"
] | |
685d1a7c6b36d028af2b4c5681e232e478642a48 | 10720eba1c8d483ed1e360b86a4f7a9016bdd344 | /source/py3exiv2-arm/pyexiv2/xmp.py | 4edf01cf6978c882caacd436331ef2b64b756bd6 | [
"MIT"
] | permissive | pageauc/pi-timolo | 8faffad6d60f98f99ae364fad5647907e179a37c | 66b550228d24037773434d53be3f324a3ffb6908 | refs/heads/master | 2023-08-06T16:27:28.327055 | 2022-10-26T15:55:12 | 2022-10-26T15:55:12 | 28,255,076 | 556 | 127 | MIT | 2023-09-11T06:24:00 | 2014-12-20T03:09:16 | Python | UTF-8 | Python | false | false | 18,011 | py | # -*- coding: utf-8 -*-
# ******************************************************************************
#
# Copyright (C) 2006-2011 Olivier Tilloy <[email protected]>
# Copyright (C) 2015-2016 Vincent Vande Vyvre <[email protected]>
#
# This file is part of the py3exiv2 distribution.
#
# py3exiv2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 3 as published by the Free Software Foundation.
#
# py3exiv2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with py3exiv2; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, 5th Floor, Boston, MA 02110-1301 USA.
#
# Maintainer: Vincent Vande Vyvre <[email protected]>
#
# ******************************************************************************
"""
XMP specific code.
"""
import libexiv2python
from pyexiv2.utils import FixedOffset, is_fraction, make_fraction, \
GPSCoordinate, DateTimeFormatter
import datetime
import re
class XmpValueError(ValueError):
"""
Exception raised when failing to parse the *value* of an XMP tag.
:attribute value: the value that fails to be parsed
:type value: string
:attribute type: the XMP type of the tag
:type type: string
"""
def __init__(self, value, type_):
self.value = value
        self.type = type_
def __str__(self):
return 'Invalid value for XMP type [%s]: [%s]' % \
(self.type, self.value)
class XmpTag(object):
"""Define an XMP tag.
    Here is a correspondence table between the XMP types and the possible
python types the value of a tag may take:
- alt, bag, seq: list of the contained simple type
- lang alt: dict of (language-code: value)
- Boolean: boolean
- Colorant: *[not implemented yet]*
- Date: :class:`datetime.date`, :class:`datetime.datetime`
- Dimensions: *[not implemented yet]*
- Font: *[not implemented yet]*
- GPSCoordinate: :class:`pyexiv2.utils.GPSCoordinate`
- Integer: int
- Locale: *[not implemented yet]*
- MIMEType: 2-tuple of strings
- Rational: :class:`fractions.Fraction`
- Real: *[not implemented yet]*
- AgentName, ProperName, Text: unicode string
- Thumbnail: *[not implemented yet]*
- URI, URL: string
- XPath: *[not implemented yet]*
"""
    # strptime is not flexible enough to handle all valid Date formats, so we use a
# custom regular expression
_time_zone_re = r'Z|((?P<sign>\+|-)(?P<ohours>\d{2}):(?P<ominutes>\d{2}))'
_time_re = r'(?P<hours>\d{2})(:(?P<minutes>\d{2})(:(?P<seconds>\d{2})(.(?P<decimal>\d+))?)?(?P<tzd>%s))?' % _time_zone_re
_date_re = re.compile(r'(?P<year>\d{4})(-(?P<month>\d{2})(-(?P<day>\d{2})(T(?P<time>%s))?)?)?' % _time_re)
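    # The pattern accepts the partial dates allowed by XMP, e.g. '2011',
    # '2011-10', '2011-10-09' or '2011-10-09T23:20:50.52+05:30'
    # (illustrative values).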
def __init__(self, key, value=None, _tag=None):
"""The tag can be initialized with an optional value which expected
type depends on the XMP type of the tag.
Args:
key -- the key of the tag
value -- the value of the tag
"""
super(XmpTag, self).__init__()
if _tag is not None:
self._tag = _tag
else:
self._tag = libexiv2python._XmpTag(key)
self._raw_value = None
self._value = None
self._value_cookie = False
if value is not None:
#type_ = self._tag._getType()
self._set_value(value)
def _set_owner(self, metadata):
self._tag._setParentImage(metadata._image)
@staticmethod
def _from_existing_tag(_tag):
"""Build a tag from an already existing libexiv2python._XmpTag.
"""
tag = XmpTag(_tag._getKey(), _tag=_tag)
type_ = _tag._getExiv2Type()
# Do not set the raw_value property, as it would call
# _tag._set{Text,Array,LangAlt}Value
# (see https://bugs.launchpad.net/pyexiv2/+bug/582445).
if type_ == 'XmpText':
tag._raw_value = _tag._getTextValue()
elif type_ in ('XmpAlt', 'XmpBag', 'XmpSeq'):
tag._raw_value = _tag._getArrayValue()
elif type_ == 'LangAlt':
tag._raw_value = _tag._getLangAltValue()
tag._value_cookie = True
return tag
@property
def key(self):
"""The key of the tag in the dotted form
``familyName.groupName.tagName`` where ``familyName`` = ``xmp``.
"""
return self._tag._getKey()
@property
def type(self):
"""The XMP type of the tag.
"""
return self._tag._getType()
@property
def name(self):
"""The name of the tag (this is also the third part of the key).
"""
return self._tag._getName()
@property
def title(self):
"""The title (label) of the tag.
"""
return self._tag._getTitle()
@property
def description(self):
"""The description of the tag.
"""
return self._tag._getDescription()
def _get_raw_value(self):
return self._raw_value
def _set_raw_value(self, value):
type_ = self._tag._getExiv2Type()
if type_ == 'XmpText':
self._tag._setTextValue(value)
elif type_ in ('XmpAlt', 'XmpBag', 'XmpSeq'):
if not value:
raise ValueError('Empty array')
self._tag._setArrayValue(value)
elif type_ == 'LangAlt':
if not value:
raise ValueError('Empty LangAlt')
self._tag._setLangAltValue(value)
self._raw_value = value
self._value_cookie = True
raw_value = property(fget=_get_raw_value, fset=_set_raw_value,
doc='The raw value of the tag as a [list of] ' \
'string(s).')
def _compute_value(self):
# Lazy computation of the value from the raw value
if self.type.startswith(('seq', 'bag', 'alt')):
type_ = self.type[4:]
if type_.lower().startswith('closed choice of'):
                type_ = type_[17:]
self._value = [self._convert_to_python(v, type_) for v in self._raw_value]
elif self.type == 'Lang Alt':
self._value = {}
for k, v in self._raw_value.items():
try:
self._value[k] = v
except TypeError:
raise XmpValueError(self._raw_value, self.type)
elif self.type.lower().startswith('closed choice of'):
self._value = self._convert_to_python(self._raw_value, self.type[17:])
elif self.type == '':
self._value = self._raw_value
else:
self._value = self._convert_to_python(self._raw_value, self.type)
self._value_cookie = False
def _get_value(self):
if self._value_cookie:
self._compute_value()
return self._value
def _set_value(self, value):
type_ = self._tag._getExiv2Type()
if type_ == 'XmpText':
stype = self.type
if stype.lower().startswith('closed choice of'):
stype = stype[17:]
self.raw_value = self._convert_to_string(value, stype)
elif type_ in ('XmpAlt', 'XmpBag', 'XmpSeq'):
if not isinstance(value, (list, tuple)):
raise TypeError('Expecting a list of values')
stype = self.type[4:]
if stype.lower().startswith('closed choice of'):
stype = stype[17:]
self.raw_value = [self._convert_to_string(v, stype) for v in value]
elif type_ == 'LangAlt':
if isinstance(value, str):
value = {'x-default': value}
if not isinstance(value, dict):
raise TypeError('Expecting a dictionary mapping language codes to values')
raw_value = {}
for k, v in value.items():
if isinstance(v, str):
try:
v = v.encode('utf-8')
except TypeError:
raise XmpValueError(value, type_)
raw_value[k] = v
self.raw_value = raw_value
self._value = value
self._value_cookie = False
value = property(fget=_get_value, fset=_set_value,
doc='The value of the tag as a [list of] python ' \
'object(s).')
def _convert_to_python(self, value, type_):
"""Convert a raw value to its corresponding python type.
Args:
value -- the raw value to be converted
type_ -- the simple type of the raw value
Return: the value converted to its corresponding python type
Raise XmpValueError: if the conversion fails
"""
if type_ == 'Boolean':
if value == 'True':
return True
elif value == 'False':
return False
else:
raise XmpValueError(value, type_)
elif type_ == 'Colorant':
# TODO
raise NotImplementedError('XMP conversion for type [%s]' % type_)
elif type_ == 'Date':
match = self._date_re.match(value)
if match is None:
raise XmpValueError(value, type_)
gd = match.groupdict()
if gd['month'] is not None:
month = int(gd['month'])
else:
month = 1
if gd['day'] is not None:
day = int(gd['day'])
else:
day = 1
if gd['time'] is None:
try:
return datetime.date(int(gd['year']), month, day)
except ValueError:
raise XmpValueError(value, type_)
else:
if gd['minutes'] is None:
# Malformed time
raise XmpValueError(value, type_)
if gd['seconds'] is not None:
seconds = int(gd['seconds'])
else:
seconds = 0
if gd['decimal'] is not None:
microseconds = int(float('0.%s' % gd['decimal']) * 1E6)
else:
microseconds = 0
if gd['tzd'] == 'Z':
tzinfo = FixedOffset()
else:
tzinfo = FixedOffset(gd['sign'], int(gd['ohours']),
int(gd['ominutes']))
try:
return datetime.datetime(int(gd['year']), month, day,
int(gd['hours']), int(gd['minutes']),
seconds, microseconds, tzinfo)
except ValueError:
raise XmpValueError(value, type_)
elif type_ == 'Dimensions':
# TODO
raise NotImplementedError('XMP conversion for type [%s]' % type_)
elif type_ == 'Font':
# TODO
raise NotImplementedError('XMP conversion for type [%s]' % type_)
elif type_ == 'GPSCoordinate':
try:
return GPSCoordinate.from_string(value)
except ValueError:
raise XmpValueError(value, type_)
elif type_ == 'Integer':
try:
return int(value)
except ValueError:
raise XmpValueError(value, type_)
elif type_ == 'Locale':
# TODO
# See RFC 3066
raise NotImplementedError('XMP conversion for type [%s]' % type_)
elif type_ == 'MIMEType':
if value.count('/') != 1:
raise XmpValueError(value, type_)
try:
return tuple(value.split('/', 1))
except ValueError:
raise XmpValueError(value, type_)
elif type_ == 'Rational':
try:
return make_fraction(value)
except (ValueError, ZeroDivisionError):
raise XmpValueError(value, type_)
elif type_ == 'Real':
# TODO
raise NotImplementedError('XMP conversion for type [%s]' % type_)
elif type_ in ('AgentName', 'ProperName', 'Text'):
if isinstance(value, bytes):
try:
value = str(value, 'utf-8')
except TypeError:
raise XmpValueError(value, type_)
return value
elif type_ == 'Thumbnail':
# TODO
raise NotImplementedError('XMP conversion for type [%s]' % type_)
elif type_ in ('URI', 'URL'):
if isinstance(value, bytes):
try:
value = value.decode('utf-8')
except UnicodeDecodeError:
# Unknow encoding, return the raw value
pass
return value
elif type_ == 'XPath':
# TODO
raise NotImplementedError('XMP conversion for type [%s]' % type_)
raise NotImplementedError('XMP conversion for type [%s]' % type_)
def _convert_to_string(self, value, type_):
"""Convert a value to its corresponding string representation.
Args:
value -- the value to be converted
type_ -- the simple type of the value
Return: the value converted to its corresponding string representation
Raise XmpValueError: if the conversion fails
"""
if type_ == 'Boolean':
if isinstance(value, bool):
return str(value)
else:
raise XmpValueError(value, type_)
elif type_ == 'Date':
if isinstance(value, (datetime.date, datetime.datetime)):
return DateTimeFormatter.xmp(value)
else:
raise XmpValueError(value, type_)
elif type_ == 'GPSCoordinate':
if isinstance(value, GPSCoordinate):
return str(value)
else:
raise XmpValueError(value, type_)
elif type_ == 'Integer':
if isinstance(value, int):
return str(value)
else:
raise XmpValueError(value, type_)
elif type_ == 'MIMEType':
if isinstance(value, tuple) and len(value) == 2:
return '/'.join(value)
else:
raise XmpValueError(value, type_)
elif type_ in ('AgentName', 'ProperName', 'Text', 'URI', 'URL'):
if isinstance(value, str):
try:
return value.encode('utf-8')
except UnicodeEncodeError:
raise XmpValueError(value, type_)
elif isinstance(value, bytes):
return value
raise XmpValueError(value, type_)
elif type_ == 'Rational':
if is_fraction(value):
return str(value)
else:
raise XmpValueError(value, type_)
elif type_ == '':
# Undefined type
if isinstance(value, str):
try:
return value.encode('utf-8')
except UnicodeEncodeError:
raise XmpValueError(value, type_)
elif isinstance(value, (datetime.date, datetime.datetime)):
return DateTimeFormatter.xmp(value)
raise NotImplementedError('XMP conversion for type [%s]' % type_)
def __str__(self):
"""Return a string representation of the XMP tag for debugging purposes
"""
left = '%s [%s]' % (self.key, self.type)
if self._raw_value is None:
right = '(No value)'
else:
right = self._raw_value
return '<%s = %s>' % (left, right)
# Support for pickling.
def __getstate__(self):
return (self.key, self.raw_value)
def __setstate__(self, state):
key, raw_value = state
self._tag = libexiv2python._XmpTag(key)
self.raw_value = raw_value
def register_namespace(name, prefix):
"""Register a custom XMP namespace.
Overriding the prefix of a known or previously registered namespace is not
allowed.
Args:
name -- the name of the custom namespace (ending with a ``/``),
typically a URL (e.g. http://purl.org/dc/elements/1.1/)
prefix -- the prefix for the custom namespace (keys in this namespace
will be in the form ``Xmp.{prefix}.{something}``)
Raise ValueError: if the name doesn’t end with a ``/``
    Raise KeyError: if a namespace already exists with this prefix
"""
if not name.endswith('/'):
raise ValueError('Name should end with a /')
libexiv2python._registerXmpNs(name, prefix)
def unregister_namespace(name):
"""Unregister a custom XMP namespace.
A custom namespace is identified by its name, **not** by its prefix.
Attempting to unregister an unknown namespace raises an error, as does
attempting to unregister a builtin namespace.
Args:
name -- the name of the custom namespace (ending with a ``/``),
typically a URL (e.g. http://purl.org/dc/elements/1.1/)
Raise ValueError: if the name doesn’t end with a ``/``
Raise KeyError: if the namespace is unknown or a builtin namespace
"""
if not name.endswith('/'):
raise ValueError('Name should end with a /')
libexiv2python._unregisterXmpNs(name)
def unregister_namespaces():
"""Unregister all custom XMP namespaces.
Builtin namespaces are not unregistered.
This function always succeeds.
"""
libexiv2python._unregisterAllXmpNs()
| [
"[email protected]"
] | |
5160d3321356517efa62024bc797cf05922ac1ac | 9947f5315175584c049d3690da3bd3b695c959a2 | /ch-09-system-design-and-scalability/01-stock-data.py | 664ed8d2aacd05cf4da2de998399540e43322f80 | [] | no_license | GeorgeUofT/ctci-questions | 3c32a2af59f980ee952386e3784fa6cb1e88ea56 | 99f65e56592b2e709984c85401a2faf8d01e620e | refs/heads/master | 2021-05-04T05:51:02.011012 | 2018-02-05T19:41:29 | 2018-02-05T19:41:29 | 120,345,490 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | # Design a system to deliver stock data.
| [
"[email protected]"
] | |
8571604395b1648e6b411f1aa7fd5c76a1acea6e | 9f91fa2910d13273a50ae416c116e16385a4eb95 | /bsestarmfapi/custcreation.py | 5e53c81e1a44c3937ae275c55bf08d3c460f1b8d | [] | no_license | natrayanp/mysb_v2 | cac811e7f66670f8546cccdbca386ba6ff4f8cd6 | 24dea04e2a631ca6b465b3f62077a83a5dce9758 | refs/heads/master | 2022-11-20T16:49:30.341095 | 2018-07-31T17:18:04 | 2018-07-31T17:18:04 | 116,319,931 | 0 | 1 | null | 2022-11-14T21:09:17 | 2018-01-05T00:05:13 | Python | UTF-8 | Python | false | false | 21,514 | py | from bsestarmfapi import app
#from .hello_world import app
from flask import request, make_response, jsonify, Response, redirect
from bsestarmfapi import settings
from datetime import datetime
import requests
import json
import zeep
@app.route('/custcreation', methods=['GET', 'POST', 'OPTIONS'])
def create_user_bse():
    if request.method == 'OPTIONS':
        print("inside custcreation options")
        return 'inside custcreation options'
    elif request.method == 'POST':
        print("inside custcreation POST")
print((request))
#userid,entityid=jwtnoverify.validatetoken(request)
print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        payload = request.get_json()
#payload=json.loads(payload)
print(payload)
#clientdata,fatcadata=custdatavalidation(payload)
#reqdataifsc=payload[''ifsc']
## initialise the zeep client for order wsdl
client = zeep.Client(wsdl=settings.WSDL_UPLOAD_URL[settings.LIVE])
set_soap_logging()
## get the password
pass_dict = soap_get_password_upload(client)
## prepare the user record
#client_code='1234test'
#payload=request.get_json()
bse_user = prepare_user_param(payload)
## post the user creation request
user_response = soap_create_user(client, bse_user, pass_dict)
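        ## Note: soap_get_password_upload/soap_create_user/soap_create_fatca are
        ## assumed to be helper functions defined elsewhere in this module; BSE
        ## status code '100' is treated as success in the checks below.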
## TODO: Log the soap request and response post the user creation request
if user_response['bsesttuscode'] == '100':
#pass_dict = soap_get_password_upload(client)
bse_fatca = prepare_fatca_param(payload)
fatca_response = soap_create_fatca(client, bse_fatca, pass_dict)
## TODO: Log the soap request and response post the fatca creation request
if fatca_response['bsesttuscode'] == '100':
return make_response(jsonify({'statuscode': fatca_response['bsesttuscode'], 'statusmessage': "User and Fatca record created successfully"}),200)
else:
return make_response(jsonify({'statuscode': fatca_response['bsesttuscode'], 'statusmessage': fatca_response['bsesttusmsg']}),400)
else:
return make_response(jsonify({'statuscode': user_response['bsesttuscode'], 'statusmessage': user_response['bsesttusmsg']}),400)
'''
pass_dict = soap_get_password_upload(client)
bse_fatca = prepare_fatca_param(client_code)
fatca_response = soap_create_fatca(client, bse_fatca, pass_dict)
## TODO: Log the soap request and response post the fatca creation request
return ("success")
'''
# prepare the string that will be sent as param for user creation in bse
def prepare_user_param(payload):
# extract the records from the table
#info = Info.objects.get(id=client_code)
#kyc = KycDetail.objects.get(user=client_code)
#bank = BankDetail.objects.get(user=client_code)
# some fields require processing
## address field can be 40 chars as per BSE but RTA is truncating it to 30 chars and showing that in account statement which is confusing customers, so reducing the length to 30 chars
'''
add1 = kyc.address[:30]
if (len(kyc.address) > 30):
add2 = kyc.address[30:60]
if (len(kyc.address) > 60):
add3 = kyc.address[60:90]
else:
add3 = ''
else:
add2 = add3 = ''
appname1 = kyc.first_name
if (kyc.middle_name != ''):
appname1 = appname1 + ' ' + kyc.middle_name
if (kyc.last_name != ''):
appname1 = appname1 + ' ' + kyc.last_name
appname1 = appname1[:70]
ifsc_code = bank.branch.ifsc_code
'''
# make the list that will be used to create param
    d = payload
    print(type(d))
    print(d['clientcode'])
param_list = [
('CODE', d['clientcode']),
('HOLDING', d['clientholding']),
('TAXSTATUS', d['clienttaxstatus']),
('OCCUPATIONCODE', d['clientoccupationcode']),
('APPNAME1', d['clientappname1']),
('APPNAME2', d['clientappname2']),
('APPNAME3', d['clientappname3']),
('DOB', d['clientdob']), #to change the format
('GENDER', d['clientgender']),
        ('FATHER/HUSBAND/GUARDIAN', d['clientguardian']),
('PAN', d['clientpan']),
('NOMINEE', d['clientnominee']),
('NOMINEE_RELATION', d['clientnomineerelation']),
('GUARDIANPAN', d['clientguardianpan']),
('TYPE', d['clienttype']),
('DEFAULTDP', d['clientdefaultdp']),
('CDSLDPID', d['clientcdsldpid']),
('CDSLCLTID', d['clientcdslcltid']),
('NSDLDPID', d['clientnsdldpid']),
('NSDLCLTID', d['clientnsdlcltid']),
('ACCTYPE_1', d['clientacctype1']),
('ACCNO_1', d['clientaccno1']),
('MICRNO_1', d['clientmicrno1']),
('NEFT/IFSCCODE_1', d['clientifsccode1']),
('default_bank_flag_1', d['defaultbankflag1']),
('ACCTYPE_2', d['clientacctype2']),
('ACCNO_2', d['clientaccno2']),
('MICRNO_2', d['clientmicrno2']),
('NEFT/IFSCCODE_2', d['clientifsccode2']),
('default_bank_flag_2', d['defaultbankflag2']),
('ACCTYPE_3', d['clientacctype3']),
('ACCNO_3', d['clientaccno3']),
('MICRNO_3', d['clientmicrno3']),
('NEFT/IFSCCODE_3', d['clientifsccode3']),
('default_bank_flag_3', d['defaultbankflag3']),
('ACCTYPE_4', d['clientacctype4']),
('ACCNO_4', d['clientaccno4']),
('MICRNO_4', d['clientmicrno4']),
('NEFT/IFSCCODE_4', d['clientifsccode4']),
('default_bank_flag_4', d['defaultbankflag4']),
('ACCTYPE_5', d['clientacctype5']),
('ACCNO_5', d['clientaccno5']),
('MICRNO_5', d['clientmicrno5']),
('NEFT/IFSCCODE_5', d['clientifsccode5']),
('default_bank_flag_5', d['defaultbankflag5']),
('CHEQUENAME', d['clientchequename5']),
('ADD1', d['clientadd1'] ),
('ADD2', d['clientadd2']),
('ADD3', d['clientadd3']),
('CITY', d['clientcity']),
('STATE', d['clientstate']),
('PINCODE', d['clientpincode']),
('COUNTRY', d['clientcountry']),
('RESIPHONE', d['clientresiphone']),
('RESIFAX', d['clientresifax']),
('OFFICEPHONE', d['clientofficephone']),
('OFFICEFAX', d['clientofficefax']),
('EMAIL', d['clientemail']),
('COMMMODE',d['clientcommmode']),
('DIVPAYMODE', d['clientdivpaymode']),
('PAN2', d['clientpan2']),
('PAN3', d['clientpan3']),
('MAPINNO', d['mapinno']),
('CM_FORADD1', d['cm_foradd1']),
('CM_FORADD2', d['cm_foradd2']),
('CM_FORADD3', d['cm_foradd3']),
('CM_FORCITY', d['cm_forcity']),
('CM_FORPINCODE', d['cm_forpincode']),
('CM_FORSTATE', d['cm_forstate']),
('CM_FORCOUNTRY', d['cm_forcountry']),
('CM_FORRESIPHONE', d['cm_forresiphone']),
('CM_FORRESIFAX', d['cm_forresifax']),
('CM_FOROFFPHONE', d['cm_foroffphone']),
('CM_FOROFFFAX', d['cm_forofffax']),
('CM_MOBILE', d['cm_mobile'])
]
'''
param_list = [
('CODE', 'NAT1234'),
('HOLDING', 'SI'),
('TAXSTATUS', '01'),
('OCCUPATIONCODE', '01'),
('APPNAME1', 'appname1'),
('APPNAME2', ''),
('APPNAME3', ''),
('DOB', '04/10/1980'),
('GENDER', 'M'),
('FATHER/HUSBAND/gurdian', ''),
('PAN', 'ARYNJ1340H'),
('NOMINEE', ''),
('NOMINEE_RELATION', ''),
('GUARDIANPAN', ''),
('TYPE', 'P'),
('DEFAULTDP', ''),
('CDSLDPID', ''),
('CDSLCLTID', ''),
('NSDLDPID', ''),
('NSDLCLTID', ''),
('ACCTYPE_1', 'SB'),
('ACCNO_1', '1234567654'),
('MICRNO_1', ''),
('NEFT/IFSCCODE_1', 'ICIC0006036'),
('default_bank_flag_1', 'Y'),
('ACCTYPE_2', ''),
('ACCNO_2', ''),
('MICRNO_2', ''),
('NEFT/IFSCCODE_2', ''),
('default_bank_flag_2', ''),
('ACCTYPE_3', ''),
('ACCNO_3', ''),
('MICRNO_3', ''),
('NEFT/IFSCCODE_3', ''),
('default_bank_flag_3', ''),
('ACCTYPE_4', ''),
('ACCNO_4', ''),
('MICRNO_4', ''),
('NEFT/IFSCCODE_4', ''),
('default_bank_flag_4', ''),
('ACCTYPE_5', ''),
('ACCNO_5', ''),
('MICRNO_5', ''),
('NEFT/IFSCCODE_5', ''),
('default_bank_flag_5', ''),
('CHEQUENAME', ''),
('ADD1', 'add1'),
('ADD2', 'add2'),
('ADD3', 'add3'),
('CITY', 'city'),
('STATE', 'TN'),
('PINCODE', '600032'),
('COUNTRY', 'India'),
('RESIPHONE', ''),
('RESIFAX', ''),
('OFFICEPHONE', ''),
('OFFICEFAX', ''),
('EMAIL', '[email protected]'),
('COMMMODE', 'M'),
('DIVPAYMODE', '02'),
('PAN2', ''),
('PAN3', ''),
('MAPINNO', ''),
('CM_FORADD1', ''),
('CM_FORADD2', ''),
('CM_FORADD3', ''),
('CM_FORCITY', ''),
('CM_FORPINCODE', ''),
('CM_FORSTATE', ''),
('CM_FORCOUNTRY', ''),
('CM_FORRESIPHONE', ''),
('CM_FORRESIFAX', ''),
('CM_FOROFFPHONE', ''),
('CM_FOROFFFAX', ''),
('CM_MOBILE', '9677628897'),
]
'''
# prepare the param field to be returned
user_param = ''
for param in param_list:
user_param = user_param + '|' + str(param[1])
# print user_param
return user_param[1:]
# prepare the string that will be sent as param for fatca creation in bse
def prepare_fatca_param(payload):
'''
# extract the records from the table
kyc = KycDetail.objects.get(user=client_code)
# some fields require processing
inv_name = kyc.first_name
if (kyc.middle_name != ''):
inv_name = inv_name + ' ' + kyc.middle_name
if (kyc.last_name != ''):
inv_name = inv_name + ' ' + kyc.last_name
inv_name = inv_name[:70]
if kyc.occ_code == '01':
srce_wealt = '02'
occ_type = 'B'
else:
srce_wealt = '01'
occ_type = 'S'
'''
# make the list that will be used to create param
d=payload
param_list = [
('PAN_RP', d['pan_rp']),
('PEKRN', d['pekrn']),
('INV_NAME', d['inv_name']),
('DOB', d['dob']),
('FR_NAME', d['fr_name']),
('SP_NAME', d['sp_name']),
('TAX_STATUS', d['tax_status']),
('DATA_SRC', d['data_src']),
('ADDR_TYPE', d['addr_type']),
('PO_BIR_INC', d['po_bir_inc']),
('CO_BIR_INC', d['co_bir_inc']),
('TAX_RES1', d['tax_res1']),
('TPIN1', d['tpin1']),
('ID1_TYPE', d['id1_type']),
('TAX_RES2', d['tax_res2']),
('TPIN2', d['tpin2']),
('ID2_TYPE', d['id2_type']),
('TAX_RES3', d['tax_res3']),
('TPIN3', d['tpin3']),
('ID3_TYPE', d['id3_type']),
('TAX_RES4', d['tax_res4']),
('TPIN4', d['tpin4']),
('ID4_TYPE', d['id4_type']),
('SRCE_WEALT', d['srce_wealt']),
('CORP_SERVS', d['corp_servs']),
('INC_SLAB', d['inc_slab']),
('NET_WORTH', d['net_worth']),
('NW_DATE', d['nw_date']),
('PEP_FLAG', d['pep_flag']),
('OCC_CODE', d['occ_code']),
('OCC_TYPE', d['occ_type']),
('EXEMP_CODE', d['exemp_code']),
('FFI_DRNFE', d['ffi_drnfe']),
('GIIN_NO', d['giin_no']),
('SPR_ENTITY', d['spr_entity']),
('GIIN_NA', d['giin_na']),
('GIIN_EXEMC', d['giin_exemc']),
('NFFE_CATG', d['nffe_catg']),
('ACT_NFE_SC', d['act_nfe_sc']),
('NATURE_BUS', d['nature_bus']),
('REL_LISTED', d['rel_listed']),
('EXCH_NAME', d['exch_name']),
('UBO_APPL', d['ubo_appl']),
('UBO_COUNT', d['ubo_count']),
('UBO_NAME', d['ubo_name']),
('UBO_PAN', d['ubo_pan']),
('UBO_NATION', d['ubo_nation']),
('UBO_ADD1', d['ubo_add1']),
('UBO_ADD2', d['ubo_add2']),
('UBO_ADD3', d['ubo_add3']),
('UBO_CITY', d['ubo_city']),
('UBO_PIN', d['ubo_pin']),
('UBO_STATE', d['ubo_state']),
('UBO_CNTRY', d['ubo_cntry']),
('UBO_ADD_TY', d['ubo_add_ty']),
('UBO_CTR', d['ubo_ctr']),
('UBO_TIN', d['ubo_tin']),
('UBO_ID_TY', d['ubo_id_ty']),
('UBO_COB', d['ubo_cob']),
('UBO_DOB', d['ubo_dob']),
('UBO_GENDER', d['ubo_gender']),
('UBO_FR_NAM', d['ubo_fr_nam']),
('UBO_OCC', d['ubo_occ']),
('UBO_OCC_TY', d['ubo_occ_ty']),
('UBO_TEL', d['ubo_tel']),
('UBO_MOBILE', d['ubo_mobile']),
('UBO_CODE', d['ubo_code']),
('UBO_HOL_PC', d['ubo_hol_pc']),
('SDF_FLAG', d['sdf_flag']),
('UBO_DF', d['ubo_df']),
('AADHAAR_RP', d['aadhaar_rp']),
('NEW_CHANGE', d['new_change']),
('LOG_NAMe',d['log_name']),
('DOC1', d['filler1']),
('DOC2', d['filler2'])
]
'''
param_list = [
('PAN_RP', 'ARYNJ1340H'),
('PEKRN', ''),
('INV_NAME', 'appname1'),
('DOB', ''),
('FR_NAME', ''),
('SP_NAME', ''),
('TAX_STATUS', '1'),
('DATA_SRC', 'E'),
('ADDR_TYPE', '1'),
('PO_BIR_INC', 'IN'),
('CO_BIR_INC', 'IN'),
('TAX_RES1', 'IN'),
('TPIN1', 'ARYNJ1340H'),
('ID1_TYPE', 'C'),
('TAX_RES2', ''),
('TPIN2', ''),
('ID2_TYPE', ''),
('TAX_RES3', ''),
('TPIN3', ''),
('ID3_TYPE', ''),
('TAX_RES4', ''),
('TPIN4', ''),
('ID4_TYPE', ''),
('SRCE_WEALT', '01'),
('CORP_SERVS', ''),
('INC_SLAB', '32'),
('NET_WORTH', ''),
('NW_DATE', ''),
('PEP_FLAG', 'N'),
('OCC_CODE', '03'),
('OCC_TYPE', 'Service'),
('EXEMP_CODE', ''),
('FFI_DRNFE', ''),
('GIIN_NO', ''),
('SPR_ENTITY', ''),
('GIIN_NA', ''),
('GIIN_EXEMC', ''),
('NFFE_CATG', ''),
('ACT_NFE_SC', ''),
('NATURE_BUS', ''),
('REL_LISTED', ''),
('EXCH_NAME', 'O'),
('UBO_APPL', 'N'),
('UBO_COUNT', ''),
('UBO_NAME', ''),
('UBO_PAN', ''),
('UBO_NATION', ''),
('UBO_ADD1', ''),
('UBO_ADD2', ''),
('UBO_ADD3', ''),
('UBO_CITY', ''),
('UBO_PIN', ''),
('UBO_STATE', ''),
('UBO_CNTRY', ''),
('UBO_ADD_TY', ''),
('UBO_CTR', ''),
('UBO_TIN', ''),
('UBO_ID_TY', ''),
('UBO_COB', ''),
('UBO_DOB', ''),
('UBO_GENDER', ''),
('UBO_FR_NAM', ''),
('UBO_OCC', ''),
('UBO_OCC_TY', ''),
('UBO_TEL', ''),
('UBO_MOBILE', ''),
('UBO_CODE', ''),
('UBO_HOL_PC', ''),
('SDF_FLAG', ''),
('UBO_DF', ''),
('AADHAAR_RP', ''),
('NEW_CHANGE', 'N'),
('LOG_NAME','NAT1234'),
('DOC1', ''),
('DOC2', ''),
]
'''
# prepare the param field to be returned
fatca_param = ''
for param in param_list:
fatca_param = fatca_param + '|' + str(param[1])
# print fatca_param
return fatca_param[1:]
## fire SOAP query to get password for Upload API endpoint
## used by all functions except create_transaction_bse() and cancel_transaction_bse()
def soap_get_password_upload(client):
method_url = settings.METHOD_UPLOAD_URL[settings.LIVE] + 'getPassword'
svc_url = settings.SVC_UPLOAD_URL[settings.LIVE]
header_value = soap_set_wsa_headers(method_url, svc_url)
response = client.service.getPassword(
MemberId=settings.MEMBERID[settings.LIVE],
UserId=settings.USERID[settings.LIVE],
Password=settings.PASSWORD[settings.LIVE],
PassKey=settings.PASSKEY[settings.LIVE],
_soapheaders=[header_value]
)
response = response.split('|')
status = response[0]
if (status == '100'):
# login successful
pass_dict = {'password': response[1], 'passkey': settings.PASSKEY[settings.LIVE]}
print('#################')
print(pass_dict)
print('#################')
return pass_dict
else:
raise Exception(
"BSE error 640: Login unsuccessful for upload API endpoint"
)
## fire SOAP query to create a new user on bsestar
def soap_create_user(client, user_param, pass_dict):
method_url = settings.METHOD_UPLOAD_URL[settings.LIVE] + 'MFAPI'
header_value = soap_set_wsa_headers(method_url, settings.SVC_UPLOAD_URL[settings.LIVE])
response = client.service.MFAPI(
'02',
settings.USERID[settings.LIVE],
pass_dict['password'],
user_param,
_soapheaders=[header_value]
)
## this is a good place to put in a slack alert
response = response.split('|')
status = response[0]
if (status == '100'):
# User creation successful
return {'bsesttuscode': response[0], 'bsesttusmsg': response[1],'stcdtoreturn':200}
else:
raise Exception(
"BSE error 644: User creation unsuccessful: %s" % response[1]
)
        return {'bsesttuscode': response[0], 'bsesttusmsg': response[1],'stcdtoreturn':400}  # note: unreachable, the raise above exits this branch
## fire SOAP query to create fatca record of user on bsestar
def soap_create_fatca(client, fatca_param, pass_dict):
method_url = settings.METHOD_UPLOAD_URL[settings.LIVE] + 'MFAPI'
header_value = soap_set_wsa_headers(method_url, settings.SVC_UPLOAD_URL[settings.LIVE])
response = client.service.MFAPI(
'01',
settings.USERID[settings.LIVE],
pass_dict['password'],
fatca_param,
_soapheaders=[header_value]
)
## this is a good place to put in a slack alert
response = response.split('|')
status = response[0]
if (status == '100'):
# Fatca creation successful
return {'bsesttuscode': response[0], 'bsesttusmsg': response[1],'stcdtoreturn':200}
else:
raise Exception(
"BSE error 645: Fatca creation unsuccessful: %s" % response[1]
)
        return {'bsesttuscode': response[0], 'bsesttusmsg': response[1],'stcdtoreturn':400}  # note: unreachable, the raise above exits this branch
# set logging such that it's easy to debug soap queries
def set_soap_logging():
import logging.config
logging.config.dictConfig({
'version': 1,
'formatters': {
'verbose': {
'format': '%(name)s: %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'zeep.transports': {
'level': 'DEBUG',
'propagate': True,
'handlers': ['console'],
},
}
})
################ HELPER SOAP FUNCTIONS
# every soap query to bse must have wsa headers set
def soap_set_wsa_headers(method_url, svc_url):
print(method_url)
print(svc_url)
header = zeep.xsd.Element("None", zeep.xsd.ComplexType([
zeep.xsd.Element('{http://www.w3.org/2005/08/addressing}Action', zeep.xsd.String()),
zeep.xsd.Element('{http://www.w3.org/2005/08/addressing}To', zeep.xsd.String())
])
)
header_value = header(Action=method_url, To=svc_url)
return header_value
'''
def custdatavalidation(jsonload):
#Does the validation of the json payload and split them to cust data and fatca data
print('inside validate and split to cust and fatca data')
jsonloadcpy=jsonload
# some fields require processing
## address field can be 40 chars as per BSE but RTA is truncating it to 30 chars and showing that in account statement which is confusing customers, so reducing the length to 30 chars
validatedcode['clientcode'] = jsonload.clientcode
validatedcode['clientholding'] = jsonload.clientholding
validatedcode['clienttaxstatus'] = jsonload.clienttaxstatus
validatedcode['clientoccupationcode'] = jsonload.clientoccupationcode
validatedcode['clientappname1'] = appname1
validatedcode['clientappname2'] = ''
validatedcode['clientappname3'] = ''
validatedcode['clientdob'] = jsonload.clientdob
validatedcode['clientgender'] = jsonload.clientgender
validatedcode['clientguardian'] = jsonload.clientguardian
validatedcode['clientpan'] = jsonload.clientpan
validatedcode['clientnominee'] = jsonload.clientnominee
validatedcode['clientnomineerelation'] = jsonload.clientnomineerelation
validatedcode['clientguardianpan'] = jsonload.clientguardianpan
validatedcode['clienttype'] = jsonload.clienttype
validatedcode['clientdefaultdp'] = jsonload.clientdefaultdp
validatedcode['clientcdsldpid'] = jsonload.clientcdsldpid
validatedcode['clientcdslcltid'] = jsonload.clientcdslcltid
validatedcode['clientnsdldpid'] = jsonload.clientnsdldpid
validatedcode['clientnsdlcltid'] = jsonload.clientnsdlcltid
validatedcode['clientacctype1'] = jsonload.clientacctype1
validatedcode['clientaccno1'] = jsonload.clientaccno1
validatedcode['clientmicrno1'] = jsonload.clientmicrno1
validatedcode['clientifsccode1'] = jsonload.clientifsccode1
validatedcode['defaultbankflag1'] = jsonload.defaultbankflag1
validatedcode['clientacctype2'] = jsonload.clientacctype2
validatedcode['clientaccno2'] = jsonload.clientaccno2
validatedcode['clientmicrno2'] = jsonload.clientmicrno2
validatedcode['clientifsccode2'] = jsonload.clientifsccode2
validatedcode['defaultbankflag2'] = jsonload.defaultbankflag2
validatedcode['clientacctype3'] = jsonload.clientacctype3
validatedcode['clientaccno3'] = jsonload.clientaccno3
validatedcode['clientmicrno3'] = jsonload.clientmicrno3
validatedcode['clientifsccode3'] = jsonload.clientifsccode3
validatedcode['defaultbankflag3'] = jsonload.defaultbankflag3
validatedcode['clientacctype4'] = jsonload.clientacctype4
validatedcode['clientaccno4'] = jsonload.clientaccno4
validatedcode['clientmicrno4'] = jsonload.clientmicrno4
validatedcode['clientifsccode4'] = jsonload.clientifsccode4
validatedcode['defaultbankflag4'] = jsonload.defaultbankflag4
validatedcode['clientacctype5'] = jsonload.clientacctype5
validatedcode['clientaccno5'] = jsonload.clientaccno5
validatedcode['clientmicrno5'] = jsonload.clientmicrno5
validatedcode['clientifsccode5'] = jsonload.clientifsccode5
validatedcode['defaultbankflag5'] = jsonload.defaultbankflag5
validatedcode['clientchequename5'] = jsonload.clientchequename5
validatedcode['clientadd1'] = jsonload.clientadd1
validatedcode['clientadd2'] = jsonload.clientadd2
validatedcode['clientadd3'] = jsonload.clientadd3
validatedcode['clientcity'] = jsonload.clientcity
validatedcode['clientstate'] = jsonload.clientstate
validatedcode['clientpincode'] = jsonload.clientpincode
validatedcode['clientcountry'] = jsonload.clientcountry
validatedcode['clientresiphone'] = jsonload.clientresiphone
validatedcode['clientresifax'] = jsonload.clientresifax
validatedcode['clientofficephone'] = jsonload.clientofficephone
validatedcode['clientofficefax'] = jsonload.clientofficefax
validatedcode['clientemail'] = jsonload.clientemail
validatedcode['clientcommmode'] = jsonload.clientcommmode
validatedcode['clientdivpaymode'] = jsonload.clientdivpaymode
validatedcode['clientpan2'] = jsonload.clientpan2
validatedcode['clientpan3'] = jsonload.clientpan3
validatedcode['mapinno'] = jsonload.mapinno
validatedcode['cm_foradd1'] = jsonload.cm_foradd1
validatedcode['cm_foradd2'] = jsonload.cm_foradd2
validatedcode['cm_foradd3'] = jsonload.cm_foradd3
validatedcode['cm_forcity'] = jsonload.cm_forcity
validatedcode['cm_forpincode'] = jsonload.cm_forpincode
validatedcode['cm_forstate'] = jsonload.cm_forstate
validatedcode['cm_forcountry'] = jsonload.cm_forcountry
validatedcode['cm_forresiphone'] = jsonload.cm_forresiphone
validatedcode['cm_forresifax'] = jsonload.cm_forresifax
validatedcode['cm_foroffphone'] = jsonload.cm_foroffphone
validatedcode['cm_forofffax'] = jsonload.cm_forofffax
validatedcode['cm_mobile'] = jsonload.cm_mobile
print(json.loads(validatecode))
'''
| [
"[email protected]"
] | |
1841e5f59a2690cdc2fb0f3fe2a6685186d96ab9 | ce7fb81204902a49786b6b9cbf29647d159f28c3 | /node/SQL/mysql/tbl_vector_style_insert.py | 961d1c50619e01fe8b7ec63c6907d31cdb4ffd3a | [] | no_license | seefs/Source_Insight | cddc11b80b2ffe7d01200867d9db5185521ed201 | 130d807d99333d11a708e59a80b06b0a5377f1a3 | refs/heads/master | 2023-08-16T21:16:19.543498 | 2023-08-06T18:17:11 | 2023-08-06T18:17:11 | 165,578,073 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,831 | py | import pymysql
import re
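# Count adjacent-token transitions (tokens separated by '_') in rows of tbl_base_all and write the counts into the VC columns of tbl_vector.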
# open the database connection
db = pymysql.connect("localhost","root","s0f0s0","jiebanew" )
# get an operation cursor using the cursor() method
cursor = db.cursor()
match_str = '_'
dict = {}
sql_rd = "SELECT * FROM tbl_base_all"
#sql_wr_pre = "UPDATE tbl_vector SET `VC%s` =%s where VID = %s;"
sql_wr_next = "UPDATE tbl_vector SET `VC%s` =%s where VID = %s;"
i_pre = 0
i_next = 0
#p_id = 'p%s_%s'
n_id = 'n%s_%s'
try:
    # execute the SQL statement
cursor.execute(sql_rd)
    # fetch all rows as a list
results = cursor.fetchall()
for row in results:
fslist = row[3]
        line_sents = re.split(match_str,fslist)  # split the field on the '_' delimiter
line_len = len(line_sents)
# print (line_len)
if line_len == 0 :
continue
i_pre = line_sents[0]
for i in range(line_len-1):
i_next = line_sents[i + 1]
# dict[p_id % (str(i_next), str(i_pre))] = dict.get(p_id % (str(i_next), str(i_pre)), 0) + 1
dict[n_id % (str(i_pre), str(i_next))] = dict.get(n_id % (str(i_pre), str(i_next)), 0) + 1
i_pre = i_next
for i in range(17):
for j in range(17):
k = i
m = j
if k == 17 :
k = 26
if m == 17 :
m = 26
# v = dict.get(p_id % (str(m), str(k)), 0)
v = dict.get(n_id % (str(k), str(m)), 0)
if v > 0 :
cursor.execute(sql_wr_next % (str(k), str(v), str(m)))
    # commit the changes to the database
db.commit()
    # print the result
# print (sql_wr_num % (str(1), str(fcid)))
except:
print ("Error: unable to fetch data")
# close the database connection
db.close()
#print ("dict : %s" % str(dict))
| [
"[email protected]"
] | |
29f4630dce6ed1e24ddcb6b1e4a73d2781516456 | 60325d386df03a8a5d2602139b9b6a7422c8e79c | /Python/SOP/SOP_create_gizmo.py | c0be55f579105a0634ea62df51c95a0e96d13af0 | [] | no_license | jsa4000/SideFX-HOUDINI | 98f5aacfda4709a721a7cadb3c61171f1b8012ae | 2366b4b87c0e780dbc9ccecfc8bc04f8e59a01c9 | refs/heads/master | 2021-01-22T09:27:57.533881 | 2017-04-28T11:23:22 | 2017-04-28T11:23:22 | 81,961,380 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | node = hou.pwd()
geo = node.geometry()
# Add code to modify contents of geo.
# Use drop down menu to select examples.
attrib = geo.addAttrib(hou.attribType.Point,"Cd",(0,0,0))
# Vector X (Side)
sidevector = hou.Vector3((1.0,0.0,0.0))
sidevector = sidevector.normalized()
attribSide = geo.addAttrib(hou.attribType.Point,"Side",sidevector)
# Vector Y (Up)
upvector = hou.Vector3((0.0,1.0,0.0))
upvector = upvector.normalized()
attribUp = geo.addAttrib(hou.attribType.Point,"Up",upvector)
# Vector Z (Aim)
aimvector = hou.Vector3((0.0,0.0,1.0))
aimvector = aimvector.normalized()
attribAim = geo.addAttrib(hou.attribType.Point,"Aim",aimvector)
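# Build one two-point polyline per axis (X=red, Y=green, Z=blue) to act as a simple gizmo.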
for i in range(0,3):
initial_pos = (0,0,0)
final_pos = [0,0,0]
final_pos[i] = 3
color = [0,0,0]
color[i] = 1
# Create the polygon
poly = geo.createPolygon()
positions = [initial_pos, final_pos ]
# Create the points and the vertices
for position in positions:
point = geo.createPoint()
point.setAttribValue(attrib, color)
point.setPosition(position)
poly.addVertex(point)
    # Set the poly to open so it displays as an edge
poly.setIsClosed(False)
| [
"[email protected]"
] | |
2fc2533d27af5189679f1be9017cff8d67e35c0d | 02e5b1240db2ef04b4f8b661a9ac4ce060144d74 | /test/geweke_ct_test.py | 5b96979aa97f893c391cdafaa57f3d088f12b1fa | [
"MIT"
] | permissive | jayeshchoudhari/pyhawkes | b3b143a5040730826c23a9b3703159dbeb9bf21d | f4b0e6e3ce7f74e647f0ed2254ea334c22d6e82b | refs/heads/master | 2021-06-12T12:55:54.740142 | 2017-03-27T06:47:16 | 2017-03-27T06:47:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,149 | py | import numpy as np
np.random.seed(1234)
import matplotlib.pyplot as plt
from scipy.stats import gamma, t, probplot
from pyhawkes.models import ContinuousTimeNetworkHawkesModel
from pybasicbayes.util.text import progprint_xrange
def test_geweke():
"""
Create a discrete time Hawkes model and generate from it.
:return:
"""
K = 1
T = 50.0
dt = 1.0
dt_max = 3.0
# network_hypers = {'C': 1, 'p': 0.5, 'kappa': 3.0, 'alpha': 3.0, 'beta': 1.0/20.0}
network_hypers = {'c': np.zeros(K, dtype=np.int), 'p': 0.5, 'kappa': 10.0, 'v': 10*3.0}
bkgd_hypers = {"alpha": 1., "beta": 10.}
model = ContinuousTimeNetworkHawkesModel(K=K, dt_max=dt_max,
network_hypers=network_hypers)
model.generate(T=T)
# Gibbs sample and then generate new data
N_samples = 1000
samples = []
lps = []
for itr in progprint_xrange(N_samples, perline=50):
# Resample the model
model.resample_model()
samples.append(model.copy_sample())
lps.append(model.log_likelihood())
# Geweke step
model.data_list.pop()
model.generate(T=T)
# Compute sample statistics for second half of samples
A_samples = np.array([s.weight_model.A for s in samples])
W_samples = np.array([s.weight_model.W for s in samples])
mu_samples = np.array([s.impulse_model.mu for s in samples])
tau_samples = np.array([s.impulse_model.tau for s in samples])
lambda0_samples = np.array([s.bias_model.lambda0 for s in samples])
lps = np.array(lps)
offset = 0
A_mean = A_samples[offset:, ...].mean(axis=0)
W_mean = W_samples[offset:, ...].mean(axis=0)
mu_mean = mu_samples[offset:, ...].mean(axis=0)
tau_mean = tau_samples[offset:, ...].mean(axis=0)
lambda0_mean = lambda0_samples[offset:, ...].mean(axis=0)
print "A mean: ", A_mean
print "W mean: ", W_mean
print "mu mean: ", mu_mean
print "tau mean: ", tau_mean
print "lambda0 mean: ", lambda0_mean
# Plot the log probability over iterations
plt.figure()
plt.plot(np.arange(N_samples), lps)
plt.xlabel("Iteration")
plt.ylabel("Log probability")
# Plot the histogram of bias samples
plt.figure()
p_lmbda0 = gamma(model.bias_model.alpha, scale=1./model.bias_model.beta)
_, bins, _ = plt.hist(lambda0_samples[:,0], bins=50, alpha=0.5, normed=True)
bincenters = 0.5*(bins[1:]+bins[:-1])
plt.plot(bincenters, p_lmbda0.pdf(bincenters), 'r--', linewidth=1)
plt.xlabel('lam0')
plt.ylabel('p(lam0)')
print "Expected p(A): ", model.network.P
print "Empirical p(A): ", A_samples.mean(axis=0)
# Plot the histogram of weight samples
plt.figure()
Aeq1 = A_samples[:,0,0] == 1
# p_W1 = gamma(model.network.kappa, scale=1./model.network.v[0,0])
# p_W1 = betaprime(model.network.kappa, model.network.alpha, scale=model.network.beta)
p_W1 = gamma(model.network.kappa, scale=1./model.network.v[0,0])
if np.sum(Aeq1) > 0:
_, bins, _ = plt.hist(W_samples[Aeq1,0,0], bins=50, alpha=0.5, normed=True)
bincenters = 0.5*(bins[1:]+bins[:-1])
plt.plot(bincenters, p_W1.pdf(bincenters), 'r--', linewidth=1)
plt.xlabel('W')
plt.ylabel('p(W | A=1)')
# Plot the histogram of impulse precisions
plt.figure()
p_tau = gamma(model.impulse_model.alpha_0, scale=1./model.impulse_model.beta_0)
_, bins, _ = plt.hist(tau_samples[:,0,0], bins=50, alpha=0.5, normed=True)
bincenters = 0.5*(bins[1:]+bins[:-1])
plt.plot(bincenters, p_tau.pdf(bincenters), 'r--', linewidth=1)
plt.xlabel('tau')
plt.ylabel('p(tau)')
# Plot the histogram of impulse means
plt.figure()
p_mu = t(df=2*model.impulse_model.alpha_0,
loc=model.impulse_model.mu_0,
scale=np.sqrt(model.impulse_model.beta_0/(model.impulse_model.alpha_0*model.impulse_model.lmbda_0)))
_, bins, _ = plt.hist(mu_samples[:,0,0], bins=50, alpha=0.5, normed=True)
bincenters = 0.5*(bins[1:]+bins[:-1])
plt.plot(bincenters, p_mu.pdf(bincenters), 'r--', linewidth=1)
plt.xlabel('mu')
plt.ylabel('p(mu)')
plt.show()
def test_sample_nig():
mu_0 = 0.0
lmbda_0 = 10.
alpha_0 = 10.
beta_0 = 10.
    # Directly sample nig and look at marginals
from pyhawkes.utils.utils import sample_nig
mu_samples = \
np.array([sample_nig(mu_0, lmbda_0, alpha_0, beta_0)[0]
for _ in xrange(10000)])
# Plot the histogram of impulse means
plt.figure()
p_mu = t(df=2*alpha_0,
loc=mu_0,
scale=np.sqrt(beta_0/(alpha_0*lmbda_0)))
_, bins, _ = plt.hist(mu_samples, bins=50, alpha=0.5, normed=True)
bincenters = 0.5*(bins[1:]+bins[:-1])
plt.plot(bincenters, p_mu.pdf(bincenters), 'r--', linewidth=1)
plt.xlabel('mu')
plt.ylabel('p(mu)')
plt.figure()
probplot(mu_samples, dist=p_mu, plot=plt.gca())
plt.show()
if __name__ == "__main__":
# test_sample_nig()
test_geweke() | [
"[email protected]"
] | |
744b752fdc73caa1bf598b98daad68d6aa243493 | d2226b6a225e56cab534f4c7afabc3d0ac3b0b3f | /vaemodelHNTriplet.py | ab6c1d701ab28363976271edaeedeb0ad5620223 | [] | no_license | xixiareone/vae-crossmodal-retrieval | 74b384fc676d20d41023a8938d10e33bacc0564a | 31bcc443d2a25ad33803751ee5e3ddd9e5404e5e | refs/heads/master | 2022-12-24T17:03:29.304549 | 2020-10-10T14:50:19 | 2020-10-10T14:50:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,969 | py | #vaemodel
import copy
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.utils import data
from data_loader import DATA_LOADER as dataloader
from dataloader import Flickr30k as dataLoader
import final_classifier as classifier
import models
from torchvision import models as torchModels
from sklearn.neighbors import NearestNeighbors
from sklearn.manifold import TSNE
from scipy.spatial import distance
import numpy as np
import matplotlib.pyplot as plt
class LINEAR_LOGSOFTMAX(nn.Module):
def __init__(self, input_dim, nclass):
super(LINEAR_LOGSOFTMAX, self).__init__()
self.fc = nn.Linear(input_dim,nclass)
self.logic = nn.LogSoftmax(dim=1)
self.lossfunction = nn.NLLLoss()
def forward(self, x):
o = self.logic(self.fc(x))
return o
class Model(nn.Module):
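    # Cross-modal VAE: one encoder/decoder pair per modality (image features and text attributes), trained with reconstruction, cross-reconstruction, KL and hard-negative triplet losses.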
def __init__(self,hyperparameters):
super(Model,self).__init__()
self.device = hyperparameters['device']
self.auxiliary_data_source = hyperparameters['auxiliary_data_source']
self.attr = hyperparameters['attr']
self.all_data_sources = ['resnet_features', 'attributes']
self.DATASET = hyperparameters['dataset']
self.num_shots = hyperparameters['num_shots']
self.latent_size = hyperparameters['latent_size']
self.batch_size = hyperparameters['batch_size']
self.hidden_size_rule = hyperparameters['hidden_size_rule']
self.warmup = hyperparameters['model_specifics']['warmup']
self.generalized = hyperparameters['generalized']
self.classifier_batch_size = 32
#self.img_seen_samples = hyperparameters['samples_per_class'][self.DATASET][0]
#self.att_seen_samples = hyperparameters['samples_per_class'][self.DATASET][1]
#self.att_unseen_samples = hyperparameters['samples_per_class'][self.DATASET][2]
# self.img_unseen_samples = hyperparameters['samples_per_class'][self.DATASET][3]
self.reco_loss_function = hyperparameters['loss']
self.margin = hyperparameters['margin_loss']
self.nepoch = hyperparameters['epochs']
self.lr_cls = hyperparameters['lr_cls']
self.cross_reconstruction = hyperparameters['model_specifics']['cross_reconstruction']
self.cls_train_epochs = hyperparameters['cls_train_steps']
#self.dataset = dataloader(self.DATASET, copy.deepcopy(self.auxiliary_data_source) , device= 'cuda')
self.dataset = dataLoader(copy.deepcopy(self.auxiliary_data_source) , device= 'cuda', attr = self.attr)
if self.DATASET=='CUB':
self.num_classes=200
self.num_novel_classes = 50
elif self.DATASET=='SUN':
self.num_classes=717
self.num_novel_classes = 72
elif self.DATASET=='AWA1' or self.DATASET=='AWA2':
self.num_classes=50
self.num_novel_classes = 10
if self.attr == 'attributes':
feature_dimensions = [2048, self.dataset.K]
elif self.attr == 'bert':
feature_dimensions = [2048, 768] #2048, 768
# Here, the encoders and decoders for all modalities are created and put into dict
self.fc_ft = nn.Linear(2048,2048)
self.fc_ft.to(self.device)
self.ft_bn = nn.BatchNorm1d(2048).to(self.device)
self.fc_at = nn.Linear(self.dataset.K, self.dataset.K)
self.fc_at.to(self.device)
self.at_bn = nn.BatchNorm1d(self.dataset.K).to(self.device)
self.encoder = {}
for datatype, dim in zip(self.all_data_sources,feature_dimensions):
self.encoder[datatype] = models.encoder_template(dim,self.latent_size,self.hidden_size_rule[datatype],self.device)
print(str(datatype) + ' ' + str(dim))
self.decoder = {}
for datatype, dim in zip(self.all_data_sources,feature_dimensions):
self.decoder[datatype] = models.decoder_template(self.latent_size,dim,self.hidden_size_rule[datatype],self.device)
# An optimizer for all encoders and decoders is defined here
parameters_to_optimize = list(self.parameters())
for datatype in self.all_data_sources:
parameters_to_optimize += list(self.encoder[datatype].parameters())
parameters_to_optimize += list(self.decoder[datatype].parameters())
parameters_to_optimize += list(self.fc_ft.parameters())
parameters_to_optimize += list(self.fc_at.parameters())
parameters_to_optimize += list(self.ft_bn.parameters())
parameters_to_optimize += list(self.at_bn.parameters())
self.optimizer = optim.Adam( parameters_to_optimize ,lr=hyperparameters['lr_gen_model'], betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=True)
if self.reco_loss_function=='l2':
self.reconstruction_criterion = nn.MSELoss(size_average=False)
elif self.reco_loss_function=='l1':
self.reconstruction_criterion = nn.L1Loss(size_average=False)
self.triplet_loss = nn.TripletMarginLoss(margin = self.margin)
def reparameterize(self, mu, logvar):
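        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I); when noise is disabled, return mu deterministically.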
if self.reparameterize_with_noise:
sigma = torch.exp(logvar)
eps = torch.cuda.FloatTensor(logvar.size()[0],1).normal_(0,1)
eps = eps.expand(sigma.size())
return mu + sigma*eps
else:
return mu
def forward(self):
pass
def map_label(self,label, classes):
mapped_label = torch.LongTensor(label.size()).to(self.device)
for i in range(classes.size(0)):
mapped_label[label==classes[i]] = i
return mapped_label
def trainstep(self, img, att):
##############################################
# Encode image features and additional
# features
##############################################
img_in = F.normalize(img, p=2, dim=1)
img_in = self.ft_bn(img_in)
img_in = self.fc_ft(img_in)
att_in = F.normalize(att, p=2, dim=1)
att_in = self.at_bn(att_in)
att_in = self.fc_at(att_in)
#Add non-linearity?
mu_img, logvar_img = self.encoder['resnet_features'](img_in)
z_from_img = self.reparameterize(mu_img, logvar_img)
mu_att, logvar_att = self.encoder[self.auxiliary_data_source](att_in)
z_from_att = self.reparameterize(mu_att, logvar_att)
##############################################
# Reconstruct inputs
##############################################
img_from_img = self.decoder['resnet_features'](z_from_img)
att_from_att = self.decoder[self.auxiliary_data_source](z_from_att)
reconstruction_loss = self.reconstruction_criterion(img_from_img, img) \
+ self.reconstruction_criterion(att_from_att, att)
##############################################
# Cross Reconstruction Loss
##############################################
img_from_att = self.decoder['resnet_features'](z_from_att)
att_from_img = self.decoder[self.auxiliary_data_source](z_from_img)
cross_reconstruction_loss = self.reconstruction_criterion(img_from_att, img) \
+ self.reconstruction_criterion(att_from_img, att)
##############################################
# KL-Divergence
##############################################
KLD = (0.5 * torch.sum(1 + logvar_att - mu_att.pow(2) - logvar_att.exp())) \
+ (0.5 * torch.sum(1 + logvar_img - mu_img.pow(2) - logvar_img.exp()))
##############################################
# Distribution Alignment
##############################################
#distance = torch.sqrt(torch.sum((mu_img - mu_att) ** 2, dim=1) + \
# torch.sum((torch.sqrt(logvar_img.exp()) - torch.sqrt(logvar_att.exp())) ** 2, dim=1))
#distance = distance.sum()
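        # Hard-negative mining: rotate the batch to form negative pairs and keep the largest (hardest) triplet loss in each direction.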
lossI = []
lossT = []
for j in range(0, mu_img.shape[0]-1):
perm = torch.arange(0, mu_img.shape[0])
perm = perm + j
perm[perm > (mu_img.shape[0]-1)] = perm[perm > (mu_img.shape[0]-1)]%(mu_img.shape[0])
mu_att_perm = mu_att[perm]
mu_img_perm = mu_img[perm]
perm.cpu()
lossI.append(self.triplet_loss(mu_img, mu_att, mu_att_perm))
lossT.append(self.triplet_loss(mu_att, mu_img, mu_img_perm))
mu_img_perm.cpu()
mu_att_perm.cpu()
losI = max(lossI)
losT = max(lossT)
distance = losI + losT
##############################################
# scale the loss terms according to the warmup
# schedule
##############################################
f1 = 1.0*(self.current_epoch - self.warmup['cross_reconstruction']['start_epoch'] )/(1.0*( self.warmup['cross_reconstruction']['end_epoch']- self.warmup['cross_reconstruction']['start_epoch']))
f1 = f1*(1.0*self.warmup['cross_reconstruction']['factor'])
cross_reconstruction_factor = torch.cuda.FloatTensor([min(max(f1,0),self.warmup['cross_reconstruction']['factor'])])
f2 = 1.0 * (self.current_epoch - self.warmup['beta']['start_epoch']) / ( 1.0 * (self.warmup['beta']['end_epoch'] - self.warmup['beta']['start_epoch']))
f2 = f2 * (1.0 * self.warmup['beta']['factor'])
beta = torch.cuda.FloatTensor([min(max(f2, 0), self.warmup['beta']['factor'])])
f3 = 1.0*(self.current_epoch - self.warmup['distance']['start_epoch'] )/(1.0*( self.warmup['distance']['end_epoch']- self.warmup['distance']['start_epoch']))
f3 = f3*(1.0*self.warmup['distance']['factor'])
distance_factor = torch.cuda.FloatTensor([min(max(f3,0),self.warmup['distance']['factor'])])
##############################################
# Put the loss together and call the optimizer
##############################################
self.optimizer.zero_grad()
loss = reconstruction_loss - beta * KLD
if cross_reconstruction_loss>0:
loss += cross_reconstruction_factor*cross_reconstruction_loss
if distance_factor >0:
loss += distance_factor*distance
loss.backward()
self.optimizer.step()
return loss.item(), reconstruction_loss, beta*KLD, cross_reconstruction_factor*cross_reconstruction_loss, distance_factor*distance
def train_vae(self):
elosses = []
lossesR = []
lossesK = []
lossesC = []
lossesD = []
self.dataloader = data.DataLoader(self.dataset,batch_size=self.batch_size,shuffle= True,drop_last=True)#,num_workers = 4)
#self.dataset.novelclasses =self.dataset.novelclasses.long().cuda()
#self.dataset.seenclasses =self.dataset.seenclasses.long().cuda()
#leave both statements
self.train()
self.fc_ft.train()
self.fc_at.train()
self.reparameterize_with_noise = True
metricsI = []
metricsT = []
print('train for reconstruction')
for epoch in range(0, self.nepoch ):
self.train()
self.current_epoch = epoch
losses = []
ilossesR = []
ilossesK = []
ilossesC = []
ilossesD = []
i=-1
y = 0
for iters in range(0, len(self.dataset), self.batch_size):
#for iters in range(0, 1000, self.batch_size):
i+=1
                features, attributes, idxs = self.dataset.next_batch(self.batch_size) # if this is not the test set, drop the y
data_from_modalities = [features, attributes.type(torch.FloatTensor)]
for j in range(len(data_from_modalities)):
data_from_modalities[j] = data_from_modalities[j].to(self.device)
data_from_modalities[j].requires_grad = False
loss, lossR, lossK, lossC, lossD = self.trainstep(data_from_modalities[0], data_from_modalities[1] )
if i%10==0:
print('epoch ' + str(epoch) + ' | iter ' + str(i) + '\t'+
' | loss ' + str(loss))
losses.append(loss)
ilossesR.append(lossR)
ilossesK.append(lossK)
ilossesC.append(lossC)
ilossesD.append(lossD)
idxs = idxs.cpu()
attributes = attributes.cpu()
y += 1
y = 0
mean_loss = sum(losses)/len(losses)
elosses.append(mean_loss)
print('epoch ' + str(epoch) + 'Loss: ' + str(loss))
lossesR.append(sum(ilossesR)/len(ilossesR))
lossesK.append(sum(ilossesK)/len(ilossesK))
lossesC.append(sum(ilossesC)/len(ilossesC))
lossesD.append(sum(ilossesD)/len(ilossesD))
for j in range(len(data_from_modalities)):
data_from_modalities[j] = data_from_modalities[j].cpu()
print('Generating gallery set...')
self.generate_gallery()
print('Generating t-SNE plot...')
z_imgs_embedded = TSNE(n_components=2).fit_transform(self.gallery_imgs_z.clone().cpu().detach())
z_attrs_embedded = TSNE(n_components=2).fit_transform(self.gallery_attrs_z.clone().cpu().detach())
plt.scatter(z_imgs_embedded[:,0], z_imgs_embedded[:,1], c = 'red')
plt.scatter(z_attrs_embedded[:,0], z_attrs_embedded[:,1], c = 'blue')
filename = 't-sne-plot-epoch'+str(epoch)+'.png'
plt.savefig(filename)
plt.clf()
print('Evaluating retrieval...')
metricsIepoch, metricsTepoch = self.retrieval()
metricsI.append([metricsIepoch[0], metricsIepoch[1], metricsIepoch[2]])
metricsT.append([metricsTepoch[0], metricsTepoch[1], metricsTepoch[2]])
print('Evaluation Metrics for image retrieval')
print("R@1: {}, R@5: {}, R@10: {}, R@50: {}, R@100: {}, MEDR: {}, MEANR: {}".format(metricsIepoch[0], metricsIepoch[1], metricsIepoch[2], metricsIepoch[3], metricsIepoch[4], metricsIepoch[5], metricsIepoch[6]))
print('Evaluation Metrics for caption retrieval')
print("R@1: {}, R@5: {}, R@10: {}, R@50: {}, R@100: {}, MEDR: {}, MEANR: {}".format(metricsTepoch[0], metricsTepoch[1], metricsTepoch[2], metricsTepoch[3], metricsTepoch[4], metricsTepoch[5], metricsTepoch[6]))
# turn into evaluation mode:
for key, value in self.encoder.items():
self.encoder[key].eval()
for key, value in self.decoder.items():
self.decoder[key].eval()
import os
file_name = "losses-HNTriplet.png"
file_name2 = 'metrics-HNTriplet.png'
if os.path.isfile(file_name):
expand = 1
while True:
expand += 1
new_file_name = file_name.split(".png")[0] + str(expand) + ".png"
new_file_name2 = file_name2.split('.png')[0] + str(expand) + '.png'
if os.path.isfile(new_file_name):
continue
else:
file_name = new_file_name
file_name2 = new_file_name2
break
#Plot de les losses i desar-lo
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.plot(np.arange(self.nepoch), elosses, label="Total loss")
plt.plot(np.arange(self.nepoch), lossesR, label = 'Reconstruction loss')
plt.plot(np.arange(self.nepoch), lossesK, label = 'KL Divergence loss')
plt.plot(np.arange(self.nepoch), lossesC, label = 'Cross-Reconstruction loss')
plt.plot(np.arange(self.nepoch), lossesD, label = 'Hard Negative Triplet loss')
plt.legend()
plt.show()
plt.savefig(file_name)
plt.clf()
#Plot de les metrics
plt.xlabel('Epoch')
plt.ylabel('Metric')
plt.plot(np.arange(self.nepoch), metricsI[0], label = 'T2I R@1')
plt.plot(np.arange(self.nepoch), metricsI[1], label = 'T2I R@5')
plt.plot(np.arange(self.nepoch), metricsT[0], label = 'I2T R@1')
plt.plot(np.arange(self.nepoch), metricsT[1], label = 'I2T R@5')
plt.legend()
plt.show()
plt.savefig(file_name2)
plt.clf()
return losses, metricsI, metricsT
def retrieval(self):
self.eval()
def lda(self, x, y):
distance = torch.sqrt(torch.sum((x[0] - y[0]) ** 2, dim=1) + \
torch.sum((torch.sqrt(x[1].exp()) - torch.sqrt(y[1].exp())) ** 2, dim=1))
return distance
#nbrsI = NearestNeighbors(n_neighbors=self.dataset.ntest, algorithm='auto').fit(self.gallery_imgs_z.cpu().detach().numpy())
#nbrsI = NearestNeighbors(n_neighbors=1000, algorithm='auto').fit(self.gallery_imgs_z.cpu().detach().numpy())
#nbrsT = NearestNeighbors(n_neighbors=self.dataset.ntest, algorithm='auto').fit(self.gallery_attrs_z.cpu().detach().numpy())
#nbrsT = NearestNeighbors(n_neighbors=5000, algorithm='auto').fit(self.gallery_attrs_z.cpu().detach().numpy())
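        # For each test item, rank the gallery by cosine distance between latent means and record the rank of the ground-truth match.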
distI_dict = {}
distT_dict = {}
ranksI = np.zeros((1,5*self.dataset.ntest))
ranksT = np.zeros((1,self.dataset.ntest))
for i in range(0, self.dataset.ntest):
#for i in range(0, 500):
mu_img = self.gallery_imgs_z[i,:].unsqueeze(0)
mu_att = self.gallery_attrs_z[5*i:5*i + 5,:]
#Add L2 norm and BatchNorm?
#im_ft = F.normalize(im_ft, p=2, dim = 1)
#im_ft = self.ft_bn(im_ft)
#im_ft = self.fc_ft(im_ft)
#attr = F.normalize(attr.type(torch.FloatTensor).to(self.device), p=2, dim=1)
#attr = self.at_bn(attr)
#attr = self.fc_at(attr)
#mu_img, logvar_img = self.encoder['resnet_features'](im_ft)
#z_from_img = self.reparameterize(mu_img, logvar_img)
#mu_att, logvar_att = self.encoder['attributes'](attr.type(torch.FloatTensor).to(self.device))
#z_from_att = self.reparameterize(mu_att, logvar_att)
#img = [mu_img.cpu().detach().numpy(), logvar_img.cpu().detach().numpy()]
#att = [mu_att.cpu().detach().numpy(), logvar_att.cpu().detach().numpy()]
distancesI = distance.cdist(mu_att.cpu().detach().numpy(), self.gallery_imgs_z.cpu().detach().numpy(), 'cosine')
distancesT = distance.cdist(mu_img.cpu().detach().numpy(), self.gallery_attrs_z.cpu().detach().numpy(), 'cosine')
indicesI = np.argsort(distancesI)
indicesT = np.argsort(distancesT[0,:])
for z in range(0,5):
if len(indicesI[z] == i) != 0:
ranksI[:,(5*i) + z] = np.where(indicesI[z] == i)[0][0]
else:
ranksI[:,(5*i) + z] = 1000
if len(np.where((indicesT >= 5*i) & (indicesT <= ((5*i) + 4)))) != 0:
ranksT[:,i] = np.where((indicesT >= 5*i) & (indicesT <= ((5*i) + 4)))[0][0]
else:
ranksT[:,i] = 1000
'''
for z in range(0,5):
if len(np.where((indicesI[z] >= 5*i) & (indicesI[z] <= (5*i+4)))[0]) != 0:
ranksI[:,5*i + z] = np.where((indicesI[z] >= 5*i) & (indicesI[z] <= (5*i+4)))[0][0]
else:
ranksI[:,5*i + z] = 5000
if len(np.where((indicesT[0] > (5*i-1)) & (indicesT[0] < (5*i+5)))) != 0:
ranksT[:,i] = np.where((indicesT[0] > (5*i-1)) & (indicesT[0] < (5*i+5)))[0][0]
else:
ranksT[:,i] = 1000
'''
'''
for z in range(0,5):
if len(np.where((indicesT[z] >= 5*i) & (indicesT[z] <= (5*i+4)))[0]) != 0:
ranksT[:,5*i + z] = np.where((indicesT[z] >= 5*i) & (indicesT[z] <= (5*i+4)))[0][0]
else:
ranksT[:,5*i + z] = 5000
if len(np.where(indicesI[0] == i)) != 0:
ranksI[:,i] = np.where(indicesI[0] == i)[0][0]
else:
ranksI[:,i] = 1000
'''
r1im = 100.0 * len(np.where(ranksI < 1)[1]) / len(ranksI[0,:])
r5im = 100.0 * len(np.where(ranksI < 5)[1]) / len(ranksI[0,:])
r10im = 100.0 * len(np.where(ranksI < 10)[1]) / len(ranksI[0,:])
r50im = 100.0 * len(np.where(ranksI < 50)[1]) / len(ranksI[0,:])
r100im = 100.0 * len(np.where(ranksI < 100)[1]) / len(ranksI[0,:])
r1t = 100.0 * len(np.where(ranksT < 1)[1]) / len(ranksT[0,:])
r5t = 100.0 * len(np.where(ranksT < 5)[1]) / len(ranksT[0,:])
r10t = 100.0 * len(np.where(ranksT < 10)[1]) / len(ranksT[0,:])
r50t = 100.0 * len(np.where(ranksT < 50)[1]) / len(ranksT[0,:])
r100t = 100.0 * len(np.where(ranksT < 100)[1]) / len(ranksT[0,:])
medrI = np.floor(np.median(ranksI)) + 1
meanrI = ranksI.mean() + 1
medrT = np.floor(np.median(ranksT)) + 1
meanrT = ranksT.mean() + 1
metricsI = [r1im, r5im, r10im, r50im, r100im, medrI, meanrI]
metricsT = [r1t, r5t, r10t, r50t, r100t, medrT, meanrT]
return metricsI, metricsT
def generate_gallery(self):
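        # Encode every test image and caption into the latent space and cache the results as the retrieval gallery.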
self.eval()
z_imgs = []
z_vars_im = []
z_attrs = []
z_vars_att = []
rec_imgs = []
rec_attrs = []
y = 0
i=-1
for iters in range(0, self.dataset.ntest, 50):
#for iters in range(0, 500, 50):
i+=1
features, attributes, idxs = self.dataset.next_batch_test(50, y)
idxs = idxs.cpu()
data_from_modalities = [features, attributes.type(torch.FloatTensor)]
for j in range(len(data_from_modalities)):
data_from_modalities[j] = data_from_modalities[j].to(self.device)
data_from_modalities[j].requires_grad = False
if j== 0:
#Add L2 norm and BatchNorm?
data_from_modalities[j] = F.normalize(data_from_modalities[j], p=2, dim=1)
data_from_modalities[j] = self.ft_bn(data_from_modalities[j])
data_from_modalities[j] = self.fc_ft(data_from_modalities[j])
elif j == 1:
#Add L2 norm and BatchNorm?
data_from_modalities[j] = F.normalize(data_from_modalities[j], p=2, dim=1)
data_from_modalities[j] = self.at_bn(data_from_modalities[j])
data_from_modalities[j] = self.fc_at(data_from_modalities[j])
mu_img, logvar_img = self.encoder['resnet_features'](data_from_modalities[0])
z_from_img = self.reparameterize(mu_img, logvar_img)
mu_att, logvar_att = self.encoder['attributes'](data_from_modalities[1])
z_from_att = self.reparameterize(mu_att, logvar_att)
if y == 0:
z_imgs = z_from_img.cpu()
z_vars_im = logvar_img.cpu()
z_attrs = z_from_att.cpu()
z_vars_att = logvar_att.cpu()
else:
z_imgs = torch.cat((z_imgs.cpu(),z_from_img.cpu()), dim = 0).cpu()
z_vars_im = torch.cat((z_vars_im.cpu(),logvar_img.cpu()), dim = 0).cpu()
z_attrs = torch.cat((z_attrs.cpu(),z_from_att.cpu()), dim = 0).cpu()
z_vars_att = torch.cat((z_vars_att.cpu(),logvar_att.cpu()), dim = 0).cpu()
y = y + 1
print('iter: '+str(iters))
self.gallery_imgs_z = z_imgs.cpu()
self.gallery_vars_im = z_vars_im.cpu()
print(self.gallery_imgs_z.size())
self.gallery_attrs_z = z_attrs.cpu()
self.gallery_vars_att = z_vars_att.cpu()
print(self.gallery_attrs_z.size())
def train_classifier(self, show_plots=False):
if self.num_shots > 0 :
print('================ transfer features from test to train ==================')
self.dataset.transfer_features(self.num_shots, num_queries='num_features')
history = [] # stores accuracies
cls_seenclasses = self.dataset.seenclasses
cls_novelclasses = self.dataset.novelclasses
train_seen_feat = self.dataset.data['train_seen']['resnet_features']
train_seen_label = self.dataset.data['train_seen']['labels']
novelclass_aux_data = self.dataset.novelclass_aux_data # access as novelclass_aux_data['resnet_features'], novelclass_aux_data['attributes']
seenclass_aux_data = self.dataset.seenclass_aux_data
novel_corresponding_labels = self.dataset.novelclasses.long().to(self.device)
seen_corresponding_labels = self.dataset.seenclasses.long().to(self.device)
# The resnet_features for testing the classifier are loaded here
novel_test_feat = self.dataset.data['test_unseen'][
'resnet_features'] # self.dataset.test_novel_feature.to(self.device)
seen_test_feat = self.dataset.data['test_seen'][
'resnet_features'] # self.dataset.test_seen_feature.to(self.device)
test_seen_label = self.dataset.data['test_seen']['labels'] # self.dataset.test_seen_label.to(self.device)
test_novel_label = self.dataset.data['test_unseen']['labels'] # self.dataset.test_novel_label.to(self.device)
train_unseen_feat = self.dataset.data['train_unseen']['resnet_features']
train_unseen_label = self.dataset.data['train_unseen']['labels']
# in ZSL mode:
if self.generalized == False:
# there are only 50 classes in ZSL (for CUB)
# novel_corresponding_labels =list of all novel classes (as tensor)
# test_novel_label = mapped to 0-49 in classifier function
# those are used as targets, they have to be mapped to 0-49 right here:
novel_corresponding_labels = self.map_label(novel_corresponding_labels, novel_corresponding_labels)
if self.num_shots > 0:
# not generalized and at least 1 shot means normal FSL setting (use only unseen classes)
train_unseen_label = self.map_label(train_unseen_label, cls_novelclasses)
# for FSL, we train_seen contains the unseen class examples
# for ZSL, train seen label is not used
# if self.num_shots>0:
# train_seen_label = self.map_label(train_seen_label,cls_novelclasses)
test_novel_label = self.map_label(test_novel_label, cls_novelclasses)
# map cls novelclasses last
cls_novelclasses = self.map_label(cls_novelclasses, cls_novelclasses)
if self.generalized:
print('mode: gzsl')
clf = LINEAR_LOGSOFTMAX(self.latent_size, self.num_classes)
else:
print('mode: zsl')
clf = LINEAR_LOGSOFTMAX(self.latent_size, self.num_novel_classes)
clf.apply(models.weights_init)
with torch.no_grad():
####################################
# preparing the test set
# convert raw test data into z vectors
####################################
self.reparameterize_with_noise = False
mu1, var1 = self.encoder['resnet_features'](novel_test_feat)
test_novel_X = self.reparameterize(mu1, var1).to(self.device).data
test_novel_Y = test_novel_label.to(self.device)
mu2, var2 = self.encoder['resnet_features'](seen_test_feat)
test_seen_X = self.reparameterize(mu2, var2).to(self.device).data
test_seen_Y = test_seen_label.to(self.device)
####################################
# preparing the train set:
# chose n random image features per
# class. If n exceeds the number of
# image features per class, duplicate
# some. Next, convert them to
# latent z features.
####################################
self.reparameterize_with_noise = True
def sample_train_data_on_sample_per_class_basis(features, label, sample_per_class):
sample_per_class = int(sample_per_class)
if sample_per_class != 0 and len(label) != 0:
classes = label.unique()
for i, s in enumerate(classes):
features_of_that_class = features[label == s, :] # order of features and labels must coincide
# if number of selected features is smaller than the number of features we want per class:
multiplier = torch.ceil(torch.cuda.FloatTensor(
[max(1, sample_per_class / features_of_that_class.size(0))])).long().item()
features_of_that_class = features_of_that_class.repeat(multiplier, 1)
if i == 0:
features_to_return = features_of_that_class[:sample_per_class, :]
labels_to_return = s.repeat(sample_per_class)
else:
features_to_return = torch.cat(
(features_to_return, features_of_that_class[:sample_per_class, :]), dim=0)
labels_to_return = torch.cat((labels_to_return, s.repeat(sample_per_class)),
dim=0)
return features_to_return, labels_to_return
else:
return torch.cuda.FloatTensor([]), torch.cuda.LongTensor([])
# some of the following might be empty tensors if the specified number of
# samples is zero :
img_seen_feat, img_seen_label = sample_train_data_on_sample_per_class_basis(
train_seen_feat,train_seen_label,self.img_seen_samples )
img_unseen_feat, img_unseen_label = sample_train_data_on_sample_per_class_basis(
train_unseen_feat, train_unseen_label, self.img_unseen_samples )
att_unseen_feat, att_unseen_label = sample_train_data_on_sample_per_class_basis(
novelclass_aux_data,
novel_corresponding_labels,self.att_unseen_samples )
att_seen_feat, att_seen_label = sample_train_data_on_sample_per_class_basis(
seenclass_aux_data,
seen_corresponding_labels, self.att_seen_samples)
def convert_datapoints_to_z(features, encoder):
if features.size(0) != 0:
mu_, logvar_ = encoder(features)
z = self.reparameterize(mu_, logvar_)
return z
else:
return torch.cuda.FloatTensor([])
z_seen_img = convert_datapoints_to_z(img_seen_feat, self.encoder['resnet_features'])
z_unseen_img = convert_datapoints_to_z(img_unseen_feat, self.encoder['resnet_features'])
z_seen_att = convert_datapoints_to_z(att_seen_feat, self.encoder[self.auxiliary_data_source])
z_unseen_att = convert_datapoints_to_z(att_unseen_feat, self.encoder[self.auxiliary_data_source])
train_Z = [z_seen_img, z_unseen_img ,z_seen_att ,z_unseen_att]
train_L = [img_seen_label , img_unseen_label,att_seen_label,att_unseen_label]
# empty tensors are sorted out
train_X = [train_Z[i] for i in range(len(train_Z)) if train_Z[i].size(0) != 0]
train_Y = [train_L[i] for i in range(len(train_L)) if train_Z[i].size(0) != 0]
train_X = torch.cat(train_X, dim=0)
train_Y = torch.cat(train_Y, dim=0)
############################################################
##### initializing the classifier and train one epoch
############################################################
cls = classifier.CLASSIFIER(clf, train_X, train_Y, test_seen_X, test_seen_Y, test_novel_X,
test_novel_Y,
cls_seenclasses, cls_novelclasses,
self.num_classes, self.device, self.lr_cls, 0.5, 1,
self.classifier_batch_size,
self.generalized)
for k in range(self.cls_train_epochs):
if k > 0:
if self.generalized:
cls.acc_seen, cls.acc_novel, cls.H = cls.fit()
else:
cls.acc = cls.fit_zsl()
if self.generalized:
print('[%.1f] novel=%.4f, seen=%.4f, h=%.4f , loss=%.4f' % (
k, cls.acc_novel, cls.acc_seen, cls.H, cls.average_loss))
history.append([torch.tensor(cls.acc_seen).item(), torch.tensor(cls.acc_novel).item(),
torch.tensor(cls.H).item()])
else:
print('[%.1f] acc=%.4f ' % (k, cls.acc))
history.append([0, torch.tensor(cls.acc).item(), 0])
if self.generalized:
return torch.tensor(cls.acc_seen).item(), torch.tensor(cls.acc_novel).item(), torch.tensor(
cls.H).item(), history
else:
return 0, torch.tensor(cls.acc).item(), 0, history
| [
"[email protected]"
] | |
cd694d8462a1f2b71eb76f7f4cdc2aca77dba37f | b6fc54cff7037f5e4ef26cb4a645d5ea5a6fecdf | /001146StepikPyBegin/Stepik001146PyBeginсh09p03st05TASK04_20210127.py | 82cd17e8322c2feb3ea34e49298c9d2e4d3f1eaa | [
"Apache-2.0"
] | permissive | SafonovMikhail/python_000577 | 5483eaf2f7c73bc619ce1f5de67d8d689d2e7dd4 | f2dccac82a37df430c4eb7425b5d084d83520409 | refs/heads/master | 2022-12-08T10:53:57.202746 | 2022-12-07T09:09:51 | 2022-12-07T09:09:51 | 204,713,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py | s1 = 'a'
s2 = s1.upper()
print(s1, s2)
| [
"[email protected]"
] | |
454486b360222d0be71121c0c3b0766b377b6de8 | 4369c5a214f8c4fb1f8a286f72d57cfa9c3f02c7 | /geotrek/tourism/migrations/0004_auto_20190322_1908.py | f03a350395c6ef760026fa83bddfb5dc1f73f878 | [
"BSD-2-Clause"
] | permissive | GeotrekCE/Geotrek-admin | c13d251066e92359c26f22d185b8bd2e26e622ef | a91b75261a876be51ad2a693618629900bea6003 | refs/heads/master | 2023-08-21T12:45:25.586551 | 2023-08-09T12:28:33 | 2023-08-09T12:28:33 | 9,886,107 | 71 | 56 | BSD-2-Clause | 2023-09-13T09:40:33 | 2013-05-06T12:17:21 | Python | UTF-8 | Python | false | false | 1,182 | py | # Generated by Django 1.11.14 on 2019-03-22 18:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tourism', '0003_auto_20190306_1417'),
]
operations = [
migrations.AlterModelOptions(
name='touristiccontenttype1',
options={'verbose_name': 'Type1', 'verbose_name_plural': 'First list types'},
),
migrations.AlterModelOptions(
name='touristiccontenttype2',
options={'verbose_name': 'Type2', 'verbose_name_plural': 'Second list types'},
),
migrations.AlterField(
model_name='touristiccontent',
name='type1',
field=models.ManyToManyField(blank=True, db_table='t_r_contenu_touristique_type1', related_name='contents1', to='tourism.TouristicContentType1', verbose_name='Type 1'),
),
migrations.AlterField(
model_name='touristiccontent',
name='type2',
field=models.ManyToManyField(blank=True, db_table='t_r_contenu_touristique_type2', related_name='contents2', to='tourism.TouristicContentType2', verbose_name='Type 2'),
),
]
| [
"[email protected]"
] | |
3b9e645996132a272316f8928612a8d743e4ee43 | 026f12a5fdd4b3bfee00713091267aaef71047c1 | /end/demo1/bookdemo/polls/forms.py | 59f1ce8aebd950796aeffb0769d69f030a0e8aaa | [] | no_license | zzy0371/py1911project | 64c64413ea0107926ae81479adc27da87ee04767 | 7ce2a2acfc1dade24e6e7f8763fceb809fabd7a1 | refs/heads/master | 2023-01-08T07:51:13.388203 | 2020-03-19T03:31:33 | 2020-03-19T03:31:33 | 239,649,431 | 0 | 1 | null | 2023-01-05T09:04:53 | 2020-02-11T01:22:35 | JavaScript | UTF-8 | Python | false | false | 1,112 | py | from django import forms
from .models import User
class LoginForm(forms.Form):
"""
    Login form definition, used to render the HTML login form.
"""
username = forms.CharField(max_length=150,min_length=3,
label="输入用户名",
help_text="用户名最小6,最大150",)
password = forms.CharField(min_length=3,max_length=50,
widget=forms.PasswordInput,
help_text="密码最小6,最大50",label="输入密码")
class RegistForm(forms.ModelForm):
"""
    Registration form definition, used to render the HTML registration form.
"""
password2 = forms.CharField(widget=forms.PasswordInput,label="重复密码")
class Meta:
model = User
fields = ["username","password"]
labels = {
"username":"输入用户名",
"password":"输入密码"
}
help_texts = {
"username": "长度>3 <150",
"password": "长度>3 <150"
}
widgets = {
"password":forms.PasswordInput
}
| [
"[email protected]"
] | |
6eea14cd8ff6d3489b18e1c0a58b2528c6b0370c | a4deea660ea0616f3b5ee0b8bded03373c5bbfa2 | /concrete_instances/register-variants/vrsqrtps_xmm_xmm/instructions/vrsqrtps_xmm_xmm/vrsqrtps_xmm_xmm.gen.vex.py | 9f8d295f1eb314cbe69055e099f7208efc7b0bdf | [] | no_license | Vsevolod-Livinskij/x86-64-instruction-summary | 4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd | c276edab1b19e3929efb3ebe7514489f66087764 | refs/heads/master | 2022-02-02T18:11:07.818345 | 2019-01-25T17:19:21 | 2019-01-25T17:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | import angr
proj = angr.Project('./instructions/vrsqrtps_xmm_xmm/vrsqrtps_xmm_xmm.o')
print proj.arch
print proj.entry
print proj.filename
irsb = proj.factory.block(proj.entry).vex
irsb.pp() | [
"[email protected]"
] | |
b63fa9d1cd2d5f05bba5cf59b8e7584e386ff9ef | 25d081c82bf9adc2a8d96c254df0239a9f982a71 | /tools/file_search.py | 11469d4167adeef4ce416653eba9b2256b77e6d2 | [
"MIT"
] | permissive | asiekierka/z2 | f102de582aaa9fc51b6b598a1fb07c58be4f540f | d926408423dc98d71d5e7fc2fda3202c03c309de | refs/heads/master | 2021-06-15T15:09:41.614135 | 2021-02-23T02:44:54 | 2021-02-23T02:44:54 | 146,348,922 | 1 | 0 | MIT | 2018-08-27T20:14:46 | 2018-08-27T20:14:46 | null | UTF-8 | Python | false | false | 2,140 | py | import django
import os
import re
import sys
import zipfile
sys.path.append("/var/projects/museum")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "museum.settings")
django.setup()
from museum_site.models import File # noqa: E402
from museum_site.constants import SITE_ROOT # noqa: E402
def main():
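    # Interactively prompt for include/exclude regexes and list matching members of each File's zip archive.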
shortcuts = {
"1": "(.*).[zZ][zZ][tT]$",
"2": "(.*).[sS][zZ][tT]$",
"3": "(.*).[zZ][iI][gG]$",
"4": "(.*).[zZ][zZ][mM]$",
"5": "(.*).[tT][xX][tT]$",
"6": "(.*).[dD][oO][cC]$",
"7": "(.*).[bB][rR][dD]$",
"8": "(.*).[eE][xX][eE]$",
"9": "(.*).[cC][oO][mM]$",
}
length = len(list(shortcuts.keys()))
print("SHORTCUTS")
for x in range(1, length + 1):
print(x, shortcuts[str(x)])
contains_regex = input("Regex to list Files that do match: ")
if contains_regex in shortcuts.keys():
contains_regex = shortcuts[contains_regex]
lacks_regex = input("Regex to list Files that don't match: ")
if lacks_regex in list(shortcuts.keys()):
lacks_regex = shortcuts[lacks_regex]
files = File.objects.all().order_by("letter", "title")
lacks_matches = ""
contains_matches = ""
print("LACKS", lacks_regex)
for f in files:
letter = f.letter
fn = f.filename
# Open the zip
try:
zf = zipfile.ZipFile(
os.path.join(SITE_ROOT, "zgames", (letter + "/" + fn))
)
except Exception as e:
# The audit script should handle missing/invalid zips, not this.
print(e)
continue
# Print files
lack_fail = False
try:
file_list = zf.namelist()
for zfn in file_list:
if contains_regex:
if (re.match(contains_regex, zfn)):
print(fn, zfn)
if lacks_regex:
if (not re.match(lacks_regex, zfn)):
print(fn, zfn)
except Exception as e:
print(e)
continue
return True
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
de3366ed069ff8116b3bfe89dc977aaa79c406f6 | ea35facf6d823e93706b5f551408250b1e089be9 | /共通問題/14_12.py | d02d6ce9cf4df2e35aed12e8c7dfc19af3116257 | [] | no_license | YukiNGSM/PythonStudy | 7a2d24f4762e384531eadd691858296b00b6a6b3 | 26310d0e007745ff4920ccd0fc3e51771cb2d5f1 | refs/heads/master | 2023-07-19T00:06:29.061255 | 2021-09-22T01:29:49 | 2021-09-22T01:29:49 | 409,025,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | address_str = input("住所を入力してください:")
index = 0
if address_str.find("東京都") != -1:
index = address_str.find("都")
elif address_str.find("北海道") != -1:
index = address_str.find("道")
elif address_str.find("大阪府") != -1 or address_str.find("京都府") != -1:
index = address_str.find("府")
elif address_str.find("県") != -1:
index = address_str.find("県")
# Print the part of the address that follows the prefecture name
print(address_str[index+1:]) | [
"[email protected]"
] | |
2206ac9cfa6da743114677db3d089f306b0497aa | 3f9e960174cfc5c8bd6827ce5362124c467a3952 | /python/prep/find-max.py | e9a8c5f30d079fb61552d3949305b9912db251da | [] | no_license | monobinab/python | f3ec6d462d7149c007ac9e14e72132eae73b4acd | 265621b045969c819eb86fa7ba2a3bdfad34ecb6 | refs/heads/master | 2020-12-03T00:04:29.185880 | 2017-07-01T18:53:11 | 2017-07-01T20:06:16 | 95,982,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,057 | py | # some functions to work with loop
def find_max(data):
biggest = data[0];
print(biggest)
for val in data:
if val > biggest:
biggest = val;
return biggest;
print(find_max([1000, 1, 2, 3, 7, 7, 100, 4]))
def find_min(data):
smallest = data[0];
for val in data:
if val < smallest:
smallest = val;
return smallest;
print(find_min([0, 2, -3, 9]))
# for loop using index
def find_max_index(data):
max_index = 0;
for i in range(len(data)):
if data[i] > data[max_index]:
max_index = i;
return max_index;
print(find_max_index([-100, 1, 2, 3, 6, 7]))
# checks if a value exists
def if_exist(data, target):
print(data)
print(target)
found = False;
for val in data:
if val == target:
found = True;
break;
return found;
print(if_exist("abcde", "a"))
print(if_exist([0, 1, 2, 3, 4, 5], 2))
# sort (simple bubble sort)
def sort_list(data):
    for i in range(len(data) - 1):
        for j in range(len(data) - 1 - i):
            if data[j] > data[j + 1]:
                data[j], data[j + 1] = data[j + 1], data[j]
    return data
print(sort_list([3, 2, 8, 6, 9, 0, 11]))
#create a list with the values at odd indexes
def odd_index_finder(data):
result = [];
for i in range(len(data)):
        if i % 2 == 1:
result.append(data[i]);
return result;
print(odd_index_finder([0, 1, 2, 3, 4, 5,6]))
#create list by eliminating certain values
def filter_list(data, target_value):
    result = []
    for val in data:
        if val != target_value:
            result.append(val)
    return result
print(filter_list([1, 2, 6, 3, 8], 2))
| [
"[email protected]"
] | |
82c64dbf0b000b43b1611773f5fc004e0c6e24e6 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Data2vec_for_PyTorch/fairseq/models/speech_to_text/utils.py | 33117446a5e2f3b71c64f0a7b6b8122a1ac7c182 | [
"MIT",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 18,584 | py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
from collections.abc import Iterable
from itertools import repeat
from typing import List, Optional, Tuple
import torch
from torch import Tensor
# ------------------------------------------------------------------------------
# assert_equal()
# ------------------------------------------------------------------------------
def assert_equal(value1, value2, name1=None, name2=None):
"""Asserts two values are equal otherwise raise an error."""
str_name1 = "" if name1 is None else "{} ".format(name1)
str_name2 = "" if name2 is None else "{} ".format(name2)
if value1 != value2:
str_value1 = "{}" if name1 is None else "({})"
str_value1 = str_value1.format(value1)
str_value2 = "{}" if name2 is None else "({})"
str_value2 = str_value2.format(value2)
raise ValueError(
"Expected {}{} == {}{}".format(str_name1, str_value1, str_name2, str_value2)
)
def fill_config(config, key, value):
if value is not None:
if key not in config or config[key] is None:
config[key] = value
assert_equal(value, config[key], "value", f'config["{key}"]')
# ------------------------------------------------------------------------------
# check_and_return_expected()
# ------------------------------------------------------------------------------
def check_and_return_expected(value, undefined_value, expected_value, name=None):
"""
Return the expected value while checking if the given value is undefined or
equal to the expected value.
"""
if (undefined_value is None and value is None) or (undefined_value == value):
return expected_value
if value != expected_value:
str_name = "" if name is None else "{} ".format(name)
str_value = "{}" if name is None else "({})"
str_value = str_value.format(value)
raise ValueError(
"Expected {}{} == {}".format(str_name, str_value, expected_value)
)
return expected_value
# ------------------------------------------------------------------------------
# get_time_axis()
# ------------------------------------------------------------------------------
def get_time_axis(layout):
"""
Extract the time axis from the layout, for example for breaking sequence into
segments.
"""
if layout in ["TB", "TBD"]:
return 0
if layout in ["BT", "BTD"]:
return 1
if layout in ["BCTD"]:
return 2
raise ValueError("Unsupported layout = {}".format(layout))
# ------------------------------------------------------------------------------
# get_batch_axis()
# ------------------------------------------------------------------------------
def get_batch_axis(layout):
"""
Extract the batch axis from the layout
"""
if layout in ["TB", "TBD"]:
return 1
if layout in ["BT", "BTD", "BCTD"]:
return 0
raise ValueError("Unsupported layout = {}".format(layout))
# ------------------------------------------------------------------------------
# monotonically_increasing_and_bounded()
# ------------------------------------------------------------------------------
def monotonically_increasing_and_bounded(iterable, min=None, max=None):
"""
Check if the elements in the given iterable are monotonically increasing and
bounded by upper/lower bounds.
"""
if not isinstance(iterable, Iterable):
raise TypeError(
"Expected iterable to be of type Iterable, got ({})".format(
iterable.__class__.__name__
)
)
for i in range(len(iterable)):
if min is not None and iterable[i] < min:
return False
if max is not None and iterable[i] > max:
return False
if i > 0 and iterable[i] <= iterable[i - 1]:
return False
return True
# ------------------------------------------------------------------------------
# to_pair()
# ------------------------------------------------------------------------------
def to_pair(value, name):
"""Make a pair (of type tuple) of given value."""
if isinstance(value, Iterable):
if len(value) != 2:
raise ValueError(
"Expected `{}` to have exactly 2 elements, got: ({})".format(
name, value
)
)
return value
return tuple(repeat(value, 2))
# ------------------------------------------------------------------------------
# infer_conv_output_attrs()
# ------------------------------------------------------------------------------
# TODO(cfyeh): figure out if we can get `output_dim` without calling the module.
def infer_conv_output_attrs(
module, input_channels, input_dim, batch_size=1, max_length=8
):
"""Get output attributes of a module with input."""
input = torch.randn(batch_size, input_channels, max_length, input_dim)
output = module(input)
output_channels = output.shape[1]
output_dim = output.shape[-1]
return output_channels, output_dim
# ------------------------------------------------------------------------------
# NoOp
# ------------------------------------------------------------------------------
class NoOp(torch.nn.Module):
"""
NoOp simply passes the input as the output.
"""
def __init__(self):
super().__init__()
def forward(self, input: Tensor) -> Tensor:
return input
# ------------------------------------------------------------------------------
# Permute: a torch.nn.Module applies permutation on the input tensor.
# ------------------------------------------------------------------------------
class Permute(torch.nn.Module):
def __init__(self, dims):
super().__init__()
self.dims = dims
def forward(self, input: Tensor) -> Tensor:
return input.permute(self.dims).contiguous()
# ------------------------------------------------------------------------------
# lengths_to_padding_mask()
# ------------------------------------------------------------------------------
def lengths_to_padding_mask(lengths: Tensor) -> Tensor:
"""Convert lengths of shape (B, ) to padding mask."""
batch_size = lengths.shape[0]
max_length = int(torch.max(lengths).item())
padding_mask = torch.arange( # [0, ..., T-1]
max_length, device=lengths.device, dtype=lengths.dtype
).expand(batch_size, max_length) >= lengths.unsqueeze(1)
return padding_mask
# ------------------------------------------------------------------------------
# lengths_to_attention_mask()
# ------------------------------------------------------------------------------
def lengths_to_attention_mask(
lengths: Tensor,
left_context: Optional[int] = None,
right_context: Optional[int] = None,
) -> Optional[Tensor]:
"""
Generate attention mask based on (lengths, left_context, right_context).
left_context is None means unlimited left context.
right_context is None means unlimited right context.
"""
if left_context is None and right_context is None:
return None
max_length = int(torch.max(lengths).item())
# For example, with `max_length` == 5,
# indices = tensor([
# [ 0, 1, 2, 3, 4, 5],
# [-1, 0, 1, 2, 3, 4],
# [-2, -1, 0, 1, 2, 3],
# [-3, -2, -1, 0, 1, 2],
# [-4, -3, -2, -1, 0, 1],
# [-5, -4, -3, -2, -1, 0],
# ])
# In some cases the second torch.arange is created on cpu which causes a
# failure. Adding the device option to guard against it.
indices = torch.arange(
max_length, device=lengths.device, dtype=lengths.dtype
).expand(max_length, max_length) - torch.arange(
max_length, device=lengths.device
).view(
max_length, -1
)
# For example, with `max_length` == 5,
# bool_mask = tensor([
# [True, True, True, True, True],
# [True, True, True, True, True],
# [True, True, True, True, True],
# [True, True, True, True, True],
# [True, True, True, True, True],
# ])
bool_mask = (
torch.tensor([True]).to(device=lengths.device).expand(max_length, max_length)
)
# For example, with `max_length` == 5, left_context == 2
# left_mask = tensor([
# [ True, True, True, True, True],
# [ True, True, True, True, True],
# [ True, True, True, True, True],
# [False, True, True, True, True],
# [False, False, True, True, True],
# ])
if left_context is not None:
left_mask = indices >= -left_context
bool_mask = bool_mask & left_mask
# For example, with `max_length` == 5, right_context == 1
# right_mask = tensor([
# [True, True, False, False, False],
# [True, True, True, False, False],
# [True, True, True, True, False],
# [True, True, True, True, True],
# [True, True, True, True, True],
# ])
if right_context is not None:
right_mask = indices <= right_context
bool_mask = bool_mask & right_mask
bool_mask = (~bool_mask).to(device=lengths.device)
return bool_mask
# ------------------------------------------------------------------------------
# infer_output_norm()
# ------------------------------------------------------------------------------
def infer_output_norm(module, output_norm=None):
"""
Infer the output norm (string and module) needed on the module gvien desired
output normalization.
"""
if output_norm == module.output_norm():
# output_norm already matches module.output_norm().
return (None, NoOp())
if output_norm is None and module.output_norm() is not None:
logger = logging.getLogger("infer_output_norm()")
logger.warning(
"trying to set output_norm ({}) ".format(output_norm)
+ "but got module.output_norm() ({}), ".format(module.output_norm())
+ "the combined output_norm() will be ({})".format(module.output_norm())
)
return (None, NoOp())
if output_norm == "log_softmax":
if module.output_norm() is not None:
raise ValueError(
"incompatible output_norm ({}) ".format(output_norm)
+ "and module.output_norm() ({})".format(module.output_norm())
)
else:
return ("log_softmax", torch.nn.LogSoftmax(dim=-1))
if output_norm == "softmax":
if module.output_norm() is not None:
raise ValueError(
"incompatible output_norm ({}) ".format(output_norm)
+ "and module.output_norm() ({})".format(module.output_norm())
)
else:
return ("softmax", torch.nn.Softmax(dim=-1))
raise ValueError(
"output_norm ({}) not in ".format(output_norm)
+ "supported list = [None, softmax, log_softmax]"
)
# ------------------------------------------------------------------------------
# infer_channels_from_layout()
# ------------------------------------------------------------------------------
def infer_channels_from_layout(layout, channels):
"""Extract the number of channels from the layout."""
if layout in ("TBD", "BTD"):
if channels is not None and channels != 1:
raise ValueError(
"Expected channels ({}) to be 1 for layout = {}".format(
channels, layout
)
)
if channels is None:
return 1
return channels
# ------------------------------------------------------------------------------
# pad_sequence()
# ------------------------------------------------------------------------------
@torch.jit.export
def pad_sequence(
sequence: Tensor,
time_axis: int,
extra_left_context: int = 0,
extra_right_context: int = 0,
) -> Tensor:
"""Pad extra left/right contexts to the sequence."""
if extra_left_context == 0 and extra_right_context == 0:
return sequence
tensors_to_concat = []
if extra_left_context:
size = (extra_left_context,)
fill_value = 0
indices = torch.full(
size=size,
fill_value=fill_value,
dtype=torch.long,
device=sequence.device,
)
left_padding = torch.index_select(sequence, time_axis, indices)
tensors_to_concat.append(left_padding)
tensors_to_concat.append(sequence)
# NOTE(cfyeh): for efficiency reason we pad 0 instead of the last frame for
# extra right contexts.
if extra_right_context:
size = list(sequence.shape)
size[time_axis] = extra_right_context
right_padding = torch.zeros(size, dtype=sequence.dtype, device=sequence.device)
tensors_to_concat.append(right_padding)
padded_sequence = torch.cat(tensors_to_concat, dim=time_axis)
return padded_sequence
# ------------------------------------------------------------------------------
# sequence_to_segments()
# ------------------------------------------------------------------------------
@torch.jit.export
def sequence_to_segments(
sequence: Tensor,
time_axis: int,
lengths: Tensor,
segment_size: Optional[int] = None,
extra_left_context: int = 0,
extra_right_context: int = 0,
) -> List[Tuple[Tensor, Tensor]]:
"""Breaks sequence into segments."""
sequence = pad_sequence(
sequence=sequence,
time_axis=time_axis,
extra_left_context=extra_left_context,
extra_right_context=extra_right_context,
)
lengths = lengths + extra_left_context + extra_right_context
segments: List[Tuple[Tensor, Tensor]] = []
if segment_size is None:
segments.append((sequence, lengths))
return segments
offset = 0
end = sequence.shape[time_axis]
step = segment_size
size = extra_left_context + segment_size + extra_right_context
while offset + extra_left_context + extra_right_context < end:
clamped_size = min(size, end - offset)
segment_lengths = torch.clamp(lengths - offset, min=0, max=clamped_size)
indices = torch.arange(
start=offset,
end=(offset + clamped_size),
step=1,
dtype=torch.long,
device=sequence.device,
)
segment_tensor = torch.index_select(sequence, time_axis, indices)
segments.append((segment_tensor, segment_lengths))
offset = offset + step
return segments
# ------------------------------------------------------------------------------
# segments_to_sequence()
# ------------------------------------------------------------------------------
@torch.jit.export
def segments_to_sequence(
segments: List[Tuple[Tensor, Tensor]], time_axis: int
) -> Tuple[Tensor, Tensor]:
"""Concatenate segments into a full sequence."""
if len(segments) == 1:
return segments[0]
tensors_to_concat: List[Tensor] = []
lengths_to_stack: List[Tensor] = []
for tensor, lengths in segments:
tensors_to_concat.append(tensor)
lengths_to_stack.append(lengths)
sequence = torch.cat(tensors_to_concat, dim=time_axis)
lengths = torch.stack(lengths_to_stack, dim=0)
lengths = torch.sum(lengths, dim=0)
return sequence, lengths
def lengths_to_encoder_padding_mask(lengths, batch_first: bool = False):
"""
convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor
Args:
lengths: a (B, )-shaped tensor
batch_first: whether to return a (B, T) tensor
Return:
max_length: maximum length of B sequences
encoder_padding_mask: a (max_length, B) binary mask, where
[t, b] = False for t < lengths[b] and True otherwise
TODO:
kernelize this function if benchmarking shows this function is slow
"""
max_lengths = torch.max(lengths).item()
bsz = lengths.size(0)
    # Build a (B, T) position grid and compare it against the lengths:
    encoder_padding_mask = (
        torch.arange(max_lengths)  # a (T, ) tensor with [0, ..., T-1]
        .to(lengths.device)  # move to the right device
        .view(1, max_lengths)  # reshape to (1, T)-shaped tensor
        .expand(bsz, -1)  # expand to (B, T)-shaped tensor
        > lengths.view(bsz, 1).expand(-1, max_lengths)
    )
if not batch_first:
return encoder_padding_mask.t(), max_lengths
else:
return encoder_padding_mask, max_lengths
# ------------------------------------------------------------------------------
# attention suppression
# ------------------------------------------------------------------------------
def attention_suppression(attention_weights: Tensor, scale: float):
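    # Suppress (set to -inf) attention logits whose softmax probability falls more
    # than `scale` standard deviations below the per-query mean over non-zero entries.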
# B, H, qlen, klen -> B, H, qlen, 1
attention_prob = torch.nn.functional.softmax(attention_weights.float(), dim=-1)
attention_nozeros = attention_prob.to(torch.bool)
nozeros_sum = torch.sum(attention_nozeros.to(torch.float), dim=-1, keepdim=True)
# For very sparse situation, we need get round about 0s
key_sum = torch.sum(attention_prob, dim=-1, keepdim=True)
# nozeros_sum should > 1
key_mean = key_sum / (nozeros_sum + 1e-8)
# std calculation
dis = (attention_prob - key_mean) * (attention_prob - key_mean)
# if attention_prob[i] < threshold, then dis_masked[i] = 0; for all i
dis_masked = torch.where(
attention_nozeros, dis, attention_prob.new_zeros(attention_prob.size())
)
key_var = torch.sum(dis_masked, dim=-1, keepdim=True)
key_var = key_var / (nozeros_sum - 1.0 + 1e-8)
key_std = torch.sqrt(key_var)
key_thread = key_mean - scale * key_std
# if attention_prob[i] >= key_thread, then attention_prob[i]
# , otherwise "-inf"
inf_tensor = attention_prob.new_zeros(attention_prob.size()).detach()
inf_tensor[:] = float("-inf")
attention_weights_float = torch.where(
attention_prob < key_thread,
inf_tensor,
attention_weights.float(),
)
return attention_weights_float.type_as(attention_weights)
def layer_norm_backward_hook(module, grad_input, grad_output, clamp_value):
return tuple(torch.clamp(v, min=-clamp_value, max=clamp_value) for v in grad_input)
| [
"[email protected]"
] | |
259a015c82d514ec77e650e0acb5a9afebc642d2 | 8f48d12b88048e424ebb0d72ca6dfab5cf12ae0f | /0600_0999/923.py | 0da37c61c3b5051a81a5e025ce875afd0d36cebc | [] | no_license | renjieliu/leetcode | e1caf13c18a8107ed9252588b339fb76bcb1b246 | 4668b64fcb9320b6c316d8608fc61911ce43b6c7 | refs/heads/master | 2023-03-18T18:16:06.187741 | 2023-03-14T20:31:59 | 2023-03-14T20:31:59 | 128,823,819 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,542 | py | class Solution:
def threeSumMulti(self, arr: 'List[int]', target: int) -> int: # O(N2LogN | N)
hmp = {}
for a in arr:
if a not in hmp:
hmp[a] = 0
hmp[a] += 1
output = 0
arr = sorted(hmp.keys())
seen = set()
fact = lambda x: 1 if x <= 1 else x * fact(x-1)
combo = lambda x, y: fact(x)//(fact(y) * fact(x-y))
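        # For each pair of values (arr[i], arr[j]) the third value b is fixed by the
        # target; combo(n, k) counts the ways to pick k copies of a value that appears
        # n times in the input, and `seen` keeps each sorted triple counted only once.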
for i in range(len(arr)):
for j in range(len(arr)-1, i-1, -1):
a = arr[i]
b = target-arr[i]-arr[j]
c = arr[j]
t = tuple(sorted([a, b, c]))
if b in hmp and t not in seen:
seen.add(t)
cnt = {} #how many times the number needs to be picked. for case like (2,3,3), or (3,3,3)
for x in [a, b , c]:
if x not in cnt:
cnt[x] = 0
cnt[x] += 1
curr = 1
for k, v in cnt.items():
curr *= combo(hmp[k], v)
output += curr
return output%(10**9+7)
# previous solution
# class Solution:
# def threeSumMulti(self, arr: 'List[int]', target: int) -> int:
# hmp = {}
# mod = 10**9+7
# for a in arr:
# if a not in hmp:
# hmp[a] = 0
# hmp[a]+=1
# fact = lambda x: 1 if x <= 1 else x*fact(x-1)
# combo = lambda m, n: fact(m)//(fact(n) * fact(m-n))
# sig = lambda x:'-'.join([str(_) for _ in sorted(x)])
# cnt = 0
# stk = list(hmp.keys())
# seen = set()
# for i in range(len(stk)):
# a = stk[i]
# if target == a*3 and hmp[a] >=3:
# cnt += combo(hmp[a], 3)
# seen.add(sig([a,a,a]))
# else:
# for j in range(i+1, len(stk)):
# b = stk[j]
# c = target - a-b
# if c in hmp and sig([a,b,c]) not in seen:
# if a == c:
# cnt += combo(hmp[a], 2) * combo(hmp[b], 1)
# elif b == c:
# cnt += combo(hmp[b], 2) * combo(hmp[a], 1)
# else:
# cnt += combo(hmp[a], 1) * combo(hmp[b], 1)* combo(hmp[c], 1)
# seen.add(sig([a,b,c]))
# return cnt % mod
| [
"[email protected]"
] | |
65d45ee320837f03ec2327071590305732848200 | af7a8e1fcacb1ac50ae6c8072db608ca3e80a839 | /tests/test_linearize.py | 7e5bb8ea9c414b953f498ee4f92ab10ba1a07ebb | [
"MIT"
] | permissive | wszhang/devito | 3c497af69a8420bae00b17ad8c0b9c08ca1de704 | a7dbdabe505ded73781ca06e0a1c40b4d582655d | refs/heads/master | 2023-09-06T00:33:43.694995 | 2021-10-13T07:51:29 | 2021-10-13T07:51:29 | 416,707,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,806 | py | import pytest
import numpy as np
import scipy.sparse
from devito import (Grid, Function, TimeFunction, SparseTimeFunction, Operator, Eq,
MatrixSparseTimeFunction)
from devito.ir import Expression, FindNodes
def test_basic():
grid = Grid(shape=(4, 4))
u = TimeFunction(name='u', grid=grid)
u1 = TimeFunction(name='u', grid=grid)
eqn = Eq(u.forward, u + 1)
op0 = Operator(eqn)
op1 = Operator(eqn, opt=('advanced', {'linearize': True}))
# Check generated code
assert 'uL0' not in str(op0)
assert 'uL0' in str(op1)
op0.apply(time_M=10)
op1.apply(time_M=10, u=u1)
assert np.all(u.data == u1.data)
@pytest.mark.parallel(mode=[(1, 'basic'), (1, 'diag2'), (1, 'full')])
def test_mpi():
grid = Grid(shape=(4, 4))
u = TimeFunction(name='u', grid=grid, space_order=2)
u1 = TimeFunction(name='u', grid=grid, space_order=2)
eqn = Eq(u.forward, u.dx2 + 1.)
op0 = Operator(eqn)
op1 = Operator(eqn, opt=('advanced', {'linearize': True}))
# Check generated code
assert 'uL0' not in str(op0)
assert 'uL0' in str(op1)
op0.apply(time_M=10)
op1.apply(time_M=10, u=u1)
assert np.all(u.data == u1.data)
def test_cire():
grid = Grid(shape=(4, 4, 4))
u = TimeFunction(name='u', grid=grid, space_order=2)
u1 = TimeFunction(name='u', grid=grid, space_order=2)
eqn = Eq(u.forward, u.dy.dy + 1.)
op0 = Operator(eqn, opt=('advanced', {'cire-mingain': 0}))
op1 = Operator(eqn, opt=('advanced', {'linearize': True, 'cire-mingain': 0}))
# Check generated code
assert 'uL0' not in str(op0)
assert 'uL0' in str(op1)
op0.apply(time_M=10)
op1.apply(time_M=10, u=u1)
assert np.all(u.data == u1.data)
def test_nested_indexeds():
grid = Grid(shape=(4, 4))
t = grid.stepping_dim
x, y = grid.dimensions
f = Function(name='f', grid=grid, dtype=np.int32)
g = Function(name='g', grid=grid, dimensions=(x,), shape=(4,), dtype=np.int32)
u = TimeFunction(name='u', grid=grid, space_order=2)
u1 = TimeFunction(name='u', grid=grid, space_order=2)
eqn = Eq(u.forward, u[t, f[g[x], g[x]], y] + 1.)
op0 = Operator(eqn)
op1 = Operator(eqn, opt=('advanced', {'linearize': True}))
# Check generated code
assert 'uL0' not in str(op0)
assert 'uL0' in str(op1)
op0.apply(time_M=10)
op1.apply(time_M=10, u=u1)
assert np.all(u.data == u1.data)
def test_interpolation():
nt = 10
grid = Grid(shape=(4, 4))
src = SparseTimeFunction(name='src', grid=grid, npoint=1, nt=nt)
rec = SparseTimeFunction(name='rec', grid=grid, npoint=1, nt=nt)
u = TimeFunction(name="u", grid=grid, time_order=2)
u1 = TimeFunction(name="u", grid=grid, time_order=2)
src.data[:] = 1.
eqns = ([Eq(u.forward, u + 1)] +
src.inject(field=u.forward, expr=src) +
rec.interpolate(expr=u.forward))
op0 = Operator(eqns, opt='advanced')
op1 = Operator(eqns, opt=('advanced', {'linearize': True}))
# Check generated code
assert 'uL0' not in str(op0)
assert 'uL0' in str(op1)
op0.apply(time_M=nt-2)
op1.apply(time_M=nt-2, u=u1)
assert np.all(u.data == u1.data)
def test_interpolation_msf():
grid = Grid(shape=(4, 4))
r = 2 # Because we interpolate across 2 neighbouring points in each dimension
nt = 10
m0 = TimeFunction(name="m0", grid=grid, space_order=0, save=nt, time_order=0)
m1 = TimeFunction(name="m1", grid=grid, space_order=0, save=nt, time_order=0)
mat = scipy.sparse.coo_matrix((0, 0), dtype=np.float32)
sf = MatrixSparseTimeFunction(name="s", grid=grid, r=r, matrix=mat, nt=nt)
eqns = sf.inject(field=m0.forward, expr=sf.dt2)
eqns += sf.inject(field=m1.forward, expr=sf.dt2)
op0 = Operator(eqns)
op1 = Operator(eqns, opt=('advanced', {'linearize': True}))
assert 'm0L0' in str(op1)
# There used to be a bug causing the jit compilation to fail because of
# the writing to `const int` variables
assert op0.cfunction
assert op1.cfunction
@pytest.mark.parallel(mode=[(1, 'diag2')])
def test_codegen_quality0():
grid = Grid(shape=(4, 4))
u = TimeFunction(name='u', grid=grid, space_order=2)
eqn = Eq(u.forward, u.dx2 + 1.)
op = Operator(eqn, opt=('advanced', {'linearize': True}))
assert 'uL0' in str(op)
# No useless exprs generated by linearize(), such as `int x_fsz5 = u_vec->size[1];`,
# since the linearization only has an effect in the elemental functions
exprs = FindNodes(Expression).visit(op)
assert len(exprs) == 1
# Only four access macros necessary, namely `uL0`, `aL0`, `bufL0`, `bufL1` (the
# other three obviously are _POSIX_C_SOURCE, START_TIMER, STOP_TIMER)
assert len(op._headers) == 7
exprs = FindNodes(Expression).visit(op._func_table['compute0'].root)
assert all('const int' in str(i) for i in exprs[:-1])
def test_codegen_quality1():
grid = Grid(shape=(4, 4, 4))
u = TimeFunction(name='u', grid=grid, space_order=2)
eqn = Eq(u.forward, u.dy.dy + 1.)
op = Operator(eqn, opt=('advanced', {'linearize': True, 'cire-mingain': 0}))
assert 'uL0' in str(op)
# 11 expressions in total are expected, 8 of which are for the linearized accesses
exprs = FindNodes(Expression).visit(op)
assert len(exprs) == 11
assert all('const int' in str(i) for i in exprs[:-3])
assert all('const int' not in str(i) for i in exprs[-3:])
# Only two access macros necessary, namely `uL0` and `r1L0` (the other five
# obviously are _POSIX_C_SOURCE, MIN, MAX, START_TIMER, STOP_TIMER)
assert len(op._headers) == 7
def test_pow():
grid = Grid(shape=(4, 4))
u = TimeFunction(name='u', grid=grid, space_order=2)
eqn = Eq(u.forward, 1./(u*u) + 1.)
op = Operator(eqn, opt=('advanced', {'linearize': True}))
# Make sure linearize() doesn't cause `a*a` -> `Pow(a, 2)`
assert 'uL0' in str(op)
expr = FindNodes(Expression).visit(op)[-1].expr
assert expr.rhs.is_Add
assert expr.rhs.args[1].is_Pow
assert expr.rhs.args[1].args[0].is_Mul
assert expr.rhs.args[1].args[1] == -1
def test_different_halos():
grid = Grid(shape=(8, 8, 8))
f = Function(name='f', grid=grid, space_order=8)
g = Function(name='g', grid=grid, space_order=16)
u = TimeFunction(name='u', grid=grid, space_order=12)
u1 = TimeFunction(name='u', grid=grid, space_order=12)
f.data[:] = 1.
g.data[:] = 2.
eqn = Eq(u.forward, u + f + g + 1)
op0 = Operator(eqn)
op1 = Operator(eqn, opt=('advanced', {'linearize': True}))
# Check generated code
assert 'uL0' not in str(op0)
assert 'uL0' in str(op1)
op0.apply(time_M=4)
op1.apply(time_M=4, u=u1)
assert np.all(u.data == u1.data)
| [
"[email protected]"
] | |
9934aca3276aec7f5db8547b6452a74f26e0922c | 54bb9ba6d507cd25b2c2ac553665bc5fc95280d1 | /tests/onegov/gazette/test_views_categories.py | f4a276ee54ff21d4de1bd1636e5d63f25c6af2d0 | [
"MIT"
] | permissive | href/onegov-cloud | 9ff736d968979380edba266b6eba0e9096438397 | bb292e8e0fb60fd1cd4e11b0196fbeff1a66e079 | refs/heads/master | 2020-12-22T07:59:13.691431 | 2020-01-28T08:51:54 | 2020-01-28T08:51:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,759 | py | from freezegun import freeze_time
from tests.onegov.gazette.common import login_editor_1
from tests.onegov.gazette.common import login_publisher
from pyquery import PyQuery as pq
from webtest import TestApp as Client
from xlrd import open_workbook
def test_view_categories(gazette_app):
with freeze_time("2017-10-20 12:00"):
client = Client(gazette_app)
login_publisher(client)
# Test data:
# 10 / Complaints / inactive
# 11 / Education / active
# 12 / Submissions / active
# 13 / Commercial Register / active
# 14 / Elections / active
# add a category
manage = client.get('/categories')
manage = manage.click('Neu')
manage.form['title'] = 'Rubrik XY'
manage.form['active'] = True
manage = manage.form.submit().maybe_follow()
assert 'Rubrik hinzugefügt.' in manage
assert 'Rubrik XY' in manage
categories = [
[
''.join((td.text_content(), td.attrib['class']))
for td in pq(tr)('td')[:1]
][0]
for tr in manage.pyquery('table.categories tbody tr')
]
assert categories == [
'Commercial Register (13)',
'Complaints (10)inactive',
'Education (11)',
'Elections (14)',
'Rubrik XY (15)',
'Submissions (12)'
]
# use the first category in a notice
manage = client.get('/notices/drafted/new-notice')
manage.form['title'] = 'Titel'
manage.form['organization'] = '200'
manage.form['category'] = '13'
manage.form['issues'] = ['2017-44']
manage.form['text'] = 'Text'
manage.form['author_place'] = 'Govikon'
manage.form['author_name'] = 'State Chancellerist'
manage.form['author_date'] = '2019-01-01'
manage = manage.form.submit().maybe_follow()
assert '<h2>Titel</h2>' in manage
assert 'Commercial Register' in manage
# edit the first category
manage = client.get('/categories')
manage = manage.click('Bearbeiten', index=0)
manage.form['title'] = 'Rubrik Z'
manage.form['active'] = False
manage = manage.form.submit().maybe_follow()
assert 'Rubrik geändert.' in manage
assert 'Commercial Register' not in manage
categories = [
[
''.join((td.text_content(), td.attrib['class']))
for td in pq(tr)('td')[:1]
][0]
for tr in manage.pyquery('table.categories tbody tr')
]
assert categories == [
'Complaints (10)inactive',
'Education (11)',
'Elections (14)',
'Rubrik XY (15)',
'Rubrik Z (13)inactive',
'Submissions (12)'
]
# check if the notice has been updated
manage = client.get('/notice/titel')
assert 'Commercial Register' not in manage
assert 'Rubrik Z' in manage
# delete all but one (unused) categories
manage = client.get('/categories')
manage.click('Löschen', index=0).form.submit()
manage.click('Löschen', index=1).form.submit()
manage.click('Löschen', index=2).form.submit()
manage.click('Löschen', index=3).form.submit()
manage.click('Löschen', index=5).form.submit()
manage = client.get('/categories')
assert 'Complaints' not in manage
assert 'Education' not in manage
assert 'Elections' not in manage
assert 'Rubrik XY' not in manage
assert 'Rubrik Z' in manage
assert 'Submissions' not in manage
# Try to delete the used category
manage = client.get('/categories')
manage = manage.click('Löschen')
assert 'Es können nur unbenutzte Rubriken gelöscht werden.' in manage
assert not manage.forms
def test_view_categories_permissions(gazette_app):
client = Client(gazette_app)
login_publisher(client)
manage = client.get('/categories').click('Neu')
manage.form['title'] = 'XY'
manage = manage.form.submit().maybe_follow()
edit_link = manage.click('Bearbeiten', index=0).request.url
delete_link = manage.click('Löschen', index=0).request.url
login_editor_1(client)
client.get('/categories', status=403)
client.get(edit_link, status=403)
client.get(delete_link, status=403)
def test_view_categories_export(gazette_app):
client = Client(gazette_app)
client.get('/categories/export', status=403)
login_editor_1(client)
client.get('/categories/export', status=403)
login_publisher(client)
response = client.get('/categories/export')
book = open_workbook(file_contents=response.body)
assert book.nsheets == 1
sheet = book.sheets()[0]
assert sheet.ncols == 4
assert sheet.nrows == 6
assert sheet.cell(0, 0).value == 'ID'
assert sheet.cell(0, 1).value == 'Name'
assert sheet.cell(0, 2).value == 'Titel'
assert sheet.cell(0, 3).value == 'Aktiv'
assert sheet.cell(1, 1).value == '13'
assert sheet.cell(1, 2).value == 'Commercial Register'
assert sheet.cell(1, 3).value == 1
assert sheet.cell(2, 1).value == '10'
assert sheet.cell(2, 2).value == 'Complaints'
assert sheet.cell(2, 3).value == 0
assert sheet.cell(3, 1).value == '11'
assert sheet.cell(3, 2).value == 'Education'
assert sheet.cell(3, 3).value == 1
assert sheet.cell(4, 1).value == '14'
assert sheet.cell(4, 2).value == 'Elections'
assert sheet.cell(4, 3).value == 1
assert sheet.cell(5, 1).value == '12'
assert sheet.cell(5, 2).value == 'Submissions'
assert sheet.cell(5, 3).value == 1
| [
"[email protected]"
] | |
4219927749366d39a842c23f600cc6771f74d51a | 3784495ba55d26e22302a803861c4ba197fd82c7 | /venv/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/canned/linear_optimizer/python/utils/sharded_mutable_dense_hashtable.py | b73d3f8878dd4a6f649ad6783b7a54e4bc242a91 | [
"MIT"
] | permissive | databill86/HyperFoods | cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789 | 9267937c8c70fd84017c0f153c241d2686a356dd | refs/heads/master | 2021-01-06T17:08:48.736498 | 2020-02-11T05:02:18 | 2020-02-11T05:02:18 | 241,407,659 | 3 | 0 | MIT | 2020-02-18T16:15:48 | 2020-02-18T16:15:47 | null | UTF-8 | Python | false | false | 14,573 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sharded mutable dense hash table."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from six.moves import range
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training.saver import BaseSaverBuilder
class _MutableDenseHashTable(lookup_ops.LookupInterface):
"""Copy of tf.contrib.lookup.MutableDenseHashTable."""
# TODO(b/118148303): Swap this with the core version
def __init__(self,
key_dtype,
value_dtype,
default_value,
empty_key,
deleted_key,
initial_num_buckets=None,
shared_name=None,
name="MutableDenseHashTable",
checkpoint=True):
"""Creates an empty `_MutableDenseHashTable` object.
Creates a table, the type of its keys and values are specified by key_dtype
and value_dtype, respectively.
Args:
key_dtype: the type of the key tensors.
value_dtype: the type of the value tensors.
default_value: The value to use if a key is missing in the table.
empty_key: the key to use to represent empty buckets internally. Must not
be used in insert, remove or lookup operations.
deleted_key: the key to use to represent deleted buckets internally. Must
not be used in insert, remove or lookup operations and be different from
the empty_key.
initial_num_buckets: the initial number of buckets.
shared_name: If non-empty, this table will be shared under
the given name across multiple sessions.
name: A name for the operation (optional).
checkpoint: if True, the contents of the table are saved to and restored
from checkpoints. If `shared_name` is empty for a checkpointed table, it
is shared using the table node name.
Returns:
A `_MutableDenseHashTable` object.
Raises:
ValueError: If checkpoint is True and no name was specified.
"""
self._default_value = ops.convert_to_tensor(
default_value, dtype=value_dtype, name="default_value")
self._key_dtype = key_dtype
self._value_dtype = value_dtype
self._initial_num_buckets = initial_num_buckets
self._value_shape = self._default_value.get_shape()
self._checkpoint = checkpoint
self._name = name
self._empty_key = ops.convert_to_tensor(
empty_key, dtype=key_dtype, name="empty_key")
self._deleted_key = ops.convert_to_tensor(
deleted_key, dtype=key_dtype, name="deleted_key")
if context.executing_eagerly() and shared_name is None:
# TODO(allenl): This will leak memory due to kernel caching by the
# shared_name attribute value (but is better than the alternative of
# sharing everything by default when executing eagerly; hopefully creating
# tables in a loop is uncommon).
shared_name = "table_%d" % (ops.uid(),)
self._shared_name = shared_name
super(_MutableDenseHashTable, self).__init__(key_dtype, value_dtype)
self._resource_handle = self._create_resource()
if checkpoint:
saveable = _MutableDenseHashTable._Saveable(self, name)
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
def _create_resource(self):
# The table must be shared if checkpointing is requested for multi-worker
# training to work correctly. Use the node name if no shared_name has been
# explicitly specified.
use_node_name_sharing = self._checkpoint and self._shared_name is None
table_ref = gen_lookup_ops.mutable_dense_hash_table_v2(
empty_key=self._empty_key,
deleted_key=self._deleted_key,
shared_name=self._shared_name,
use_node_name_sharing=use_node_name_sharing,
value_dtype=self._value_dtype,
value_shape=self._value_shape,
initial_num_buckets=self._initial_num_buckets,
name=self._name)
if context.executing_eagerly():
self._table_name = None
else:
self._table_name = table_ref.op.name.split("/")[-1]
return table_ref
@property
def name(self):
return self._table_name
def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.name_scope(name, "%s_Size" % self.name,
[self.resource_handle]) as name:
with ops.colocate_with(self.resource_handle):
return gen_lookup_ops.lookup_table_size_v2(
self.resource_handle, name=name)
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. Can be a tensor of any shape. Must match the
table's key_dtype.
name: A name for the operation (optional).
Returns:
A tensor containing the values in the same shape as `keys` using the
table's value type.
Raises:
TypeError: when `keys` do not match the table data types.
"""
with ops.name_scope(name, "%s_lookup_table_find" % self.name,
[self.resource_handle, keys]) as name:
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name="keys")
with ops.colocate_with(self.resource_handle):
values = gen_lookup_ops.lookup_table_find_v2(
self.resource_handle, keys, self._default_value, name=name)
return values
def insert(self, keys, values, name=None):
"""Associates `keys` with `values`.
Args:
keys: Keys to insert. Can be a tensor of any shape. Must match the
table's key type.
values: Values to be associated with keys. Must be a tensor of the same
shape as `keys` and match the table's value type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` or `values` doesn't match the table data
types.
"""
with ops.name_scope(name, "%s_lookup_table_insert" % self.name,
[self.resource_handle, keys, values]) as name:
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype, name="keys")
values = ops.convert_to_tensor(
values, dtype=self._value_dtype, name="values")
with ops.colocate_with(self.resource_handle):
op = gen_lookup_ops.lookup_table_insert_v2(
self.resource_handle, keys, values, name=name)
return op
def export(self, name=None):
"""Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
A pair of tensors with the first tensor containing all keys and the
second tensors containing all values in the table.
"""
with ops.name_scope(name, "%s_lookup_table_export_values" % self.name,
[self.resource_handle]) as name:
with ops.colocate_with(self.resource_handle):
exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(
self.resource_handle, self._key_dtype, self._value_dtype, name=name)
return exported_keys, exported_values
def _gather_saveables_for_checkpoint(self):
"""For object-based checkpointing."""
return {"table": functools.partial(
_MutableDenseHashTable._Saveable, table=self)}
class _Saveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for _MutableDenseHashTable."""
def __init__(self, table, name):
tensors = table.export()
specs = [
BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
]
# pylint: disable=protected-access
super(_MutableDenseHashTable._Saveable, self).__init__(table, specs, name)
def restore(self, restored_tensors, restored_shapes):
del restored_shapes # unused
# pylint: disable=protected-access
with ops.colocate_with(self.op.resource_handle):
return gen_lookup_ops.lookup_table_import_v2(
self.op.resource_handle, restored_tensors[0], restored_tensors[1])
# TODO(rohanj): This should subclass Checkpointable and implement
# _gather_saveables_for_checkpoint.
class _ShardedMutableDenseHashTable(object):
"""A sharded version of _MutableDenseHashTable.
It is designed to be interface compatible with LookupInterface and
MutableDenseHashTable, with the exception of the export method, which is
replaced by an export_sharded method.
The _ShardedMutableDenseHashTable keeps `num_shards` _MutableDenseHashTable
internally. The shard is computed via the modulo operation on the key.
"""
def __init__(self,
key_dtype,
value_dtype,
default_value,
empty_key,
deleted_key,
num_shards=1,
checkpoint=True,
name="ShardedMutableHashTable"):
self._key_dtype = key_dtype
self._value_dtype = value_dtype
with ops.name_scope(name, "sharded_mutable_hash_table") as scope:
table_shards = []
for i in range(num_shards):
self._table_name = scope
table_shards.append(
_MutableDenseHashTable(
key_dtype=key_dtype,
value_dtype=value_dtype,
default_value=default_value,
empty_key=empty_key,
deleted_key=deleted_key,
checkpoint=checkpoint,
name="%s-%d-of-%d" % (name, i + 1, num_shards)))
self._table_shards = table_shards
# TODO(andreasst): add a value_shape() method to LookupInterface
# pylint: disable=protected-access
self._value_shape = self._table_shards[0]._value_shape
# pylint: enable=protected-access
@property
def name(self):
return self._table_name
@property
def _num_shards(self):
return len(self._table_shards)
@property
def table_shards(self):
return self._table_shards
def size(self, name=None):
with ops.name_scope(name, "sharded_mutable_hash_table_size"):
sizes = [
self._table_shards[i].size() for i in range(self._num_shards)
]
return math_ops.add_n(sizes)
def _shard_indices(self, keys):
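    # Shard assignment is `abs(key) % num_shards`, e.g. key 10 with 4 shards goes to
    # shard 2. For matrix keys only the first element of each key vector is used.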
key_shape = keys.get_shape()
if key_shape.ndims > 1:
# If keys are a matrix (i.e. a single key is a vector), we use the first
# element of each key vector to determine the shard.
keys = array_ops.reshape(array_ops.slice(keys, [0, 0], [-1, 1]), [-1])
indices = math_ops.mod(math_ops.abs(keys), self._num_shards)
return math_ops.cast(indices, dtypes.int32)
def _check_keys(self, keys):
if keys.get_shape().ndims != 1 and keys.get_shape().ndims != 2:
raise ValueError("Expected a vector or matrix for keys, got %s." %
keys.get_shape())
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values."""
if keys.dtype.base_dtype != self._key_dtype:
raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
(self._key_dtype, keys.dtype))
self._check_keys(keys)
num_shards = self._num_shards
if num_shards == 1:
return self._table_shards[0].lookup(keys, name=name)
shard_indices = self._shard_indices(keys)
key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
num_shards)
value_shards = [
self._table_shards[i].lookup(key_shards[i], name=name)
for i in range(num_shards)
]
num_keys = array_ops.shape(keys)[0]
original_indices = math_ops.range(num_keys)
partitioned_indices = data_flow_ops.dynamic_partition(original_indices,
shard_indices,
num_shards)
return data_flow_ops.dynamic_stitch(partitioned_indices, value_shards)
def insert(self, keys, values, name=None):
"""Inserts `keys` in a table."""
self._check_keys(keys)
num_shards = self._num_shards
if num_shards == 1:
return self._table_shards[0].insert(keys, values, name=name)
shard_indices = self._shard_indices(keys)
key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
num_shards)
value_shards = data_flow_ops.dynamic_partition(values, shard_indices,
num_shards)
return_values = [
self._table_shards[i].insert(key_shards[i], value_shards[i], name=name)
for i in range(num_shards)
]
return control_flow_ops.group(*return_values)
def export_sharded(self, name=None):
"""Returns lists of the keys and values tensors in the sharded table.
Args:
name: name of the table.
Returns:
A pair of lists with the first list containing the key tensors and the
second list containing the value tensors from each shard.
"""
keys_list = []
values_list = []
for table_shard in self._table_shards:
exported_keys, exported_values = table_shard.export(name=name)
keys_list.append(exported_keys)
values_list.append(exported_values)
return keys_list, values_list
| [
"[email protected]"
] | |
8106dce1761f9380a357a6fea81f564b15eecbf6 | 82d588161a8f8cd27c3031c779120ea4380791b9 | /minjoo/Codility/CountDiv.py | 3da57e4f57e8f12ec1b46b4f62a80179f7909600 | [] | no_license | Yejin6911/Algorithm_Study | 3aa02a7d07169382a78c049d1de8251a52da816c | 98c968bfeed17ab6b62e3a077280e0310f08190a | refs/heads/master | 2023-09-01T00:31:07.212413 | 2021-10-24T07:56:21 | 2021-10-24T07:56:21 | 345,009,057 | 1 | 1 | null | 2021-09-20T13:08:33 | 2021-03-06T04:57:34 | Python | UTF-8 | Python | false | false | 131 | py | import math
def solution(A, B, K):
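    # Number of multiples of K in [A, B] is floor(B / K) - ceil(A / K) + 1.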
aq = A / K
bq = B / K
a = math.ceil(aq)
b = math.floor(bq)
return b - a + 1 | [
"[email protected]"
] | |
da37b96591b53f54d4027e1195cb8b282365e5e6 | 0f0484e60c4bffca000a41ec79d9bfe340f306dd | /mainclean.py | 7ae80fac601fb3115d4f86177d63d9182d66a454 | [] | no_license | drewlinsley/girik_tracker | a0e40d6baa3ac6ebb8a03ae75aefa157c21c2579 | 1f533637dc1d1c9b95f66a5642ef36fe0a985b9c | refs/heads/main | 2023-05-25T17:28:59.249668 | 2021-01-18T19:24:57 | 2021-01-18T19:24:57 | 330,765,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,971 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 21 14:04:57 2019
"""
import os
import time
import torch
from torchvision.transforms import Compose as transcompose
import torch.nn.parallel
import torch.optim
import numpy as np
# from utils.dataset import DataSetSeg
from utils.TFRDataset import tfr_data_loader
from models.hgrucleanSEG import hConvGRU, hConvGRUallSUP, hConvGRUtrunc
from models.convlstm import ConvLSTM
from models.FFnet import FFConvNet
from models.ffhgru import FFhGRU, FFhGRUwithGabor, FFhGRUwithoutGaussian, FFhGRUdown
from models.ffhgru3d import FFhGRU3D
from models.ffstlstm import FFSTLSTM
from models.fflstm import FFLSTM
from models.lrcn_style import LRCNStyle
from models.lrcn_style_last_readout import LRCNStyleLast
from utils.transforms import GroupScale, Augmentation, Stack, ToTorchFormatTensor
from utils.misc_functions import AverageMeter, FocalLoss, acc_scores, save_checkpoint
from statistics import mean
from utils.opts import parser
import matplotlib
# import imageio
from torch._six import inf
matplotlib.use('Agg')
# a=tfr_data_loader("/media/data_cifs_lrs/projects/prj_tracking/fixed_optic_flow_tfrecords_constrained_red_blue_datasets/14/train-*")
# a=tfr_data_loader(data_dir="/media/data_cifs_lrs/projects/prj_tracking/fixed_optic_flow_tfrecords_constrained_red_blue_datasets/14/train-of-batch_0-train-batch_0--00038-of-00040", batch_size=32)
# exit()
# import pdb; pdb.set_trace()
torch.backends.cudnn.benchmark = True
global best_prec1
best_prec1 = 0
args = parser.parse_args()
transform_list = transcompose([GroupScale((150, 150)), Augmentation(), Stack(), ToTorchFormatTensor(div=True)])
# pf_root = '/users/akarkada'
#pf_root = '/gpfs/data/tserre/data/lgovinda/'
# pf_root = '/media/data_cifs_lrs/projects/prj_tracking/fixed_optic_flow_tfrecords_constrained_red_blue_datasets/14/'
# pf_root = '/media/data_cifs/projects/prj_tracking/downsampled_constrained_red_blue_datasets_16_32_32/14_dist/tfrecords/'
# pf_root = '/media/data_cifs/projects/prj_tracking/downsampled_constrained_red_blue_datasets_32_32_32/14_dist/tfrecords/'
pf_root = '/media/data_cifs/projects/prj_tracking/downsampled_constrained_red_blue_datasets_64_32_32/14_dist/tfrecords/'
# pf_root = '/media/data_cifs/projects/prj_tracking/downsampled_constrained_red_blue_datasets_128_16_16/25_dist/tfrecords/'
# pf_root = '/media/data_cifs/projects/prj_tracking/girik/constrained_red_blue_dataset_all/14_dist/tfrecords/'
print("Loading training dataset")
# train_loader = torch.utils.data.DataLoader(DataSetSeg(pf_root, args.train_list, transform=transform_list),
# batch_size=args.batch_size, shuffle=True, num_workers=8,
# pin_memory=True, drop_last=True)
# train_loader = tfr_data_loader(data_dir=pf_root+'train-of-batch_0-train-batch_0--00000-of-00040', batch_size=args.batch_size, drop_remainder=True)
train_loader = tfr_data_loader(data_dir=pf_root+'train-*', batch_size=args.batch_size, drop_remainder=True)
print("Loading validation dataset")
# val_loader = torch.utils.data.DataLoader(DataSetSeg(pf_root, args.val_list, transform=transform_list),
# batch_size=args.batch_size, shuffle=False, num_workers=4,
# pin_memory=False, drop_last=True)
val_loader = tfr_data_loader(data_dir=pf_root+'test-*', batch_size=args.batch_size, drop_remainder=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
len_train_loader = 20000
len_val_loader = 20000
def validate(val_loader, model, criterion, device, logiters=None):
batch_timev = AverageMeter()
lossesv = AverageMeter()
top1v = AverageMeter()
precisionv = AverageMeter()
recallv = AverageMeter()
f1scorev = AverageMeter()
model.eval()
end = time.time()
with torch.no_grad():
for i, (imgs, target) in enumerate(val_loader):
# Get into pytorch format
imgs = torch.from_numpy(imgs.numpy())
imgs = imgs.permute(0,4,1,2,3)
target = torch.from_numpy(np.vectorize(ord)(target.numpy()))
imgs = imgs.to(device, dtype=torch.float)
target = target.to(device, dtype=torch.float)
# Convert imgs to 1-channel
imgs = imgs.mean(1, keepdim=True)
imgs = imgs / 255. # Normalize to [0, 1]
# target = target.cuda()
# target = (target > 0.2).squeeze().long()
# imgs = imgs.cuda()
output, gt2, loss = model.forward(imgs, 0, 0, target, criterion)
loss = loss.mean()
prec1, preci, rec, f1s = acc_scores(target, output.data)
lossesv.update(loss.data.item(), 1)
top1v.update(prec1.item(), 1)
precisionv.update(preci.item(), 1)
recallv.update(rec.item(), 1)
f1scorev.update(f1s.item(), 1)
batch_timev.update(time.time() - end)
end = time.time()
# if (i % args.print_freq == 0 or (i == len(val_loader) - 1)) and logiters is None:
if (i % args.print_freq == 0 or (i == len_val_loader - 1)) and logiters is None:
print_string = 'Test: [{0}/{1}]\t Time: {batch_time.avg:.3f}\t Loss: {loss.val:.8f} ({loss.avg: .8f})\t'\
'Bal_acc: {balacc:.8f} preci: {preci.val:.5f} ({preci.avg:.5f}) rec: {rec.val:.5f}'\
'({rec.avg:.5f}) f1: {f1s.val:.5f} ({f1s.avg:.5f})'\
.format(i, len_val_loader, batch_time=batch_timev, loss=lossesv, balacc=top1v.avg,
preci=precisionv, rec=recallv, f1s=f1scorev)
print(print_string)
with open(results_folder + args.name + '.txt', 'a+') as log_file:
log_file.write(print_string + '\n')
elif logiters is not None:
if i > logiters:
break
model.train()
return top1v.avg, precisionv.avg, recallv.avg, f1scorev.avg, lossesv.avg
def save_npz(epoch, log_dict, results_folder, savename='train'):
with open(results_folder + savename + '.npz', 'wb') as f:
np.savez(f, **log_dict)
if __name__ == '__main__':
results_folder = 'results/{0}/'.format(args.name)
# os.mkdir(results_folder)
os.makedirs(results_folder, exist_ok=True)
exp_logging = args.log
jacobian_penalty = args.penalty
timesteps = 16
if args.model == 'hgru':
print("Init model hgru ", args.algo, 'penalty: ', args.penalty, 'steps: ', timesteps)
model = hConvGRU(timesteps=timesteps, filt_size=15, num_iter=15, exp_name=args.name, jacobian_penalty=jacobian_penalty,
grad_method=args.algo)
elif args.model == 'ffhgru':
print("Init model ffhgru ", args.algo, 'penalty: ', args.penalty, 'steps: ', timesteps)
model = FFhGRU(batch_size=args.batch_size, timesteps=timesteps, filt_size=15, num_iter=15, exp_name=args.name, jacobian_penalty=jacobian_penalty,
grad_method=args.algo)
elif args.model == 'ffhgru3d':
print("Init model ffhgru ", args.algo, 'penalty: ', args.penalty, 'steps: ', timesteps)
model = FFhGRU3D(timesteps=timesteps, filt_size=15, num_iter=15, exp_name=args.name, jacobian_penalty=jacobian_penalty,
grad_method=args.algo)
elif args.model == 'ffhgrudown':
print("Init model ffhgru with downsampled input ", args.algo, 'penalty: ', args.penalty, 'steps: ', timesteps)
model = FFhGRUdown(batch_size=args.batch_size, timesteps=timesteps, filt_size=15, num_iter=15, exp_name=args.name, jacobian_penalty=jacobian_penalty,
grad_method=args.algo)
elif args.model == 'ffhgrunogaussian':
print("Init model ffhgru without Gaussian ", args.algo, 'penalty: ', args.penalty, 'steps: ', timesteps)
model = FFhGRUwithoutGaussian(batch_size=args.batch_size, timesteps=timesteps, filt_size=15, num_iter=15, exp_name=args.name, jacobian_penalty=jacobian_penalty,
grad_method=args.algo)
elif args.model == 'ffhgrugabor':
print("Init model ffhgru with gabor init ", args.algo, 'penalty: ', args.penalty, 'steps: ', timesteps)
model = FFhGRUwithGabor(timesteps=timesteps, filt_size=15, num_iter=15, exp_name=args.name, jacobian_penalty=jacobian_penalty,
grad_method=args.algo)
elif args.model == 'ffstlstm':
print("Init model ffstlstm ", args.algo, 'penalty: ', args.penalty, 'steps: ', timesteps)
model = FFSTLSTM(timesteps=timesteps, filt_size=15, num_iter=15, exp_name=args.name, jacobian_penalty=jacobian_penalty,
grad_method=args.algo)
elif args.model == 'fflstm':
print("Init model fflstm ", args.algo, 'penalty: ', args.penalty, 'steps: ', timesteps)
model = FFLSTM(timesteps=timesteps, filt_size=15, num_iter=15, exp_name=args.name, jacobian_penalty=jacobian_penalty,
grad_method=args.algo)
elif args.model == 'lrcn_style':
print("Init LRCN Style model ", args.algo, 'penalty: ', args.penalty, 'steps: ', timesteps)
model = LRCNStyle(batch_size=args.batch_size, timesteps=timesteps, filt_size=15, num_iter=15, exp_name=args.name, jacobian_penalty=jacobian_penalty,
grad_method=args.algo)
elif args.model == 'lrcn_style_last_readout':
print("Init LRCN Style model ", args.algo, 'penalty: ', args.penalty, 'steps: ', timesteps)
model = LRCNStyleLast(batch_size=args.batch_size, timesteps=timesteps, filt_size=15, num_iter=15, exp_name=args.name, jacobian_penalty=jacobian_penalty,
grad_method=args.algo)
elif args.model == 'clstm':
print("Init model clstm ", args.algo, 'penalty: ', args.penalty, 'steps: ', timesteps)
model = ConvLSTM(timesteps=timesteps, filt_size=15, num_iter=15, exp_name=args.name, jacobian_penalty=jacobian_penalty,
grad_method=args.algo)
elif args.model == 'ff':
print("Init model feedforw ", args.algo)
model = FFConvNet(filt_size=15)
else:
        raise ValueError('Model not found: {}'.format(args.model))  # fail fast instead of hitting a NameError below
print(sum([p.numel() for p in model.parameters() if p.requires_grad]))
if args.parallel is True:
model = torch.nn.DataParallel(model).to(device)
print("Loading parallel finished on GPU count:", torch.cuda.device_count())
else:
model = model.to(device)
print("Loading finished")
# criterion = FocalLoss(gamma=2).to(device)
# criterion = torch.nn.BCELoss().to(device)
criterion = torch.nn.BCEWithLogitsLoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
# optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 3, gamma=0.7)
lr_init = args.lr
val_log_dict = {'loss': [], 'balacc': [], 'precision': [], 'recall': [], 'f1score': []}
train_log_dict = {'loss': [], 'balacc': [], 'precision': [], 'recall': [], 'f1score': [], 'jvpen': [], 'scaled_loss': []}
exp_loss = None
scale = torch.Tensor([1.0]).to(device)
for epoch in range(args.start_epoch, args.epochs):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
precision = AverageMeter()
recall = AverageMeter()
f1score = AverageMeter()
time_since_last = time.time()
model.train()
end = time.perf_counter()
for i, (imgs, target) in enumerate(train_loader):
# import pdb; pdb.set_trace()
data_time.update(time.perf_counter() - end)
# Get into pytorch format
imgs = torch.from_numpy(imgs.numpy())
imgs = imgs.permute(0,4,1,2,3)
target = torch.from_numpy(np.vectorize(ord)(target.numpy()))
imgs = imgs.to(device, dtype=torch.float)
target = target.to(device, dtype=torch.float)
# Convert imgs to 1-channel
imgs = imgs.mean(1, keepdim=True)
imgs = imgs / 255. # Normalize to [0, 1]
# Run training
output, jv_penalty, loss = model.forward(imgs, epoch, i, target, criterion)
loss = loss.mean()
losses.update(loss.data.item(), 1)
jv_penalty = jv_penalty.mean()
train_log_dict['jvpen'].append(jv_penalty.item())
if jacobian_penalty:
loss = loss + jv_penalty * 1e1
prec1, preci, rec, f1s = acc_scores(target[:], output.data[:])
top1.update(prec1.item(), 1)
precision.update(preci.item(), 1)
recall.update(rec.item(), 1)
f1score.update(f1s.item(), 1)
loss.backward()
optimizer.step()
optimizer.zero_grad()
batch_time.update(time.perf_counter() - end)
end = time.perf_counter()
if exp_logging and i % 20 == 0:
accv, precv, recv, f1sv, losv = validate(val_loader, model, criterion, device, logiters=3)
print('val f', f1sv)
val_log_dict['loss'].append(losv)
val_log_dict['balacc'].append(accv)
val_log_dict['precision'].append(precv)
val_log_dict['recall'].append(recv)
val_log_dict['f1score'].append(f1sv)
if i % (args.print_freq) == 0:
time_now = time.time()
print_string = 'Epoch: [{0}][{1}/{2}] lr: {lr:g} Time: {batch_time.val:.3f} (itavg:{timeiteravg:.3f}) '\
'({batch_time.avg:.3f}) Data: {data_time.val:.3f} ({data_time.avg:.3f}) ' \
'Loss: {loss.val:.8f} ({lossprint:.8f}) ({loss.avg:.8f}) bal_acc: {top1.val:.5f} '\
'({top1.avg:.5f}) preci: {preci.val:.5f} ({preci.avg:.5f}) rec: {rec.val:.5f} '\
'({rec.avg:.5f}) f1: {f1s.val:.5f} ({f1s.avg:.5f}) jvpen: {jpena:.12f} {timeprint:.3f} losscale:{losscale:.5f}'\
.format(epoch, i, len_train_loader, batch_time=batch_time, data_time=data_time, loss=losses,
lossprint=mean(losses.history[-args.print_freq:]), lr=optimizer.param_groups[0]['lr'],
top1=top1, timeiteravg=mean(batch_time.history[-args.print_freq:]),
timeprint=time_now - time_since_last, preci=precision, rec=recall,
f1s=f1score, jpena=jv_penalty.item(), losscale=scale.item())
print(print_string)
time_since_last = time_now
with open(results_folder + args.name + '.txt', 'a+') as log_file:
log_file.write(print_string + '\n')
#lr_scheduler.step()
train_log_dict['loss'].extend(losses.history)
train_log_dict['balacc'].extend(top1.history)
train_log_dict['precision'].extend(precision.history)
train_log_dict['recall'].extend(recall.history)
train_log_dict['f1score'].extend(f1score.history)
save_npz(epoch, train_log_dict, results_folder, 'train')
save_npz(epoch, val_log_dict, results_folder, 'val')
if (epoch + 1) % 1 == 0 or epoch == args.epochs - 1:
_, _, _, f1va, _ = validate(val_loader, model, criterion, device)
save_checkpoint({
'epoch': epoch,
'state_dict': model.state_dict(),
'best_prec1': f1va}, True, results_folder)
| [
"[email protected]"
] | |
a8585cd8a0146d0882917a004252adfda2b8a5ce | 46853d317dcb7784dc84504c86cb0719a4fe471b | /project/settings.py | 5601662d65f43cacad8db8f4973817faa03c7486 | [
"MIT"
] | permissive | naritotakizawa/django-inlineformset-sample | f9d41a2f4fee5db7f0b769ddb9696ae773aae61b | f4a3e4420b20ab677cb535f7778c48dbc32ea70b | refs/heads/master | 2020-05-05T13:12:34.783845 | 2019-04-08T04:12:39 | 2019-04-08T04:12:39 | 180,066,167 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,186 | py | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g8w(n%puz6vux51au4zic+i*-xg-f8w8-3h*cr1@992s(b%!qs'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'app.apps.AppConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') | [
"[email protected]"
] | |
bcf66c3d94df586464daf88e9c71a471c5b330e1 | b011e7ef91fc42265a7eec23e606666b91656caf | /EmployeeApplicationProject/EmployeeApplicationApp/apps.py | 0048d61da207f875ed1a8f0700a06c3e7e5d97ff | [
"Apache-2.0"
] | permissive | cs-fullstack-2019-spring/django-formclassv2-cw-bettyjware11 | baf683ac37408c87406796d0a049cb64e0077161 | f3c5bab69044ef4d783c784dbf6d2672b50445ba | refs/heads/master | 2020-04-25T17:28:48.717233 | 2019-03-01T20:06:56 | 2019-03-01T20:06:56 | 172,949,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from django.apps import AppConfig
class EmployeeapplicationappConfig(AppConfig):
name = 'EmployeeApplicationApp'
| [
"[email protected]"
] | |
1dbb63f19b1ee2220310935ef14589378a92a1d4 | be9f862686ac12926d10b6bae4673da68460dfad | /constants.py | 4cfd552fc1773c7e91676a3667e6b0203ada9367 | [] | no_license | SHAKOTN/hopper | c95da58b11dc81bbb28196946b13f5e9e709621f | 536e7dc7ed695e6b1d5e7ba4f958f4b273668544 | refs/heads/main | 2023-02-11T11:05:08.777006 | 2021-01-08T20:07:02 | 2021-01-08T20:07:02 | 324,402,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from enum import Enum
class WaysToTravel(Enum):
BY_SEA = "by-sea"
AIRBORNE = "airborne"
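# Illustrative usage sketch (added for clarity; not part of the original module).
# Enum members can be looked up by value, which is handy when parsing user input.
if __name__ == "__main__":
    assert WaysToTravel.BY_SEA.value == "by-sea"
    assert WaysToTravel("airborne") is WaysToTravel.AIRBORNE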
| [
"[email protected]"
] | |
be1600bc1975659320667879170d3beef2f42c17 | 3b3585bb12becfe72af03814cec645b0c8e6c779 | /satchmo/product/factories.py | c455d7c2e2cd3167f6f5ae085bd3097dcc955e76 | [
"BSD-2-Clause"
] | permissive | juderino/jelly-roll | aac548073487511c5b935d9fb20c5a995c665b9b | ccac91bf3aab06fec4f83a7f9eabfa22d41b922a | refs/heads/master | 2021-01-18T21:04:15.232998 | 2015-07-21T20:35:26 | 2015-07-21T20:35:26 | 36,597,803 | 0 | 0 | null | 2015-05-31T10:16:21 | 2015-05-31T10:16:21 | null | UTF-8 | Python | false | false | 1,062 | py | import factory
from decimal import Decimal
from django.contrib.sites.models import Site
from django.template.defaultfilters import slugify
from satchmo.product.models import (
ConfigurableProduct,
Product,
Price,
)
class ProductFactory(factory.django.DjangoModelFactory):
class Meta:
model = Product
site = factory.LazyAttribute(lambda a: Site.objects.get_current())
name = factory.Sequence(lambda n: 'Product {0}'.format(n))
slug = factory.LazyAttribute(lambda a: slugify(a.name))
@factory.post_generation
def create_price(obj, create, extracted, **kwargs):
PriceFactory(product=obj)
class TaxableProductFactory(ProductFactory):
taxable = True
class ConfigurableProductFactory(factory.django.DjangoModelFactory):
class Meta:
model = ConfigurableProduct
product = factory.SubFactory(ProductFactory)
class PriceFactory(factory.django.DjangoModelFactory):
class Meta:
model = Price
product = factory.SubFactory(TaxableProductFactory)
price = Decimal("5.00")
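# Illustrative usage sketch (an assumption, not part of the original module): in a
# Django test these factories build a persisted Product with a 5.00 Price attached
# via the create_price post_generation hook, e.g.:
#
#     product = TaxableProductFactory()
#     configurable = ConfigurableProductFactory()  # wraps a freshly built Product
#
# The calls are left commented out because instantiating a factory needs a
# configured database connection.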
| [
"[email protected]"
] | |
a26207237f873a85d24db735c0e883f1fc52fa4a | 3e8234adc26292085a78418d8475ca5fe83ef92a | /jupyterhub/sample_configs/cilogon/00-preamble.py | 644223ca6b98635583d6caeec1e984103bd820af | [
"MIT"
] | permissive | womullan/jupyterlabdemo | 5cef774de875606df5b2d759f1da9c1e766b5195 | e8da8920627d32d2d6fa60e2083a88a630ad5209 | refs/heads/master | 2021-08-14T06:59:29.128152 | 2017-11-13T21:34:23 | 2017-11-13T21:34:23 | 110,708,695 | 0 | 0 | null | 2017-11-14T15:32:30 | 2017-11-14T15:32:30 | null | UTF-8 | Python | false | false | 571 | py | """
This is the JupyterHub configuration directory that LSST DM-SQuaRE uses.
Different subconfiguration files in this directory do different things.
The major components are the options form, the spawner, the authenticator,
and the JupyterHub environment.
These files are mapped into the JupyterHub configuration as a ConfigMap.
Feel free to edit them to suit your needs.
The location is specified in the deployment file
/opt/lsst/software/jupyterhub/config/jupyterhub_config.py
and the contents of
/opt/lsst/software/jupyterhub/config/jupyterhub_config.d
"""
| [
"[email protected]"
] | |
fe0c2abe2824f50000d7ed13b9919502d8a2cff1 | a331ac86bf0dc281b1b819f70110deb873833698 | /python/python-cookbook/web.py | 421d3226809df35308d9acda67c1f97259eb17d1 | [] | no_license | sunhuachuang/study-demo | f0c2bbaca78a6735442039a33a051a8b715f8490 | 822dfec043d53678c62f5dce407477f9fdd42873 | refs/heads/master | 2020-07-22T06:16:00.361964 | 2018-01-08T09:50:50 | 2018-01-08T09:50:50 | 66,520,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,511 | py | from urllib import request, parse
import requests
from socketserver import BaseRequestHandler, StreamRequestHandler, ThreadingTCPServer, TCPServer, UDPServer
#from socket import socket, AF_INET, SOCK_STREAM
import time
import ssl
import multiprocessing
# 13 Sending and receiving large arrays with memoryviews
def send_from(arr, dest):
view = memoryview(arr).cast('B')
while len(view):
nsent = dest.send(view)
view = view[nsent:]
def recv_into(arr, source):
view = memoryview(arr).cast('B')
while len(view):
nrecv = source.recv_into(view)
view = view[nrecv:]
# test
# server
from socket import *
s = socket(AF_INET, SOCK_STREAM)
s.bind(('', 25000))
s.listen(1)
c, a = s.accept()
import numpy
a = numpy.arange(0.0, 50000000.0)
send_from(a, c)
# client
from socket import *
c = socket(AF_INET, SOCK_STREAM)
c.connect(('localhost', 25000))
import numpy
a = numpy.zeros(shape=50000000, dtype=float)
a[0:10]
recv_into(a, c)
a[0:10]
# 12 Understanding event-driven I/O
class EventHandler:
def fileno(self):
'Return the associated file descriptor'
        raise NotImplementedError('must implement')
def wants_to_receive(self):
'Return True if receiving is allowed'
return False
def handle_receive(self):
'Perform the receive operation'
pass
def wants_to_send(self):
'Return True if sending is requested'
return False
def handle_send(self):
'Send outgoing data'
pass
import select
def event_loop(handlers):
while True:
wants_recv = [h for h in handlers if h.wants_to_receive()]
wants_send = [h for h in handlers if h.wants_to_send()]
can_recv, can_send, _ = select.select(wants_recv, wants_send, [])
for h in can_recv:
h.handle_receive()
for h in can_send:
h.handle_send()
import socket
import time
class UDPServer(EventHandler):
def __init__(self, address):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(address)
def fileno(self):
return self.sock.fileno()
def wants_to_receive(self):
return True
class UDPTimeServer(UDPServer):
def handle_receive(self):
msg, addr = self.sock.recvfrom(1)
self.sock.sendto(time.ctime().encode('ascii'), addr)
class UDPEchoServer(UDPServer):
def handle_receive(self):
msg, addr = self.sock.recvfrom(8192)
self.sock.sendto(msg, addr)
if __name__ == '__main__':
handlers = [UDPTimeServer(('', 14000)), UDPEchoServer(('', 15000))]
event_loop(handlers)
# test
from socket import *
s = socket(AF_INET, SOCK_DGRAM)
s.sendto(b'', ('localhost', 14000))
s.recvfrom(128)
s.sendto(b'Hello', ('localhost', 15000))
s.recvfrom(128)
class TCPServer(EventHandler):
def __init__(self, address, client_handler, handler_list):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
self.sock.bind(address)
self.sock.listen(1)
self.client_handler = client_handler
self.handler_list = handler_list
def fileno(self):
return self.sock.fileno()
def wants_to_receive(self):
return True
def handle_receive(self):
client, addr = self.sock.accept()
# Add the client to the event loop's handler list
self.handler_list.append(
self.client_handler(client, self.handler_list))
class TCPClient(EventHandler):
def __init__(self, sock, handler_list):
self.sock = sock
self.handler_list = handler_list
self.outgoing = bytearray()
def fileno(self):
return self.sock.fileno()
def close(self):
self.sock.close()
# Remove myself from the event loop's handler list
self.handler_list.remove(self)
def wants_to_send(self):
return True if self.outgoing else False
def handle_send(self):
nsent = self.sock.send(self.outgoing)
self.outgoing = self.outgoing[nsent:]
class TCPEchoClient(TCPClient):
def wants_to_receive(self):
return True
def handle_receive(self):
data = self.sock.recv(8192)
if not data:
self.close()
else:
self.outgoing.extend(data)
if __name__ == '__main__':
handlers = []
handlers.append(TCPServer(('', 16000), TCPEchoClient, handlers))
event_loop(handlers)
from concurrent.futures import ThreadPoolExecutor
import os
class ThreadPoolHandler(EventHandler):
def __init__(self, nworkers):
if os.name == 'posix':
self.signal_done_sock, self.done_sock = socket.socketpair()
else:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('127.0.0.1', 0))
server.listen(1)
self.signal_done_sock = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
self.signal_done_sock.connect(server.getsockname())
self.done_sock, _ = server.accept()
server.close()
self.pending = []
self.pool = ThreadPoolExecutor(nworkers)
def fileno(self):
return self.done_sock.fileno()
# Callback that executes when the thread is done
def _complete(self, callback, r):
self.pending.append((callback, r.result()))
self.signal_done_sock.send(b'x')
# Run a function in a thread pool
def run(self, func, args=(), kwargs={}, *, callback):
r = self.pool.submit(func, *args, **kwargs)
r.add_done_callback(lambda r: self._complete(callback, r))
def wants_to_receive(self):
return True
# Run callback functions of completed work
def handle_receive(self):
# Invoke all pending callback functions
for callback, result in self.pending:
callback(result)
self.done_sock.recv(1)
self.pending = []
# 11 Passing a socket file descriptor between processes with multiprocessing
from multiprocessing.reduction import recv_handle, send_handle
import socket
def worker(in_p, out_p):
out_p.close()
while True:
fd = recv_handle(in_p)
print('CHILD: GOT FD', fd)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd) as s:
while True:
msg = s.recv(1024)
if not msg:
break
print('CHILD: RECV {!r}'.format(msg))
s.send(msg)
def server(address, in_p, out_p, worker_pid):
in_p.close()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
s.bind(address)
s.listen(1)
while True:
client, addr = s.accept()
print('SERVER: Got connection from', addr)
send_handle(out_p, client.fileno(), worker_pid)
client.close()
if __name__ == '__main__':
c1, c2 = multiprocessing.Pipe()
worker_p = multiprocessing.Process(target=worker, args=(c1, c2))
worker_p.start()
server_p = multiprocessing.Process(
target=server, args=(('', 20000), c1, c2, worker_p.pid))
server_p.start()
c1.close()
c2.close()
# servermp.py
from multiprocessing.connection import Listener
from multiprocessing.reduction import send_handle
import socket
def server(work_address, port):
# Wait for the worker to connect
work_serv = Listener(work_address, authkey=b'peekaboo')
worker = work_serv.accept()
worker_pid = worker.recv()
# Now run a TCP/IP server and send clients to worker
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
s.bind(('', port))
s.listen(1)
while True:
client, addr = s.accept()
print('SERVER: Got connection from', addr)
send_handle(worker, client.fileno(), worker_pid)
client.close()
if __name__ == '__main__':
import sys
if len(sys.argv) != 3:
print('Usage: server.py server_address port', file=sys.stderr)
raise SystemExit(1)
server(sys.argv[1], int(sys.argv[2]))
# workermp.py
from multiprocessing.connection import Client
from multiprocessing.reduction import recv_handle
import os
from socket import socket, AF_INET, SOCK_STREAM
def worker(server_address):
serv = Client(server_address, authkey=b'peekaboo')
serv.send(os.getpid())
while True:
fd = recv_handle(serv)
print('WORKER: GOT FD', fd)
with socket(AF_INET, SOCK_STREAM, fileno=fd) as client:
while True:
msg = client.recv(1024)
if not msg:
break
print('WORKER: RECV {!r}'.format(msg))
client.send(msg)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage: worker.py server_address', file=sys.stderr)
raise SystemExit(1)
worker(sys.argv[1])
# Variant that passes file descriptors over a Unix domain socket with sendmsg/recvmsg
# server.py
import socket
import struct
def send_fd(sock, fd):
'''
Send a single file descriptor.
'''
sock.sendmsg([b'x'],
[(socket.SOL_SOCKET, socket.SCM_RIGHTS, struct.pack('i', fd))])
ack = sock.recv(2)
assert ack == b'OK'
def server(work_address, port):
# Wait for the worker to connect
work_serv = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
work_serv.bind(work_address)
work_serv.listen(1)
worker, addr = work_serv.accept()
# Now run a TCP/IP server and send clients to worker
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
s.bind(('', port))
s.listen(1)
while True:
client, addr = s.accept()
print('SERVER: Got connection from', addr)
send_fd(worker, client.fileno())
client.close()
if __name__ == '__main__':
import sys
if len(sys.argv) != 3:
print('Usage: server.py server_address port', file=sys.stderr)
raise SystemExit(1)
server(sys.argv[1], int(sys.argv[2]))
# worker.py
import socket
import struct
def recv_fd(sock):
'''
Receive a single file descriptor
'''
msg, ancdata, flags, addr = sock.recvmsg(1,
socket.CMSG_LEN(struct.calcsize('i')))
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
assert cmsg_level == socket.SOL_SOCKET and cmsg_type == socket.SCM_RIGHTS
sock.sendall(b'OK')
return struct.unpack('i', cmsg_data)[0]
def worker(server_address):
serv = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
serv.connect(server_address)
while True:
fd = recv_fd(serv)
print('WORKER: GOT FD', fd)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd) as client:
while True:
msg = client.recv(1024)
if not msg:
break
print('WORKER: RECV {!r}'.format(msg))
client.send(msg)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage: worker.py server_address', file=sys.stderr)
raise SystemExit(1)
worker(sys.argv[1])
exit(1)
# 10 Adding SSL to a network service with the ssl module
KEYFILE = 'privatekey.pem'
CERTFILE = 'cert.pem'
def echo_client(s):
while True:
data = s.recv(8192)
if data == b'over':
s.send(b'connection closed')
break
s.send(data)
s.close()
print('Connection closed')
def echo_server(address):
s = socket(AF_INET, SOCK_STREAM)
s.bind(address)
s.listen(1)
s_ssl = ssl.wrap_socket(
s, keyfile=KEYFILE, certfile=CERTFILE, server_side=True)
while True:
try:
c, a = s_ssl.accept()
print('Got connection', c, a)
echo_client(c)
except Exception as e:
print('{}: {}'.format(e.__class__.__name__, e))
echo_server(('', 20000))
# test
s = socket(AF_INET, SOCK_STREAM)
s_ssl = ssl.wrap_socket(s, cert_reqs=ssl.CERT_REQUIRED, ca_certs=CERTFILE)
s_ssl.connect(('localhost', 20000))
s_ssl.send(b'aaaa')
s_ssl.recv(8192)  # recv() requires a buffer size
exit(1)
# 9 Simple client authentication with hmac
import os
import hmac
def client_authenticate(connection, secert_key):
message = connection.recv(32)
    hash = hmac.new(secert_key, message, 'sha256')  # explicit digestmod (required on Python 3.8+)
digest = hash.digest()
connection.send(digest)
def server_authenticate(connection, secert_key):
message = os.urandom(32)
connection.send(message)
    hash = hmac.new(secert_key, message, 'sha256')  # must use the same digest as the client
digest = hash.digest()
response = connection.recv(len(digest))
return hmac.compare_digest(digest, response)
# test server
secert_key = b'secertsun'
def echo_handler(client_sock):
if not server_authenticate(client_sock, secert_key):
client_sock.close()
return
while True:
msg = client_sock.recv(8192)
if not msg:
break
client_sock.sendall(msg)
def echo_server(address):
s = socket(AF_INET, SOCK_STREAM)
s.bind(address)
s.listen(5)
while True:
c, a = s.accept()
echo_handler(c)
echo_server(('', 20000))
# test client
s = socket(AF_INET, SOCK_STREAM)
s.connect(('localhost', 20000))
client_authenticate(s, secert_key)
s.send(b'hello world')
resp = s.recv(1024)
exit(1)
# 8 Implementing remote procedure calls
import pickle
class RPCHandler:
def __init__(self):
self._functions = {}
def register_function(self, func):
self._functions[func.__name__] = func
def handle_connection(self, connection):
try:
while True:
func_name, args, kwargs = pickle.loads(connection.recv())
try:
r = self._functions[func_name](*args, **kwargs)
connection.send(pickle.dumps(r))
except Exception as e:
connection.send(pickle.dumps(e))
except EOFError:
pass
from multiprocessing.connection import Listener
from threading import Thread
def rpc_server(handler, address, authkey):
sock = Listener(address, authkey=authkey)
while True:
client = sock.accept()
t = Thread(target=handler.handle_connection, args=(client,))
t.daemon = True
t.start()
def add(x, y):
return x + y
def sub(x, y):
return x - y
handler = RPCHandler()
handler.register_function(add)
handler.register_function(sub)
rpc_server(handler, ('localhost', 20000), authkey=b'sun')
# Client-side RPC proxy class that sends requests
import pickle
class RPCProxy:
def __init__(self, connection):
self._connection = connection
def __getattr__(self, name):
def do_rpc(*args, **kwargs):
self._connection.send(pickle.dumps((name, args, kwargs)))
result = pickle.loads(self._connection.recv())
if isinstance(result, Exception):
raise result
return result
return do_rpc
from multiprocessing.connection import Client
c = Client(('localhost', 20000), authkey=b'sun')
proxy = RPCProxy(c)
proxy.add(2, 3)
proxy.sub(2, 3)
# 7 Communicating between Python interpreters with multiprocessing.connection
from multiprocessing.connection import Listener
import traceback
def echo_client(conn):
try:
while True:
msg = conn.recv()
conn.send(msg)
except EOFError:
print('Connection closed')
def echo_server(address, authkey):
serv = Listener(address, authkey=authkey)
while True:
try:
client = serv.accept()
echo_client(client)
except Exception:
traceback.print_exc()
echo_server(('', 20000), authkey=b'peekaboo')
exit(1)
# test
from multiprocessing.connection import Client
c = Client(('localhost', 20000), authkey=b'peekaboo')
c.send('hello')
c.recv()
exit(1)
# 6 Simple remote procedure calls with XML-RPC
from xmlrpc.server import SimpleXMLRPCServer
class KeyValueServer:
_rpc_methods_ = ['get', 'set', 'delete', 'exists', 'keys']
def __init__(self, address):
self._data = {}
self._serv = SimpleXMLRPCServer(address, allow_none=True)
for name in self._rpc_methods_:
self._serv.register_function(getattr(self, name))
def get(self, name):
return self._data[name]
def set(self, name, value):
self._data[name] = value
def delete(self, name):
del self._data[name]
def exists(self, name):
return name in self._data
def keys(self):
return list(self._data)
def serve_forever(self):
self._serv.serve_forever()
if __name__ == '__main__':
kvserv = KeyValueServer(('', 20000))
kvserv.serve_forever()
# test
from xmlrpc.client import ServerProxy
s = ServerProxy('http://localhost:20000', allow_none=True)
s.set('foo', '123foo')
s.set('bar', [1, 2, 3])
s.keys()
s.get('foo')
s.get('bar')
s.delete('foo')
s.exists('foo')
exit(1)
# 5 Creating a simple REST interface over WSGI
import cgi
def notfound_404(environ, start_response):
start_response('404 Not found', [('Content-type', 'text/plain')])
return [b'Not found']
# Path dispatcher that maps (method, path) pairs to handler functions
class PathDispatcher:
def __init__(self):
self.pathmap = {}
def __call__(self, environ, start_response):
path = environ['PATH_INFO']
params = cgi.FieldStorage(
environ['wsgi.input'],
environ=environ
)
method = environ['REQUEST_METHOD'].lower()
environ['params'] = {key: params.getvalue(key) for key in params}
handler = self.pathmap.get((method, path), notfound_404)
return handler(environ, start_response)
def register(self, method, path, function):
self.pathmap[method.lower(), path] = function
return function
_hello_resp = '''
<html>
<head>
<title>Hello {name}</title>
</head>
<body>
<h1>Hello {name}!</h1>
</body>
</html>
'''
def hello_world(environ, start_response):
start_response('200 OK', [('Content-type', 'text/html')])
params = environ['params']
resp = _hello_resp.format(name=params.get('name'))
yield resp.encode('utf-8')
_localtime_resp = '''\
<?xml version="1.0"?>
<time>
<year>{t.tm_year}</year>
<month>{t.tm_mon}</month>
<day>{t.tm_mday}</day>
<hour>{t.tm_hour}</hour>
<minute>{t.tm_min}</minute>
<second>{t.tm_sec}</second>
</time>
'''
def localtime(environ, start_response):
start_response('200 OK', [('Content-type', 'application/xml')])
resp = _localtime_resp.format(t=time.localtime())
yield resp.encode('utf-8')
if __name__ == '__main__':
from wsgiref.simple_server import make_server
dispatcher = PathDispatcher()
dispatcher.register('GET', '/hello', hello_world)
dispatcher.register('GET', '/localtime', localtime)
httpd = make_server('', 5000, dispatcher)
print('Serving on port 5000...')
httpd.serve_forever()
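# Quick manual check for the REST interface above (illustrative; assumes the
# server is running locally on port 5000):
#   requests.get('http://localhost:5000/hello?name=World').text  -> contains 'Hello World'
#   requests.get('http://localhost:5000/localtime').text         -> XML time document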
exit(1)
# 4 Generating the set of IP addresses in a CIDR network with the ipaddress module
import ipaddress
net = ipaddress.ip_network('123.45.67.64/27')
print(net)  # 123.45.67.64/27
for n in net:
pass
# print(n)
print(net.num_addresses) # 32
print(net[0]) # 123.45.67.64
print(net[net.num_addresses - 1]) # 123.45.67.95
a = ipaddress.ip_address('123.45.67.65')
print(a in net) # True
b = ipaddress.ip_address('123.45.67.96')
print(b in net) # False
inet = ipaddress.ip_interface('123.45.67.64/27')
print(type(inet)) # <class 'ipaddress.IPv4Interface'>
print(inet.network)  # 123.45.67.64/27 (an IPv4Network)
print(inet.ip) # 123.45.67.64
exit(1)
# 3 Creating a UDP server
class TimeHandler(BaseRequestHandler):
def handle(self):
print('Got connection from', self.client_address)
msg, sock = self.request
resp = time.ctime()
sock.sendto(resp.encode('ascii'), self.client_address)
if __name__ == '__main__':
serv = UDPServer(('', 20000), TimeHandler)
serv.serve_forever()
exit(1)
# 2 Creating a TCP server with socketserver
class EchoHandler(BaseRequestHandler):
def handle(self):
print('Got connection from', self.client_address)
while True:
msg = self.request.recv(8192)
if not msg:
break
self.request.send(msg)
class StreamHandler(StreamRequestHandler):
def handle(self):
print('Got connection from', self.client_address)
for line in self.rfile:
self.wfile.write(line)
if __name__ == '__main__':
    serv = TCPServer(('', 20000), EchoHandler)  # single-threaded: serves one client at a time
    # serv = ThreadingTCPServer(('', 20000), EchoHandler)  # threaded: handles concurrent clients
serv.serve_forever()
# Variant with a pre-allocated pool of worker threads
if __name__ == '__main__':
    from threading import Thread
    NWORKERS = 16
    serv = TCPServer(('', 20000), EchoHandler)
    for n in range(NWORKERS):
        t = Thread(target=serv.serve_forever)  # each worker runs the accept/serve loop
        t.daemon = True
        t.start()
    serv.serve_forever()
# Manual echo server implemented directly on top of sockets
from socket import socket, AF_INET, SOCK_STREAM
def echo_handler(address, client_sock):
print('Got connetction from {}'.format(address))
while True:
msg = client_sock.recv(8192)
if not msg:
break
client_sock.sendall(msg)
client_sock.close()
def echo_server(address, backlog=5):
sock = socket(AF_INET, SOCK_STREAM)
sock.bind(address)
sock.listen(backlog)
while True:
client_sock, client_addr = sock.accept()
echo_handler(client_addr, client_sock)
if __name__ == '__main__':
echo_server(('', 20000))
exit(1)
# 1 Interacting with an HTTP service as a client (test site: http://httpbin.org)
url = 'http://127.0.0.1:5000/'
params = {
'name1': 'name1value',
'name2': 'name2value'
}
querystring = parse.urlencode(params)
# GET
u = request.urlopen(url + '?' + querystring)
resp = u.read()
print(resp)
# POST
u = request.urlopen(url, querystring.encode('ascii'))
resp = u.read()
print(resp)
# header
headers = {
'User-agent': 'none/ofyourbusiness',
'Spam': 'Eggs',
'Token': 'aaaaaaaa'
}
req = request.Request(url, querystring.encode('ascii'), headers=headers)
u = request.urlopen(req)
resp = u.read()
print(resp)
# requests 库
resp = requests.post(url, data=params, headers=headers)
text = resp.text
print(text)
# headers
resp = requests.head('https://www.python.org/')
print(resp.status_code)
# print(resp.headers['last-modified'])
print(resp.headers['content-type'])
print(resp.headers['content-length'])
# login auth
resp = requests.get('http://pypi.python.org/pypi?:action=login',
auth=('user', 'password'))
print(resp)
# cookies
resp1 = requests.get('https://www.python.org/')
resp2 = requests.get('https://www.python.org', cookies=resp1.cookies)
# files
files = {'file': ('data.csv', open('tmp/data.d', 'rb'))}
resp = requests.post(url, files=files)
print(resp)
# 底层库 http.client
from http.client import HTTPConnection
c = HTTPConnection('www.python.org', 80)
c.request('HEAD', '/')
resp = c.getresponse()
print('status:', resp.status)
for name, value in resp.getheaders():
print(name, ':', value)
# Basic authentication with urllib ('request' comes from `from urllib import request` above)
auth = request.HTTPBasicAuthHandler()
auth.add_password('pypi', 'http://pypi.python.org', 'username', 'password')
opener = request.build_opener(auth)
r = request.Request('http://pypi.python.org/pypi?:action=login')
u = opener.open(r)
resp = u.read()
| [
"[email protected]"
] | |
04b790b2a3106481a3b23156f43208a786867d2a | 98b9521915fc87b963344e33ebfd779b02e9c33f | /virtual/bin/symilar | 3b07a38d2af2742b5df49d3a7a393f6597f2422c | [
"MIT"
] | permissive | Jeffmusa/smartFridge | 33f35483ddfd310f54b4aea7ccae4f6caf57c8bf | 89c5dfca68ca80a36c062aa1bb195e6cf8f1a10f | refs/heads/master | 2020-04-03T15:49:34.854722 | 2018-11-01T12:41:07 | 2018-11-01T12:41:07 | 155,379,225 | 0 | 0 | MIT | 2018-10-30T12:10:30 | 2018-10-30T12:10:29 | null | UTF-8 | Python | false | false | 255 | #!/home/vicklyne/smartFridge/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_symilar())
| [
"[email protected]"
] | ||
a31b97f409e4a93874a09654a33245ce9caf47fb | 6531a7c162f83cfd5e7a3c4ea8a93dae58717ff4 | /le_social/openid/middleware.py | 518b4a19c9f0638872c23ef170a66d27aae3753d | [] | no_license | brutasse/django-le-social | 4a9103a4f24abcc054cdaaa222af9f349f24a03b | abf8a3bc57cf5f25d83b9806406cef8f9b87da63 | refs/heads/master | 2021-07-11T18:00:14.420981 | 2016-10-17T12:06:00 | 2016-10-17T12:06:00 | 1,677,246 | 26 | 4 | null | 2016-09-25T18:05:17 | 2011-04-28T19:07:01 | Python | UTF-8 | Python | false | false | 199 | py | class OpenIDMiddleware(object):
"""
Populates request.openid and request.openids
"""
def process_request(self, request):
request.openids = request.session.get('openids', [])
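# Illustrative wiring sketch (an assumption, not part of this module): because the
# middleware reads the 'openids' session key, it must run after Django's session
# middleware, e.g. in settings:
#
#     MIDDLEWARE_CLASSES = (
#         'django.contrib.sessions.middleware.SessionMiddleware',
#         'le_social.openid.middleware.OpenIDMiddleware',
#     )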
| [
"[email protected]"
] |