repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
ejherran/acaest | Listas/DoubleLoopList.py | 1 | 4771 | # - Class to create a double-linked circular list.
from DoubleNode import DoubleNode
class DoubleLoopList():
def __init__(self):
self.head = None # Master reference to the first item in the list
        self.tail = None # Master reference to the last item in the list.
def add(self, value): # Add elements in stack format.
new = DoubleNode()
new.value = value
if(self.head == None):
self.head = new
self.tail = new
            self.head.R = self.tail # Link the head to the tail of the list.
            self.tail.L = self.head # Link the tail back to the head.
else:
new.L = self.head # Cross references: L for the next, R for the previous.
self.head.R = new
new.R = self.tail
self.tail.L = new
self.head = new # The header moves to the new node.
    def search(self, value):
        aux = self.head # Auxiliary reference for scrolling through the list.
        if(aux == None): # Empty list: nothing to search.
            return None
        while(aux.L != self.head):
            if(aux.value == value):
                break
            aux = aux.L # Go to the next list item.
        if(aux.value == value):
            return aux
        else:
            return None
    def edit(self, old, new): # Finds the node containing the old value and replaces its payload.
target = self.search(old)
if(target != None):
target.value = new # Updates the node payload.
def insertBefore(self, tar, value):
target = self.search(tar)
if(target != None):
if(target != self.head):
new = DoubleNode()
new.value = value
bef = target.R # Obtains the node immediately preceding the target node.
bef.L = new # Cross references.
new.R = bef
new.L = target
target.R = new
else:
self.add(value)
def insertAfter(self, tar, value):
target = self.search(tar)
if(target != None):
new = DoubleNode()
new.value = value
if(target != self.tail):
aft = target.L # Retrieves the node immediately following.
aft.R = new
new.L = aft
else:
self.tail = new
self.head.R = self.tail
target.L = new
new.R = target
def delete(self, value):
target = self.search(value)
if(target != None):
if(target == self.head):
                self.head = self.head.L # Preserve the list by moving the head to the next node.
                self.head.R = self.tail # Update the double link to the tail of the list.
                self.tail.L = self.head
            elif(target == self.tail):
                self.tail = self.tail.R # Preserve the list by moving the tail to the previous node.
                self.tail.L = self.head # Update the double link to the head of the list.
                self.head.R = self.tail
else:
bef = target.R
aft = target.L
bef.L = aft
aft.R = bef
target.L = None # Break the node links.
target.R = None
del(target) # Deletes the node from memory.
    def printHead(self):
        aux = self.head
        while(aux != None):
            print(aux.value)
            aux = aux.L
            if(aux == self.head):
                break
    def printTail(self):
        aux = self.tail
        while(aux != None):
            print(aux.value)
            aux = aux.R
            if(aux == self.tail):
                break
| gpl-3.0 | -1,631,237,601,568,324,900 | 29.780645 | 112 | 0.390275 | false |
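A minimal usage sketch for the class above (not part of the archived file); it assumes DoubleNode.py from the same repository is importable and exposes the value, L and R attributes used here.
# Illustrative usage of DoubleLoopList; assumes DoubleNode.py from the same repo is on the path.
from DoubleLoopList import DoubleLoopList

lst = DoubleLoopList()
for v in (1, 2, 3):
    lst.add(v)             # stack-style insertion: head is now 3, traversal gives 3, 2, 1
lst.insertAfter(2, 99)     # place 99 immediately after the node holding 2
lst.edit(1, 10)            # replace the payload 1 with 10
lst.delete(3)              # unlink the head node holding 3
lst.printHead()            # prints 2, 99, 10 starting from the head
node = lst.search(99)      # returns the DoubleNode holding 99, or None if absent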
karel-brinda/rnftools | rnftools/mishmash/MasonIllumina.py | 1 | 5762 | import rnftools
from .Source import Source
import snakemake
import os
class MasonIllumina(Source):
"""Class for the Mason - Illumina mode (https://www.seqan.de/projects/mason/).
    Single-end and paired-end read simulations are supported. For paired-end simulations,
lengths of both ends must be equal.
Args:
fasta (str): File name of the genome from which read tuples are created (FASTA file). Corresponding Mason parameter: ``-ir, --input-reference``.
sequences (set of int or str): FASTA sequences to extract. Sequences can be specified either by their ids, or by their names.
coverage (float): Average coverage of the genome (if number_of_reads specified, then it must be equal to zero).
number_of_read_tuples (int): Number of read tuples (if coverage specified, then it must be equal to zero). Corresponding Mason parameter: ``-n, --num-fragments``.
read_length_1 (int): Length of the first read. Corresponding Mason parameter: ``--illumina-read-length``.
        read_length_2 (int): Length of the second read (if zero, then single-end reads are simulated). Corresponding Mason parameter: ``--illumina-read-length``.
distance (int): Mean inner distance between reads. Corresponding Mason parameter: ``--fragment-mean-size``.
        distance_deviation (int): Standard deviation of inner distances between reads. Corresponding Mason parameter: ``--fragment-size-std-dev``.
rng_seed (int): Seed for simulator's random number generator. Corresponding Mason parameter: ``--seed``.
other_params (str): Other parameters which are used on command-line.
Raises:
ValueError
"""
def __init__(
self,
fasta,
sequences=None,
coverage=0,
number_of_read_tuples=0,
read_length_1=100,
read_length_2=0,
distance=500,
distance_deviation=50,
rng_seed=1,
other_params="",
):
if read_length_2 == 0:
ends = 1
else:
ends = 2
self.distance = int(distance)
self.distance_deviation = int(distance_deviation)
if read_length_1 != read_length_2:
rnftools.utils.error(
"Mason can simulate only pairs with equal lengths",
program="RNFtools",
subprogram="MIShmash",
exception=ValueError,
)
super().__init__(
fasta=fasta,
sequences=sequences,
reads_in_tuple=ends,
rng_seed=rng_seed,
)
self.read_length_1 = read_length_1
self.read_length_2 = read_length_2
self.other_params = other_params
if coverage * number_of_read_tuples != 0:
rnftools.utils.error(
"coverage or number_of_read_tuples must be equal to zero", program="RNFtools", subprogram="MIShmash",
exception=ValueError
)
self.number_of_read_tuples = number_of_read_tuples
self.coverage = coverage
self.mason_prefix = os.path.join(
self.get_dir(),
"mason_files.{}.{}".format("se" if self.number_of_read_tuples == 1 else "pe", self.genome_id)
)
self._sam_fn = self.mason_prefix + ".sam"
def get_input(self):
return [
self._fa_fn,
self._fai_fn,
]
def get_output(self):
if self._reads_in_tuple == 1:
fqs = [self.mason_prefix + "1.fq"]
else:
fqs = [self.mason_prefix + "1.fq", self.mason_prefix + "2.fq"]
return [
self._fq_fn,
self._sam_fn,
] + fqs
def create_fq(self):
if self.coverage == 0 and self.number_of_read_tuples == 0:
for x in self.get_output():
with open(x, "w+") as f:
f.write(os.linesep)
else:
if self.coverage == 0:
genome_size = os.stat(self._fa_fn).st_size
self.coverage = 1.0 * self.number_of_read_tuples * (self.read_length_1 +
self.read_length_2) / (0.8 * genome_size)
if self._reads_in_tuple == 2:
paired_params = '--fragment-mean-size {dist} --fragment-size-std-dev {dist_dev} -or "{fq2}"'.format(
dist=self.distance,
dist_dev=self.distance_deviation,
fq2=self.mason_prefix + "2.fq",
)
else:
paired_params = ""
command = """
"{mason}" \
-n {number_of_read_tuples} \
-ir "{fasta}" \
--illumina-read-length {rlen} \
--seed {rng_seed} \
-o "{fq1}" \
-oa "{sam}" \
{paired_params} \
{other_params} \
> /dev/null
""".format(
mason="mason_simulator",
paired_params=paired_params,
fasta=self._fa_fn,
rlen=self.read_length_1,
other_params=self.other_params,
number_of_read_tuples=self.number_of_read_tuples,
fq1=self.mason_prefix + "1.fq",
rng_seed=self._rng_seed,
sam=self._sam_fn,
)
rnftools.utils.shell(command)
with open(self._fq_fn, "w+") as fq_fo:
with open(self._fai_fn) as fai_fo:
self.recode_sam_reads(
sam_fn=self._sam_fn,
fastq_rnf_fo=fq_fo,
fai_fo=fai_fo,
genome_id=self.genome_id,
number_of_read_tuples=10**9,
simulator_name="mason",
allow_unmapped=False,
)
| mit | -9,209,962,143,865,474,000 | 35.700637 | 164 | 0.537313 | false |
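For orientation, a hedged sketch of how this source might be declared: only the keyword arguments come from the constructor above, while the surrounding rnftools.mishmash.sample(...) call and the file names are assumptions about the MIShmash interface.
# Illustrative only: constructor arguments are taken from __init__ above; the
# sample(...) helper and the FASTA file name are assumptions, not verified API.
import rnftools

rnftools.mishmash.sample("mason_example", reads_in_tuple=2)  # assumed MIShmash helper
rnftools.mishmash.MasonIllumina(
    fasta="genome.fa",            # hypothetical reference FASTA
    number_of_read_tuples=10000,  # coverage stays 0, so the tuple count drives the run
    read_length_1=100,
    read_length_2=100,            # equal lengths, as required by the check above
    distance=500,
    distance_deviation=50,
)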
bhairavmehta95/flashcard-helper-alexa-skill | lambda_function.py | 1 | 24184 | """
Bhairav Mehta - Alexa Hack the Dorm Competition
December 2016 / January 2017
Alexa Abstractions based on a library developed by Anjishu Kumar
"""
from ask import alexa
import quizlet
import json
from random import randint
###### HANDLERS ######
# Lambda Handler function
def lambda_handler(request_obj, context=None):
return alexa.route_request(request_obj)
# Default Handler
@alexa.default
def default_handler(request):
alexa_response_str = "Welcome! Let's start studying! Please give me your pin code, found on the link listed in the Alexa app. Start by saying 'my pin is', followed by your unique pin number on the website."
return alexa.create_response(message=alexa_response_str)
###### END HANDLERS ######
###### REQUESTS ######
# Launch the App
@alexa.request("LaunchRequest")
def launch_request_handler(request):
alexa_response_str = "Welcome! Let's start studying! Please give me your pin code, found on the link listed in the Alexa app. Start by saying 'my pin is', followed by your unique pin number on the website."
return alexa.create_response(message=alexa_response_str)
# End the session
@alexa.request("SessionEndedRequest")
def session_ended_request_handler(request):
alexa_response_str = "Goodbye, and thanks for using Flashcard Helper!"
return alexa.create_response(message=alexa_response_str, end_session=True)
###### END REQUESTS ######
###### INTENTS ######
# Verifies users pin code
@alexa.intent('PinCodeIntent')
def pin_code_confirm_intent_hander(request):
pin = None
### error checking ###
try:
pin = request.slots['pin_code']
except:
alexa_response_str = "I couldn't find a username associated with that pin. Please try again."
return alexa.create_response(message=alexa_response_str, end_session=True)
### end error checking ###
# verify user by querying database using pin
username = quizlet.verify_user(pin)
# add username to session, welcome the user
if username != None:
request.session['username'] = username
request.session['pin_code_verified'] = True
alexa_response_str = "Welcome to Flashcard Helper {}. You can ask me a bunch of things, like: 'List my sets', or 'Please help'. To get started with studying, say, 'study', \
followed by your set name from quizlet.com. For example, \
to study your Quizlet set named History, you can say 'let's study history'. What would you like to do?".format(username)
return alexa.create_response(message=alexa_response_str)
# speak out the digits so the user can hear what the input was
else:
alexa_response_str = "<speak> The pin I heard was <say-as interpret-as='digits'> {} </say-as> and I couldn't find a username associated with that pin. Please try again. </speak>".format(pin)
return alexa.create_response(message=alexa_response_str, is_ssml=True, end_session=True)
# List all the sets you own and can study from
@alexa.intent("ListAllSetsIntent")
def list_all_sets_intent_handler(request):
if request.session.get('pin_code_verified') != True:
alexa_response_str = "Please verify your pin first, using the link listed in the Alexa app. Start by saying, 'my pin is', followed by your unique pin number on the website."
return alexa.create_response(message=alexa_response_str, end_session=True)
# get all of the sets
user_id = request.session['username']
sets = quizlet.get_all_sets_from_user(user_id)
all_sets_titles = []
set_id = None
set_title = None
# add each title to the list
for set_ in sets:
all_sets_titles.append(set_['title'])
# user has no sets
if all_sets_titles == []:
alexa_response_str = "Oops, it looks like you don't have any sets. Please visit quizlet.com to create a flashcard set, and then I will be more than happy to help! Goodbye, \
and I hope to speak to you again soon!"
return alexa.create_response(message=alexa_response_str, end_session=True)
# prepare response string
all_sets_string = ", ".join(all_sets_titles)
alexa_response_str = "Here are the sets you can choose from: {}. Which set would you like to work with? Start by saying, let's study, followed by the set you want to choose.".format(all_sets_string)
# return message to user
return alexa.create_response(message=alexa_response_str)
# Exiting the application
@alexa.intent("EndSessionIntent")
def end_session_intent(request):
alexa_response_str = "Goodbye, and thanks for using Flashcard Helper!"
return alexa.create_response(message=alexa_response_str, end_session=True)
# Review all of the wrong answers a user had during a session
@alexa.intent("ReviewWrongAnswersIntent")
def review_all_wrong_answers_intent_handler(request):
if request.session.get('pin_code_verified') != True:
alexa_response_str = "Please verify your pin first, using the link listed in the Alexa app. Start by saying, 'my pin is', followed by your unique pin number on the website."
return alexa.create_response(message=alexa_response_str, end_session=True)
if request.session.get('study_session_started') == True:
if request.session['incorrect_terms'] != []:
request.session['reviewing_wrong'] = True
request.session['reviewing_index'] = 0
alexa_response_str = "Sure, we can definitely review your most troublesome words. To start, please define {}.".format(request.session['incorrect_terms'][0][1])
return alexa.create_response(message=alexa_response_str)
else:
index = request.session['current_index']
alexa_response_str = "Lucky for you, you didn't get anything wrong! Now, please define {}.".format(request.session['all_terms'][index]['term'])
return alexa.create_response(message=alexa_response_str)
# user hasn't started a study session
else:
# get all of the sets
user_id = request.session['username']
sets = quizlet.get_all_sets_from_user(user_id)
all_sets_titles = []
set_id = None
set_title = None
# add each title to the list
for set_ in sets:
all_sets_titles.append(set_['title'])
# user has no sets
if all_sets_titles == []:
alexa_response_str = "Oops. You haven't started a study session, and it looks like you don't have any sets. Please visit quizlet.com to create a flashcard set, and then I will be more than happy to help! Goodbye, \
and I hope to speak to you again soon!"
return alexa.create_response(message=alexa_response_str, end_session=True)
# prepare response string
all_sets_string = ", ".join(all_sets_titles)
alexa_response_str = "Uh oh, you haven't started a study session. Here are the sets you can choose from: {}. Which set would you like to work with? Start by saying, let's study, followed by the set you want to choose.".format(all_sets_string)
# return message to user
return alexa.create_response(message=alexa_response_str)
# Starts a study session when given a set title
@alexa.intent("StartStudySessionIntent")
def start_study_session_intent_handler(request):
if request.session.get('pin_code_verified') != True:
alexa_response_str = "Please verify your pin first, using the link listed in the Alexa app. Start by saying, 'my pin is', followed by your unique pin number on the website."
return alexa.create_response(message=alexa_response_str, end_session=True)
### error checking ###
# grabs the title of the set from the slot
try:
title = request.slots["title"]
except:
title = None
### end error checking ###
user_id = request.session['username']
# clears any old session data
try:
request.session.clear()
except:
pass
# resets so user doesn't have to relog in
request.session['username'] = user_id
request.session['pin_code_verified'] = True
# get all of the sets
sets = quizlet.get_all_sets_from_user(user_id)
all_sets_titles = []
# variables to store API call variables
set_id = None
set_title = None
for set_ in sets:
all_sets_titles.append(set_['title'])
# found the set
if title.lower() == str(set_['title']).lower():
set_id = set_['id']
set_title = set_['title']
break
# returns all of the options the user can choose from
if set_id == None and all_sets_titles != []:
all_sets_string = ", ".join(all_sets_titles)
alexa_response_str = "Oops! Couldn't find that set. Here are the sets you can choose from: {}. Which set would you like to work with? Start by saying, let's study, followed by the set you want to choose.".format(all_sets_string)
return alexa.create_response(message=alexa_response_str)
# the user has no sets
elif set_id == None and all_sets_titles == []:
alexa_response_str = "Oops, it looks like you don't have any sets. Please visit quizlet.com to create a flashcard set, and then I will be more than happy to help! Goodbye, \
and I hope to speak to you again soon!"
return alexa.create_response(message=alexa_response_str, end_session=True)
# found the set, looks for user confirmation
else:
request.session['study_session_set_id'] = set_id
request.session['awaiting_set_id_confirmation'] = True
alexa_response_str = "I have found your set. Can you confirm you want to study the set named {}?".format(set_title)
return alexa.create_response(message=alexa_response_str)
# Ends the study session, gives user statistics for session
@alexa.intent("EndStudySessionIntent")
def end_study_session_intent_handler(request):
if request.session.get('pin_code_verified') != True:
alexa_response_str = "Please verify your pin first, using the link listed in the Alexa app. Start by saying, 'my pin is', followed by your unique pin number on the website."
return alexa.create_response(message=alexa_response_str, end_session=True)
# study session had been started
try:
total_percent_correct = int((float(request.session['correct_count']) / request.session['total_terms']) * 100)
alexa_response_str = "I am sorry you want to leave. During this session, you got {} correct and {} incorrect out of {} \
total terms. You got {} percent correct. Goodbye, and hopefully we speak again soon!".format(request.session['correct_count'], \
request.session['incorrect_count'], request.session['total_terms'], total_percent_correct)
return alexa.create_response(message=alexa_response_str,end_session=True)
# any error (i.e no session was started)
except:
alexa_response_str = "Goodbye, and thank you for using Flashcard Helper!"
return alexa.create_response(message=alexa_response_str, end_session=True)
@alexa.intent("HelpIntent")
def help_intent_handler(request):
if request.session.get('pin_code_verified') != True:
alexa_response_str = "Make sure you visit the website listed in the Alexa app in order to link your account. Then, say 'my pin is', followed by your unique pin number on the website to get started."
return alexa.create_response(message=alexa_response_str, end_session=True)
elif request.session.get('study_session_started') == True:
index = request.session['current_index']
word_to_define = request.session['all_terms'][index]['term']
alexa_response_str = "To answer a question, start with 'the answer is', followed by the definition of the flashcard. Or, if you want to review some incorrect terms, say, 'help me review my incorrect terms'. \
Now, back to the study session. Can you please define {}?".format(word_to_define)
return alexa.create_response(message=alexa_response_str)
else:
alexa_response_str = "Here are some things you can ask me after the pin confirmation. The Alexa app is the best way to view my full command set, but to start, you can ask me to \
list your flashcard sets, help you study a specific set, or help review your incorrect answers. What would you like to do?"
return alexa.create_response(message=alexa_response_str)
# Used to decline confirmation of the set repeated back to the user
@alexa.intent("DeclinationIntent")
def declination_intent_handler(request):
if request.session.get('pin_code_verified') != True:
alexa_response_str = "Please verify your pin first, using the link listed in the Alexa app. Start by saying, 'my pin is', followed by your unique pin number on the website."
return alexa.create_response(message=alexa_response_str, end_session=True)
alexa_response_str = "Okay, we will not study that set. What would you like to do?"
return alexa.create_response(message=alexa_response_str)
# Confirms that this is the set the user wanted to study
@alexa.intent("ConfirmationIntent")
def confirmation_intent_handler(request):
if request.session.get('pin_code_verified') != True:
alexa_response_str = "Please verify your pin first, using the link listed in the Alexa app. Start by saying, 'my pin is', followed by your unique pin number on the website."
return alexa.create_response(message=alexa_response_str, end_session=True)
# store that the session has been started
request.session['study_session_started'] = True
request.session.pop('awaiting_set_id_confirmation')
# loads all of the terms, gets them ready to be added to session
terms = json.loads(quizlet.get_all_terms_given_set(request.session['study_session_set_id']))
all_terms = []
# total term counter
total_terms = 0
# creates a dictionary for each term, appends to a total term list
'''
{
'id',
'definition',
'term'
}
'''
for t in terms:
term_new = {}
term_new['id'] = t['id']
term_new['definition'] = t['definition']
term_new['term'] = t['term']
total_terms += 1
all_terms.append(term_new)
# session variables
request.session['total_terms'] = total_terms
# list of all terms
request.session['all_terms'] = all_terms
# used terms boolean list
request.session['used_terms'] = [False] * total_terms
# incorrect term list
request.session['incorrect_terms'] = []
# count variables
request.session['correct_count'] = 0
request.session['incorrect_count'] = 0
# reviewing wrong boolean
request.session['reviewing_wrong'] = False
# picks a random word to start at, marks that word used
index = randint(0, total_terms - 1)
request.session['used_terms'][index] = True
# begins the session with that word
request.session['current_index'] = index
alexa_response_str = "Great. Let's get started with the first term. Please define {}".format(all_terms[index]['term'])
return alexa.create_response(message=alexa_response_str)
# Answers to a term
'''
Currently, the utterances and speech model support answers of 1-8 words, as most flashcard definitions are
within this limit.
Answers to questions need to be preceded by "the answer is ..." or "the definition is ..."
'''
@alexa.intent('AnswerIntent')
def answer_intent_handler(request):
if request.session.get('pin_code_verified') != True:
alexa_response_str = "Please verify your pin first, using the link listed in the Alexa app. Start by saying, 'my pin is', followed by your unique pin number on the website."
return alexa.create_response(message=alexa_response_str, end_session=True)
# TODO: Make so that it does not need to match exactly
answer = request.slots.get("answer")
### error checking ###
# has a session been started yet, and was an answer given?
if request.session.get('study_session_started') == True:
if answer == None:
index = request.session['current_index']
word_to_define = request.session['all_terms'][index]['term']
alexa_response_str = "Uh oh. I didn't quite get your answer. Make sure you start with 'The answer is', and then proceed to give your answer. Let's try again. \
Can you please define {}?".format(word_to_define)
return alexa.create_response(message=alexa_response_str)
# study session hasn't been started yet
else:
# get all of the sets
user_id = request.session['username']
sets = quizlet.get_all_sets_from_user(user_id)
all_sets_titles = []
set_id = None
set_title = None
# add each title to the list
for set_ in sets:
all_sets_titles.append(set_['title'])
# user has no sets
if all_sets_titles == []:
alexa_response_str = "Oops. You haven't started a study session, and it looks like you don't have any sets. Please visit quizlet.com to create a flashcard set, and then I will be more than happy to help! Goodbye, \
and I hope to speak to you again soon!"
return alexa.create_response(message=alexa_response_str, end_session=True)
# prepare response string
all_sets_string = ", ".join(all_sets_titles)
alexa_response_str = "Uh oh, you haven't started a study session. Here are the sets you can choose from: {}. Which set would you like to work with? Start by saying, let's study, followed by the set you want to choose.".format(all_sets_string)
# return message to user
return alexa.create_response(message=alexa_response_str)
### end error checking ###
# makes sure a study session has started and user is not reviewing his/her wrong answers
if request.session['reviewing_wrong'] == False:
index = request.session['current_index']
total_terms = request.session['total_terms']
# user got this answer correct
if str(answer).lower() == str(request.session['all_terms'][index]['definition']).lower():
# increment correct count for the session
request.session['correct_count'] += 1
# checks if this was the last term
if request.session['correct_count'] + request.session['incorrect_count'] == total_terms:
# percentage user got correct
total_percent_correct = int((float(request.session['correct_count']) / total_terms) * 100)
# default string if nothing was wrong during the entire session
incorrect_terms_string = "everything, even though you don't really need it."
# loads all of the incorrect terms into a string
if request.session['incorrect_terms'] != []:
incorrect_terms_list = [x[1] for x in request.session['incorrect_terms']]
incorrect_terms_string = ", ".join(incorrect_terms_list)
alexa_response_str = "Good job, you got that one right! Thanks for finishing! You got {} correct and {} incorrect out of {} \
total terms. You got {} percent correct, and you might want to study up on {}. Would you like to go over your incorrect answers, \
or end the study session?".format(request.session['correct_count'], \
request.session['incorrect_count'], total_terms, total_percent_correct, incorrect_terms_string)
return alexa.create_response(message=alexa_response_str)
# not the last term, find the next term randomly
else:
# loop to find the next term
while True:
index_try = randint(0, total_terms - 1)
if request.session['used_terms'][index_try] == False:
index = index_try
request.session['used_terms'][index_try] = True
request.session['current_index'] = index
break
alexa_response_str = "Good job, you got that one right! Now, please define {}".format(request.session['all_terms'][index]['term'])
return alexa.create_response(message=alexa_response_str)
# user got this answer incorrect
else:
# increment incorrect count
request.session['incorrect_count'] += 1
# append tuple of the index and the term -- (index, term) -- to the incorrect terms list
request.session['incorrect_terms'].append((index, request.session['all_terms'][index]['term']))
# checks if this was the last term
if request.session['correct_count'] + request.session['incorrect_count'] == total_terms:
# percentage user got correct
total_percent_correct = int((float(request.session['correct_count']) / total_terms) * 100)
# loads all of the incorrect terms into a string
incorrect_terms_list = [x[1] for x in request.session['incorrect_terms']]
incorrect_terms_string = ", ".join(incorrect_terms_list)
alexa_response_str = "Uh Oh, you got that one wrong! Thanks for finishing! You got {} correct and {} \
incorrect out of {} total terms. You got {} percent correct, and you might want to study up on {}. Would you like to go over your incorrect answers, \
or end the study session?" \
.format(request.session['correct_count'], request.session['incorrect_count'], total_terms, total_percent_correct, incorrect_terms_string)
return alexa.create_response(message=alexa_response_str)
# not the last term, find the next term randomly
else:
# gets the correct definition
correct_def = request.session['all_terms'][index]['definition']
# loop to find the next term
while True:
index_try = randint(0, total_terms - 1)
if request.session['used_terms'][index_try] == False:
index = index_try
request.session['used_terms'][index_try] = True
request.session['current_index'] = index
break
alexa_response_str = "Uh oh, you didn't get that one right! The correct answer was {}. Now, please define {}."\
.format(correct_def, request.session['all_terms'][index]['term'])
return alexa.create_response(message=alexa_response_str)
# study session was started, but user is reviewing their wrong answers
elif request.session.get('reviewing_wrong') == True:
# index of the tuple (index, term) for the word in "incorrect terms" list
incorrect_index = request.session['reviewing_index']
# index of that term
index = request.session['incorrect_terms'][incorrect_index][0]
# answer given by user
answer = request.slots["answer"]
# checks if user got the answer correct
if str(answer).lower() == str(request.session['all_terms'][index]['definition']).lower():
# pops that term of the incorrect terms -- doesn't increment index
request.session['incorrect_terms'].pop(incorrect_index)
# user is not done with all of the incorrect terms
if request.session['incorrect_terms'] != []:
# checks if the term was the last term in the list
try:
alexa_response_str = "Congratulations, you got that right! Now, can you define {}?".format(request.session['incorrect_terms'][incorrect_index][1])
except:
incorrect_index = 0
request.session['reviewing_index'] = 0
alexa_response_str = "Congratulations, you got that right! Now, can you define {}?".format(request.session['incorrect_terms'][incorrect_index][1])
return alexa.create_response(message=alexa_response_str)
# user has finished reviewing all of the incorrect terms
else:
alexa_response_str = "Congratulations, you finished reviewing the incorrect words from this session. Thanks for studying!"
return alexa.create_response(message=alexa_response_str, end_session=True)
# user did not get the correct answer
else:
# increment index circularly
incorrect_index += 1
incorrect_index = incorrect_index % len(request.session['incorrect_terms'])
alexa_response_str = "Oops, still not right. We'll come back to that. Now, can you define {}?".format(request.session['incorrect_terms'][incorrect_index][1])
return alexa.create_response(message=alexa_response_str)
## Error:
alexa_response_str = "Oops, could you repeat that?"
return alexa.create_response(message=alexa_response_str)
###### END INTENTS ######
''' TODO:
Add functionality for adding the current term to a user set called "difficult", so a user can say "quiz me on the difficult terms"
'''
# # Add current term to a set called "difficult"
# @alexa.request("AddToDifficultIntent")
# def add_current_word_to_difficult_set_request_handler(request):
# # grab the word and add it to the difficult set
# current_index = request.session['current_index']
# term_defn_to_add = request.session['all_terms'][current_index]
# quizlet.add_to_difficult(term_defn_to_add)
# return alexa.create_response(message="Got it, added to your difficult list. Let's try to define it now. Please define Please define {}".format(all_terms[current_index]['term']))
# Flask server for testing locally
if __name__ == "__main__":
print("Serving ASK")
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--serve','-s', action='store_true', default=False)
args = parser.parse_args()
if args.serve:
###
# This will only be run if you try to run the server in local mode
##
print('Serving ASK functionality locally.')
import flask
server = flask.Flask(__name__)
@server.route('/')
def alexa_skills_kit_requests():
request_obj = flask.request.get_json()
return lambda_handler(request_obj)
server.run()
| mit | -3,983,422,360,200,674,300 | 39.577181 | 244 | 0.716465 | false |
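A quick local smoke test of the handler above might look like the sketch below; the envelope mirrors the standard Alexa Skills Kit request layout, and every identifier is a placeholder.
# Hypothetical local test for lambda_handler; field names follow the standard
# Alexa request envelope, and all IDs are placeholders rather than real values.
sample_event = {
    "version": "1.0",
    "session": {
        "new": True,
        "sessionId": "amzn1.echo-api.session.PLACEHOLDER",
        "application": {"applicationId": "amzn1.ask.skill.PLACEHOLDER"},
        "user": {"userId": "amzn1.ask.account.PLACEHOLDER"},
        "attributes": {},
    },
    "request": {
        "type": "LaunchRequest",
        "requestId": "amzn1.echo-api.request.PLACEHOLDER",
        "timestamp": "2017-01-01T00:00:00Z",
    },
}
print(lambda_handler(sample_event))  # expected: the welcome/pin-code prompt response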
s20121035/rk3288_android5.1_repo | external/lldb/scripts/verify_api.py | 2 | 3969 | #!/usr/bin/env python
import commands
import optparse
import os
import os.path
import re
import sys
def extract_exe_symbol_names (arch, exe_path, match_str):
command = 'dsymutil --arch %s -s "%s" | grep "%s" | colrm 1 69' % (arch, exe_path, match_str)
(command_exit_status, command_output) = commands.getstatusoutput(command)
if command_exit_status == 0:
if command_output:
return command_output[0:-1].split("'\n")
else:
print 'error: command returned no output'
else:
print 'error: command failed with exit status %i\n command: %s' % (command_exit_status, command)
return list()
def verify_api(all_args):
'''Verify the API in the specified library is valid given one or more binaries.'''
usage = "usage: verify_api --library <path> [ --library <path> ...] executable1 [executable2 ...]"
description='''Verify the API in the specified library is valid given one or more binaries.
Example:
verify_api.py --library ~/Documents/src/lldb/build/Debug/LLDB.framework/LLDB --arch x86_64 /Applications/Xcode.app/Contents/PlugIns/DebuggerLLDB.ideplugin/Contents/MacOS/DebuggerLLDB --api-regex lldb
'''
parser = optparse.OptionParser(description=description, prog='verify_api',usage=usage)
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='display verbose debug info', default=False)
    parser.add_option('-a', '--arch', type='string', action='append', dest='archs', help='architecture to use when checking the api')
parser.add_option('-r', '--api-regex', type='string', dest='api_regex_str', help='Exclude any undefined symbols that do not match this regular expression when searching for missing APIs.')
parser.add_option('-l', '--library', type='string', action='append', dest='libraries', help='Specify one or more libraries that will contain all needed APIs for the executables.')
(options, args) = parser.parse_args(all_args)
api_external_symbols = list()
if options.archs:
for arch in options.archs:
for library in options.libraries:
external_symbols = extract_exe_symbol_names(arch, library, "( SECT EXT)");
if external_symbols:
for external_symbol in external_symbols:
api_external_symbols.append(external_symbol)
else:
sys.exit(1)
else:
print 'error: must specify one or more architectures with the --arch option'
sys.exit(4)
if options.verbose:
print "API symbols:"
for (i, external_symbol) in enumerate(api_external_symbols):
print "[%u] %s" % (i, external_symbol)
api_regex = None
if options.api_regex_str:
api_regex = re.compile(options.api_regex_str)
for arch in options.archs:
for exe_path in args:
print 'Verifying (%s) "%s"...' % (arch, exe_path)
exe_errors = 0
undefined_symbols = extract_exe_symbol_names(arch, exe_path, "( UNDF EXT)");
for undefined_symbol in undefined_symbols:
if api_regex:
match = api_regex.search(undefined_symbol)
if not match:
if options.verbose:
print 'ignoring symbol: %s' % (undefined_symbol)
continue
if undefined_symbol in api_external_symbols:
if options.verbose:
print 'verified symbol: %s' % (undefined_symbol)
else:
print 'missing symbol: %s' % (undefined_symbol)
exe_errors += 1
if exe_errors:
print 'error: missing %u API symbols from %s' % (exe_errors, options.libraries)
else:
print 'success'
if __name__ == '__main__':
    verify_api(sys.argv[1:])
| gpl-3.0 | 8,913,025,677,703,340,000 | 46.261905 | 207 | 0.596372 | false |
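The same check can also be driven programmatically; the sketch below reuses the flag layout from the docstring example, with all paths understood as placeholders for a local LLDB build.
# Hypothetical programmatic invocation; paths are placeholders, and the flags
# mirror the docstring example near the top of verify_api().
args = [
    "--library", "/path/to/LLDB.framework/LLDB",
    "--arch", "x86_64",
    "--api-regex", "lldb",
    "/path/to/DebuggerLLDB",
]
verify_api(args)  # prints verified/missing symbols per architecture and executable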
yaoyi92/qbox_libxc | util/qbox_dos.py | 1 | 2774 | #!/usr/bin/python
# qbox_dos.py: extract electronic DOS from Qbox output
# generate DOS plot in gnuplot format
# use: qbox_dos.py [-last] emin emax width file.r
# emin, emax: bounds of plot in [eV]
# width: gaussian broadening in [eV]
# the DOS is accumulated separately for each spin
# With the -last option, only the last <eigenset> is used to compute the DOS
import xml.sax
import sys
import math
if (len(sys.argv) != 5) and (len(sys.argv) != 6) :
print "use: ",sys.argv[0]," [-last] emin emax width file.r"
sys.exit()
iarg = 1
lastonly = False
if (sys.argv[iarg] == "-last") :
lastonly = True
iarg += 1
emin = float(sys.argv[iarg])
iarg += 1
emax = float(sys.argv[iarg])
iarg += 1
width = float(sys.argv[iarg])
iarg += 1
infile = sys.argv[iarg]
ndos = 501
de = (emax - emin)/(ndos-1)
# normalized gaussian distribution in one dimension
# f(x) = 1/(sqrt(pi)*width) * exp(-(x/width)^2 )
def gauss(x, width):
return (1.0/(math.sqrt(math.pi)*width)) * math.exp(-(x/width)**2)
# Qbox output handler to extract and process data
class QboxOutputHandler(xml.sax.handler.ContentHandler):
def __init__(self):
self.nspin = 1
self.readData = 0
self.dos_up = [0] * ndos
self.dos_dn = [0] * ndos
def startElement(self, name, attributes):
if (name == "eigenset") and (lastonly):
self.dos_up = [0] * ndos
self.dos_dn = [0] * ndos
if name == "eigenvalues":
self.n = attributes["n"]
self.spin = int(attributes["spin"])
self.kpoint = attributes["kpoint"]
self.weight = float(attributes["weight"])
self.readData = 1
self.buffer = ""
if self.spin == 1:
self.nspin = 2
def characters(self, data):
if self.readData:
self.buffer += data
def endElement(self, name):
if name == "eigenvalues":
self.readData = 0
self.accumulate_dos()
def accumulate_dos(self):
self.e = self.buffer.split()
if self.spin == 0:
for i in range(len(self.e)):
for j in range(ndos):
ej = emin + j * de
self.dos_up[j] += gauss(float(self.e[i])-ej, width ) * self.weight
if self.spin == 1:
for i in range(len(self.e)):
for j in range(ndos):
ej = emin + j * de
self.dos_dn[j] += gauss(float(self.e[i])-ej, width ) * self.weight
def print_dos(self):
print "# ",infile," spin=0 width=",width
for j in range(ndos):
ej = emin + j * de
print ej, self.dos_up[j]
if self.nspin == 2:
print
print
print "# ",infile," spin=1 width=",width
for j in range(ndos):
ej = emin + j * de
print ej, self.dos_dn[j]
parser = xml.sax.make_parser()
handler = QboxOutputHandler()
parser.setContentHandler(handler)
parser.parse(infile)
handler.print_dos()
| gpl-2.0 | -3,526,923,138,324,216,000 | 26.465347 | 76 | 0.607787 | false |
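To make the broadening step concrete, here is a small stand-alone sketch of the same accumulation accumulate_dos() performs, using an arbitrary toy spectrum; the numbers are illustrative only.
# Stand-alone sketch of the DOS accumulation above: each eigenvalue contributes
# a unit-area gaussian of the chosen width; the toy eigenvalues are arbitrary.
import math

def gauss(x, width):
    return (1.0 / (math.sqrt(math.pi) * width)) * math.exp(-(x / width) ** 2)

emin, emax, width, ndos = -5.0, 5.0, 0.2, 101
de = (emax - emin) / (ndos - 1)
eigenvalues = [-1.3, -0.2, 0.0, 2.5]   # toy spectrum in eV
weight = 1.0                           # k-point weight
dos = [0.0] * ndos
for e in eigenvalues:
    for j in range(ndos):
        ej = emin + j * de
        dos[j] += gauss(e - ej, width) * weight
# sum(dos) * de is close to len(eigenvalues), since each gaussian integrates to ~1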
googleapis/python-bigtable | google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py | 1 | 40655 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
from google.cloud.bigtable_admin_v2.types import table
from google.cloud.bigtable_admin_v2.types import table as gba_table
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO
class BigtableTableAdminGrpcTransport(BigtableTableAdminTransport):
"""gRPC backend transport for BigtableTableAdmin.
Service for creating, configuring, and deleting Cloud
Bigtable tables.
Provides access to the table schemas only, not the data stored
within the tables.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "bigtableadmin.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "bigtableadmin.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def create_table(
self,
) -> Callable[[bigtable_table_admin.CreateTableRequest], gba_table.Table]:
r"""Return a callable for the create table method over gRPC.
Creates a new table in the specified instance.
The table can be created with a full set of initial
column families, specified in the request.
Returns:
Callable[[~.CreateTableRequest],
~.Table]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_table" not in self._stubs:
self._stubs["create_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable",
request_serializer=bigtable_table_admin.CreateTableRequest.serialize,
response_deserializer=gba_table.Table.deserialize,
)
return self._stubs["create_table"]
@property
def create_table_from_snapshot(
self,
) -> Callable[
[bigtable_table_admin.CreateTableFromSnapshotRequest], operations_pb2.Operation
]:
r"""Return a callable for the create table from snapshot method over gRPC.
Creates a new table from the specified snapshot. The
target table must not exist. The snapshot and the table
must be in the same instance.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any
SLA or deprecation policy.
Returns:
Callable[[~.CreateTableFromSnapshotRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_table_from_snapshot" not in self._stubs:
self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot",
request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_table_from_snapshot"]
@property
def list_tables(
self,
) -> Callable[
[bigtable_table_admin.ListTablesRequest],
bigtable_table_admin.ListTablesResponse,
]:
r"""Return a callable for the list tables method over gRPC.
Lists all tables served from a specified instance.
Returns:
Callable[[~.ListTablesRequest],
~.ListTablesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_tables" not in self._stubs:
self._stubs["list_tables"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ListTables",
request_serializer=bigtable_table_admin.ListTablesRequest.serialize,
response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize,
)
return self._stubs["list_tables"]
@property
def get_table(
self,
) -> Callable[[bigtable_table_admin.GetTableRequest], table.Table]:
r"""Return a callable for the get table method over gRPC.
Gets metadata information about the specified table.
Returns:
Callable[[~.GetTableRequest],
~.Table]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_table" not in self._stubs:
self._stubs["get_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetTable",
request_serializer=bigtable_table_admin.GetTableRequest.serialize,
response_deserializer=table.Table.deserialize,
)
return self._stubs["get_table"]
@property
def delete_table(
self,
) -> Callable[[bigtable_table_admin.DeleteTableRequest], empty_pb2.Empty]:
r"""Return a callable for the delete table method over gRPC.
Permanently deletes a specified table and all of its
data.
Returns:
Callable[[~.DeleteTableRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_table" not in self._stubs:
self._stubs["delete_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable",
request_serializer=bigtable_table_admin.DeleteTableRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_table"]
@property
def modify_column_families(
self,
) -> Callable[[bigtable_table_admin.ModifyColumnFamiliesRequest], table.Table]:
r"""Return a callable for the modify column families method over gRPC.
Performs a series of column family modifications on
the specified table. Either all or none of the
modifications will occur before this method returns, but
data requests received prior to that point may see a
table where only some modifications have taken effect.
Returns:
Callable[[~.ModifyColumnFamiliesRequest],
~.Table]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "modify_column_families" not in self._stubs:
self._stubs["modify_column_families"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies",
request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize,
response_deserializer=table.Table.deserialize,
)
return self._stubs["modify_column_families"]
@property
def drop_row_range(
self,
) -> Callable[[bigtable_table_admin.DropRowRangeRequest], empty_pb2.Empty]:
r"""Return a callable for the drop row range method over gRPC.
Permanently drop/delete a row range from a specified
table. The request can specify whether to delete all
rows in a table, or only those that match a particular
prefix.
Returns:
Callable[[~.DropRowRangeRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "drop_row_range" not in self._stubs:
self._stubs["drop_row_range"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange",
request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["drop_row_range"]
@property
def generate_consistency_token(
self,
) -> Callable[
[bigtable_table_admin.GenerateConsistencyTokenRequest],
bigtable_table_admin.GenerateConsistencyTokenResponse,
]:
r"""Return a callable for the generate consistency token method over gRPC.
Generates a consistency token for a Table, which can
be used in CheckConsistency to check whether mutations
to the table that finished before this call started have
been replicated. The tokens will be available for 90
days.
Returns:
Callable[[~.GenerateConsistencyTokenRequest],
~.GenerateConsistencyTokenResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "generate_consistency_token" not in self._stubs:
self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken",
request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize,
response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize,
)
return self._stubs["generate_consistency_token"]
@property
def check_consistency(
self,
) -> Callable[
[bigtable_table_admin.CheckConsistencyRequest],
bigtable_table_admin.CheckConsistencyResponse,
]:
r"""Return a callable for the check consistency method over gRPC.
Checks replication consistency based on a consistency
token, that is, if replication has caught up based on
the conditions specified in the token and the check
request.
Returns:
Callable[[~.CheckConsistencyRequest],
~.CheckConsistencyResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "check_consistency" not in self._stubs:
self._stubs["check_consistency"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency",
request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize,
response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize,
)
return self._stubs["check_consistency"]
@property
def snapshot_table(
self,
) -> Callable[
[bigtable_table_admin.SnapshotTableRequest], operations_pb2.Operation
]:
r"""Return a callable for the snapshot table method over gRPC.
Creates a new snapshot in the specified cluster from
the specified source table. The cluster and the table
must be in the same instance.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any
SLA or deprecation policy.
Returns:
Callable[[~.SnapshotTableRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "snapshot_table" not in self._stubs:
self._stubs["snapshot_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable",
request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["snapshot_table"]
@property
def get_snapshot(
self,
) -> Callable[[bigtable_table_admin.GetSnapshotRequest], table.Snapshot]:
r"""Return a callable for the get snapshot method over gRPC.
Gets metadata information about the specified
snapshot.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any
SLA or deprecation policy.
Returns:
Callable[[~.GetSnapshotRequest],
~.Snapshot]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_snapshot" not in self._stubs:
self._stubs["get_snapshot"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot",
request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize,
response_deserializer=table.Snapshot.deserialize,
)
return self._stubs["get_snapshot"]
@property
def list_snapshots(
self,
) -> Callable[
[bigtable_table_admin.ListSnapshotsRequest],
bigtable_table_admin.ListSnapshotsResponse,
]:
r"""Return a callable for the list snapshots method over gRPC.
Lists all snapshots associated with the specified
cluster.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any
SLA or deprecation policy.
Returns:
Callable[[~.ListSnapshotsRequest],
~.ListSnapshotsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_snapshots" not in self._stubs:
self._stubs["list_snapshots"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots",
request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize,
response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize,
)
return self._stubs["list_snapshots"]
@property
def delete_snapshot(
self,
) -> Callable[[bigtable_table_admin.DeleteSnapshotRequest], empty_pb2.Empty]:
r"""Return a callable for the delete snapshot method over gRPC.
Permanently deletes the specified snapshot.
Note: This is a private alpha release of Cloud Bigtable
snapshots. This feature is not currently available to
most Cloud Bigtable customers. This feature might be
changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any
SLA or deprecation policy.
Returns:
Callable[[~.DeleteSnapshotRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_snapshot" not in self._stubs:
self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot",
request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_snapshot"]
@property
def create_backup(
self,
) -> Callable[[bigtable_table_admin.CreateBackupRequest], operations_pb2.Operation]:
r"""Return a callable for the create backup method over gRPC.
Starts creating a new Cloud Bigtable Backup. The returned backup
[long-running operation][google.longrunning.Operation] can be
used to track creation of the backup. The
[metadata][google.longrunning.Operation.metadata] field type is
[CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata].
The [response][google.longrunning.Operation.response] field type
is [Backup][google.bigtable.admin.v2.Backup], if successful.
Cancelling the returned operation will stop the creation and
delete the backup.
Returns:
Callable[[~.CreateBackupRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_backup" not in self._stubs:
self._stubs["create_backup"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup",
request_serializer=bigtable_table_admin.CreateBackupRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_backup"]
@property
def get_backup(
self,
) -> Callable[[bigtable_table_admin.GetBackupRequest], table.Backup]:
r"""Return a callable for the get backup method over gRPC.
Gets metadata on a pending or completed Cloud
Bigtable Backup.
Returns:
Callable[[~.GetBackupRequest],
~.Backup]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_backup" not in self._stubs:
self._stubs["get_backup"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup",
request_serializer=bigtable_table_admin.GetBackupRequest.serialize,
response_deserializer=table.Backup.deserialize,
)
return self._stubs["get_backup"]
@property
def update_backup(
self,
) -> Callable[[bigtable_table_admin.UpdateBackupRequest], table.Backup]:
r"""Return a callable for the update backup method over gRPC.
Updates a pending or completed Cloud Bigtable Backup.
Returns:
Callable[[~.UpdateBackupRequest],
~.Backup]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_backup" not in self._stubs:
self._stubs["update_backup"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup",
request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize,
response_deserializer=table.Backup.deserialize,
)
return self._stubs["update_backup"]
@property
def delete_backup(
self,
) -> Callable[[bigtable_table_admin.DeleteBackupRequest], empty_pb2.Empty]:
r"""Return a callable for the delete backup method over gRPC.
Deletes a pending or completed Cloud Bigtable backup.
Returns:
Callable[[~.DeleteBackupRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_backup" not in self._stubs:
self._stubs["delete_backup"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup",
request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_backup"]
@property
def list_backups(
self,
) -> Callable[
[bigtable_table_admin.ListBackupsRequest],
bigtable_table_admin.ListBackupsResponse,
]:
r"""Return a callable for the list backups method over gRPC.
Lists Cloud Bigtable backups. Returns both completed
and pending backups.
Returns:
Callable[[~.ListBackupsRequest],
~.ListBackupsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_backups" not in self._stubs:
self._stubs["list_backups"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups",
request_serializer=bigtable_table_admin.ListBackupsRequest.serialize,
response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize,
)
return self._stubs["list_backups"]
@property
def restore_table(
self,
) -> Callable[[bigtable_table_admin.RestoreTableRequest], operations_pb2.Operation]:
r"""Return a callable for the restore table method over gRPC.
Create a new table by restoring from a completed backup. The new
table must be in the same project as the instance containing the
backup. The returned table [long-running
operation][google.longrunning.Operation] can be used to track
the progress of the operation, and to cancel it. The
[metadata][google.longrunning.Operation.metadata] field type is
[RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata].
The [response][google.longrunning.Operation.response] type is
[Table][google.bigtable.admin.v2.Table], if successful.
Returns:
Callable[[~.RestoreTableRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "restore_table" not in self._stubs:
self._stubs["restore_table"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable",
request_serializer=bigtable_table_admin.RestoreTableRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["restore_table"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the access control policy for a Table or Backup
resource. Returns an empty policy if the resource exists
but does not have a policy set.
Returns:
Callable[[~.GetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the access control policy on a Table or Backup
resource. Replaces any existing policy.
Returns:
Callable[[~.SetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
iam_policy_pb2.TestIamPermissionsResponse,
]:
r"""Return a callable for the test iam permissions method over gRPC.
Returns permissions that the caller has on the
specified Table or Backup resource.
Returns:
Callable[[~.TestIamPermissionsRequest],
~.TestIamPermissionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
__all__ = ("BigtableTableAdminGrpcTransport",)
| apache-2.0 | 5,847,444,254,912,788,000 | 42.998918 | 104 | 0.628976 | false |
raeeschachar/edx-e2e-mirror | regression/tests/studio/test_uploaded_files.py | 1 | 4722 | """
Test uploaded files.
"""
import os
from shutil import copyfile
from regression.tests.studio.studio_base_test import StudioBaseTestClass
from regression.pages.studio.utils import upload_new_file
from regression.pages.studio.login_studio import StudioLogin
from regression.tests.helpers import LoginHelper, get_course_info
from regression.pages.studio.asset_index_studio import AssetIndexPageExtended
from regression.pages import UPLOAD_FILE_DIR
class UploadedFileTest(StudioBaseTestClass):
"""
Test uploaded files.
"""
def setUp(self):
super(UploadedFileTest, self).setUp()
self.login_page = StudioLogin(self.browser)
LoginHelper.login(self.login_page)
self.course_info = get_course_info()
self.asset_page = AssetIndexPageExtended(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.asset_page.visit()
self.file_names = [
'README.rst',
'test_pdf.pdf'
]
def upload_files(self):
"""
Upload files
"""
# Open file upload prompt.
self.asset_page.open_upload_file_prompt()
# Upload the files.
upload_new_file(self.asset_page, self.file_names)
# Assert that files has been uploaded.
self.assertEqual(self.file_names, self.asset_page.get_file_names())
def test_lock_files(self):
"""
Scenario: Lock the files
        Given that I am on the 'Files & uploads' section of the course.
        And I lock a file.
        Then the file should get locked.
"""
self.upload_files()
index_of_lock_file = 0
# Lock the asset present at index passed.
self.asset_page.lock_asset(index_of_lock_file)
locked_file_elements = self.asset_page.q(
css='.assets-table tbody tr .actions-col .lock-checkbox')
# Assert that file has been locked.
self.assertTrue(
locked_file_elements.attrs('checked')[index_of_lock_file])
def test_sort_files(self):
"""
Scenario: Sort the files
        Given that I am on the 'Files & uploads' section of the course.
        And I sort the files.
        Then I should see the files in sorted order.
"""
self.upload_files()
initial_order = self.asset_page.get_file_names()
# Sort the assets
self.asset_page.sort_assets()
initial_order.sort()
# Assert that assets has been sorted.
self.assertEqual(initial_order, self.asset_page.get_file_names())
def test_delete_files(self):
"""
Scenario: Delete the file
        Given that I am on the 'Files & uploads' section of the course.
        And I delete a file.
        Then the file should be deleted and no longer available.
"""
self.upload_files()
file_names = self.asset_page.get_file_names()
for name in file_names:
self.asset_page.click_delete_file()
# Assert files have been deleted.
self.assertNotIn(name, self.asset_page.get_file_names())
class UploadedFilePaginationTest(StudioBaseTestClass):
"""
Test uploaded files.
"""
def setUp(self):
super(UploadedFilePaginationTest, self).setUp()
self.login_page = StudioLogin(self.browser)
LoginHelper.login(self.login_page)
self.course_info = get_course_info()
self.asset_page = AssetIndexPageExtended(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.asset_page.visit()
def test_pagination(self):
"""
        Verifies that the user can successfully navigate between multiple pages
"""
file_name = '{}.png'
file_names = ['1.png']
for num in range(2, 52):
file_names.append(file_name.format(num))
copyfile(
UPLOAD_FILE_DIR + '/' + '1.png',
UPLOAD_FILE_DIR + '/' + file_name.format(num)
)
# Open file upload prompt.
self.asset_page.open_upload_file_prompt()
# Upload the files.
upload_new_file(self.asset_page, file_names)
        # Assert that we are still on the first page.
        self.assertEqual('1', self.asset_page.get_page_count())
self.asset_page.click_next_page_link()
        # Assert that we have moved to the second page.
self.assertEqual('2', self.asset_page.get_page_count())
file_names.pop(0)
# Remove files from directory
for file_name in file_names:
os.remove(UPLOAD_FILE_DIR + '/' + file_name)
| agpl-3.0 | 5,461,955,600,145,901,000 | 32.971223 | 77 | 0.603134 | false |
googleapis/proto-breaking-change-detector | test/comparator/wrappers/test_service.py | 1 | 3415 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test.tools.mock_descriptors import (
make_method,
make_message,
make_field,
make_service,
)
from google.protobuf import descriptor_pb2
class ServiceTest(unittest.TestCase):
def test_service_properties(self):
service = make_service(name="ThingDoer")
self.assertEqual(service.name, "ThingDoer")
self.assertEqual(service.proto_file_name, "foo")
self.assertEqual(service.path, ())
self.assertFalse(service.api_version)
self.assertEqual(
service.source_code_line,
-1,
)
def test_service_api_version(self):
service = make_service(api_version="v1alpha")
self.assertEqual(service.api_version, "v1alpha")
def test_service_host(self):
service = make_service(host="thingdoer.googleapis.com")
self.assertEqual(service.host.value, "thingdoer.googleapis.com")
def test_service_no_host(self):
service = make_service()
self.assertFalse(service.host)
def test_service_scopes(self):
service = make_service(scopes=("https://foo/user/", "https://foo/admin/"))
oauth_scopes = [scope.value for scope in service.oauth_scopes]
self.assertIn("https://foo/user/", oauth_scopes)
self.assertIn("https://foo/admin/", oauth_scopes)
def test_service_no_scopes(self):
service = make_service()
self.assertEqual(len(service.oauth_scopes), 0)
def test_service_methods(self):
input_message = make_message("InputRequest")
output_message = make_message("OutputResponse")
service = make_service(
name="ThingDoer",
methods=(
make_method(
name="DoThing",
input_message=input_message,
output_message=output_message,
),
make_method(
name="Jump",
input_message=input_message,
output_message=output_message,
),
make_method(
name="Yawn",
input_message=input_message,
output_message=output_message,
),
),
)
expected_names = ["DoThing", "Jump", "Yawn"]
self.assertEqual(list(service.methods.keys()), expected_names)
def test_source_code_line(self):
L = descriptor_pb2.SourceCodeInfo.Location
locations = [
L(path=(4, 0, 2, 1), span=(1, 2, 3, 4)),
]
service = make_service(
proto_file_name="test.proto",
locations=locations,
path=(4, 0, 2, 1),
)
self.assertEqual(service.source_code_line, 2)
self.assertEqual(service.proto_file_name, "test.proto")
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -3,387,512,627,948,391,400 | 33.494949 | 82 | 0.59795 | false |
pczerkas/tempest | tempest/cmd/javelin.py | 1 | 40208 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Javelin is a tool for creating, verifying, and deleting a small set of
resources in a declarative way.
Javelin is meant to be used as a way to validate quickly that resources can
survive an upgrade process.
Authentication
--------------
Javelin will be creating (and removing) users and tenants so it needs the admin
credentials of your cloud to operate properly. The corresponding info can be
given the usual way, either through CLI options or environment variables.
You're probably familiar with these, but just in case::
+----------+------------------+----------------------+
| Param | CLI | Environment Variable |
+----------+------------------+----------------------+
| Username | --os-username | OS_USERNAME |
| Password | --os-password | OS_PASSWORD |
| Tenant | --os-tenant-name | OS_TENANT_NAME |
+----------+------------------+----------------------+
Runtime Arguments
-----------------
**-m/--mode**: (Required) Has to be one of 'check', 'create' or 'destroy'. It
indicates which action javelin is going to perform.
**-r/--resources**: (Required) The path to a YAML file describing the resources
used by Javelin.
**-d/--devstack-base**: (Required) The path to the devstack repo used to
retrieve artefacts (like images) that will be referenced in the resource files.
**-c/--config-file**: (Optional) The path to a valid Tempest config file
describing your cloud. Javelin may use this to determine if certain services
are enabled and modify its behavior accordingly.
Resource file
-------------
The resource file is a valid YAML file describing the resources that will be
created, checked and destroyed by javelin. Here's a canonical example of a
resource file::
tenants:
- javelin
- discuss
users:
- name: javelin
pass: gungnir
tenant: javelin
- name: javelin2
pass: gungnir2
tenant: discuss
# resources that we want to create
images:
- name: javelin_cirros
owner: javelin
file: cirros-0.3.2-x86_64-blank.img
disk_format: ami
container_format: ami
aki: cirros-0.3.2-x86_64-vmlinuz
ari: cirros-0.3.2-x86_64-initrd
servers:
- name: peltast
owner: javelin
flavor: m1.small
image: javelin_cirros
floating_ip_pool: public
- name: hoplite
owner: javelin
flavor: m1.medium
image: javelin_cirros
An important piece of the resource definition is the *owner* field, which is
the user (that we've created) that is the owner of that resource. All
operations on that resource will happen as that regular user to ensure that
admin level access does not mask issues.
The check phase will act like a unit test, using well known assert methods to
verify that the correct resources exist.
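A minimal example invocation (illustrative only; it assumes the ``javelin2``
entry point is installed, and the resource file, devstack path and credentials
shown are placeholders for your own environment)::
    javelin2 -m create -r resources.yaml -d /opt/stack/old \
        -c /etc/tempest/tempest.conf --os-username admin \
        --os-password secret --os-tenant-name admin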
"""
import argparse
import collections
import datetime
import os
import sys
import unittest
import netaddr
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from tempest_lib import auth
from tempest_lib import exceptions as lib_exc
import yaml
from tempest.common import waiters
from tempest import config
from tempest.services.compute.json import flavors_client
from tempest.services.compute.json import floating_ips_client
from tempest.services.compute.json import security_group_rules_client
from tempest.services.compute.json import security_groups_client
from tempest.services.compute.json import servers_client
from tempest.services.identity.v2.json import identity_client
from tempest.services.image.v2.json import image_client
from tempest.services.network.json import network_client
from tempest.services.object_storage import container_client
from tempest.services.object_storage import object_client
from tempest.services.telemetry.json import telemetry_client
from tempest.services.volume.json import volumes_client
CONF = config.CONF
OPTS = {}
USERS = {}
RES = collections.defaultdict(list)
LOG = None
JAVELIN_START = datetime.datetime.utcnow()
class OSClient(object):
_creds = None
identity = None
servers = None
def __init__(self, user, pw, tenant):
default_params = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
default_params_with_timeout_values = {
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
default_params_with_timeout_values.update(default_params)
compute_params = {
'service': CONF.compute.catalog_type,
'region': CONF.compute.region or CONF.identity.region,
'endpoint_type': CONF.compute.endpoint_type,
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
compute_params.update(default_params)
object_storage_params = {
'service': CONF.object_storage.catalog_type,
'region': CONF.object_storage.region or CONF.identity.region,
'endpoint_type': CONF.object_storage.endpoint_type
}
object_storage_params.update(default_params)
_creds = auth.KeystoneV2Credentials(
username=user,
password=pw,
tenant_name=tenant)
auth_provider_params = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
_auth = auth.KeystoneV2AuthProvider(
_creds, CONF.identity.uri, **auth_provider_params)
self.identity = identity_client.IdentityClient(
_auth,
CONF.identity.catalog_type,
CONF.identity.region,
endpoint_type='adminURL',
**default_params_with_timeout_values)
self.servers = servers_client.ServersClient(_auth,
**compute_params)
self.flavors = flavors_client.FlavorsClient(_auth,
**compute_params)
self.floating_ips = floating_ips_client.FloatingIPsClient(
_auth, **compute_params)
self.secgroups = security_groups_client.SecurityGroupsClient(
_auth, **compute_params)
self.secrules = security_group_rules_client.SecurityGroupRulesClient(
_auth, **compute_params)
self.objects = object_client.ObjectClient(_auth,
**object_storage_params)
self.containers = container_client.ContainerClient(
_auth, **object_storage_params)
self.images = image_client.ImageClientV2(
_auth,
CONF.image.catalog_type,
CONF.image.region or CONF.identity.region,
endpoint_type=CONF.image.endpoint_type,
build_interval=CONF.image.build_interval,
build_timeout=CONF.image.build_timeout,
**default_params)
self.telemetry = telemetry_client.TelemetryClient(
_auth,
CONF.telemetry.catalog_type,
CONF.identity.region,
endpoint_type=CONF.telemetry.endpoint_type,
**default_params_with_timeout_values)
self.volumes = volumes_client.VolumesClient(
_auth,
CONF.volume.catalog_type,
CONF.volume.region or CONF.identity.region,
endpoint_type=CONF.volume.endpoint_type,
build_interval=CONF.volume.build_interval,
build_timeout=CONF.volume.build_timeout,
**default_params)
self.networks = network_client.NetworkClient(
_auth,
CONF.network.catalog_type,
CONF.network.region or CONF.identity.region,
endpoint_type=CONF.network.endpoint_type,
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**default_params)
def load_resources(fname):
"""Load the expected resources from a yaml file."""
return yaml.load(open(fname, 'r'))
def keystone_admin():
return OSClient(OPTS.os_username, OPTS.os_password, OPTS.os_tenant_name)
def client_for_user(name):
LOG.debug("Entering client_for_user")
if name in USERS:
user = USERS[name]
LOG.debug("Created client for user %s" % user)
return OSClient(user['name'], user['pass'], user['tenant'])
else:
LOG.error("%s not found in USERS: %s" % (name, USERS))
###################
#
# TENANTS
#
###################
def create_tenants(tenants):
"""Create tenants from resource definition.
Don't create the tenants if they already exist.
"""
admin = keystone_admin()
body = admin.identity.list_tenants()['tenants']
existing = [x['name'] for x in body]
for tenant in tenants:
if tenant not in existing:
admin.identity.create_tenant(tenant)
else:
LOG.warn("Tenant '%s' already exists in this environment" % tenant)
def destroy_tenants(tenants):
admin = keystone_admin()
for tenant in tenants:
tenant_id = admin.identity.get_tenant_by_name(tenant)['id']
admin.identity.delete_tenant(tenant_id)
##############
#
# USERS
#
##############
def _users_for_tenant(users, tenant):
u_for_t = []
for user in users:
for n in user:
if user[n]['tenant'] == tenant:
u_for_t.append(user[n])
return u_for_t
def _tenants_from_users(users):
tenants = set()
for user in users:
for n in user:
tenants.add(user[n]['tenant'])
return tenants
def _assign_swift_role(user, swift_role):
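    """Assign the given swift role to the user, ignoring the conflict that is
    raised when the role is already assigned.
    """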
admin = keystone_admin()
roles = admin.identity.list_roles()
role = next(r for r in roles if r['name'] == swift_role)
LOG.debug(USERS[user])
try:
admin.identity.assign_user_role(
USERS[user]['tenant_id'],
USERS[user]['id'],
role['id'])
except lib_exc.Conflict:
# don't care if it's already assigned
pass
def create_users(users):
"""Create tenants from resource definition.
Don't create the tenants if they already exist.
"""
global USERS
LOG.info("Creating users")
admin = keystone_admin()
for u in users:
try:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
except lib_exc.NotFound:
LOG.error("Tenant: %s - not found" % u['tenant'])
continue
try:
admin.identity.get_user_by_username(tenant['id'], u['name'])
LOG.warn("User '%s' already exists in this environment"
% u['name'])
except lib_exc.NotFound:
admin.identity.create_user(
u['name'], u['pass'], tenant['id'],
"%s@%s" % (u['name'], tenant['id']),
enabled=True)
def destroy_users(users):
admin = keystone_admin()
for user in users:
tenant_id = admin.identity.get_tenant_by_name(user['tenant'])['id']
user_id = admin.identity.get_user_by_username(tenant_id,
user['name'])['id']
admin.identity.delete_user(user_id)
def collect_users(users):
global USERS
LOG.info("Collecting users")
admin = keystone_admin()
for u in users:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
u['tenant_id'] = tenant['id']
USERS[u['name']] = u
body = admin.identity.get_user_by_username(tenant['id'], u['name'])
USERS[u['name']]['id'] = body['id']
class JavelinCheck(unittest.TestCase):
def __init__(self, users, resources):
super(JavelinCheck, self).__init__()
self.users = users
self.res = resources
def runTest(self, *args):
pass
def _ping_ip(self, ip_addr, count, namespace=None):
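        """Ping ip_addr up to count times (optionally from inside the given
        network namespace) and fail the check if it never responds.
        """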
if namespace is None:
ping_cmd = "ping -c1 " + ip_addr
else:
ping_cmd = "sudo ip netns exec %s ping -c1 %s" % (namespace,
ip_addr)
for current in range(count):
return_code = os.system(ping_cmd)
            if return_code == 0:
break
self.assertNotEqual(current, count - 1,
"Server is not pingable at %s" % ip_addr)
def check(self):
self.check_users()
self.check_objects()
self.check_servers()
self.check_volumes()
self.check_telemetry()
self.check_secgroups()
# validate neutron is enabled and ironic disabled:
# Tenant network isolation is not supported when using ironic.
# "admin" has set up a neutron flat network environment within a shared
# fixed network for all tenants to use.
# In this case, network/subnet/router creation can be skipped and the
# server booted the same as nova network.
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled):
self.check_networking()
def check_users(self):
"""Check that the users we expect to exist, do.
We don't use the resource list for this because we need to validate
that things like tenantId didn't drift across versions.
"""
LOG.info("checking users")
for name, user in six.iteritems(self.users):
client = keystone_admin()
found = client.identity.get_user(user['id'])
self.assertEqual(found['name'], user['name'])
self.assertEqual(found['tenantId'], user['tenant_id'])
# also ensure we can auth with that user, and do something
# on the cloud. We don't care about the results except that it
# remains authorized.
client = client_for_user(user['name'])
client.servers.list_servers()
def check_objects(self):
"""Check that the objects created are still there."""
if not self.res.get('objects'):
return
LOG.info("checking objects")
for obj in self.res['objects']:
client = client_for_user(obj['owner'])
r, contents = client.objects.get_object(
obj['container'], obj['name'])
source = _file_contents(obj['file'])
self.assertEqual(contents, source)
def check_servers(self):
"""Check that the servers are still up and running."""
if not self.res.get('servers'):
return
LOG.info("checking servers")
for server in self.res['servers']:
client = client_for_user(server['owner'])
found = _get_server_by_name(client, server['name'])
self.assertIsNotNone(
found,
"Couldn't find expected server %s" % server['name'])
found = client.servers.show_server(found['id'])
# validate neutron is enabled and ironic disabled:
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled):
_floating_is_alive = False
for network_name, body in found['addresses'].items():
for addr in body:
ip = addr['addr']
# If floatingip_for_ssh is at True, it's assumed
# you want to use the floating IP to reach the server,
# fallback to fixed IP, then other type.
# This is useful in multi-node environment.
if CONF.compute.use_floatingip_for_ssh:
if addr.get('OS-EXT-IPS:type',
'floating') == 'floating':
self._ping_ip(ip, 60)
_floating_is_alive = True
elif addr.get('OS-EXT-IPS:type', 'fixed') == 'fixed':
namespace = _get_router_namespace(client,
network_name)
self._ping_ip(ip, 60, namespace)
else:
self._ping_ip(ip, 60)
# if floatingip_for_ssh is at True, validate found a
# floating IP and ping worked.
if CONF.compute.use_floatingip_for_ssh:
self.assertTrue(_floating_is_alive,
"Server %s has no floating IP." %
server['name'])
else:
addr = found['addresses']['private'][0]['addr']
self._ping_ip(addr, 60)
def check_secgroups(self):
"""Check that the security groups still exist."""
LOG.info("Checking security groups")
for secgroup in self.res['secgroups']:
client = client_for_user(secgroup['owner'])
found = _get_resource_by_name(client.secgroups, 'security_groups',
secgroup['name'])
self.assertIsNotNone(
found,
"Couldn't find expected secgroup %s" % secgroup['name'])
def check_telemetry(self):
"""Check that ceilometer provides a sane sample.
Confirm that there is more than one sample and that they have the
expected metadata.
If in check mode confirm that the oldest sample available is from
before the upgrade.
"""
if not self.res.get('telemetry'):
return
LOG.info("checking telemetry")
for server in self.res['servers']:
client = client_for_user(server['owner'])
body = client.telemetry.list_samples(
'instance',
query=('metadata.display_name', 'eq', server['name'])
)
self.assertTrue(len(body) >= 1, 'expecting at least one sample')
self._confirm_telemetry_sample(server, body[-1])
def check_volumes(self):
"""Check that the volumes are still there and attached."""
if not self.res.get('volumes'):
return
LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
vol_body = _get_volume_by_name(client, volume['name'])
self.assertIsNotNone(
vol_body,
"Couldn't find expected volume %s" % volume['name'])
            # Verify that the volume's attachment can be retrieved
server_id = _get_server_by_name(client, volume['server'])['id']
attachment = client.volumes.get_attachment_from_volume(vol_body)
self.assertEqual(vol_body['id'], attachment['volume_id'])
self.assertEqual(server_id, attachment['server_id'])
def _confirm_telemetry_sample(self, server, sample):
"""Check this sample matches the expected resource metadata."""
# Confirm display_name
self.assertEqual(server['name'],
sample['resource_metadata']['display_name'])
# Confirm instance_type of flavor
flavor = sample['resource_metadata'].get(
'flavor.name',
sample['resource_metadata'].get('instance_type')
)
self.assertEqual(server['flavor'], flavor)
# Confirm the oldest sample was created before upgrade.
if OPTS.mode == 'check':
oldest_timestamp = timeutils.normalize_time(
timeutils.parse_isotime(sample['timestamp']))
self.assertTrue(
oldest_timestamp < JAVELIN_START,
'timestamp should come before start of second javelin run'
)
def check_networking(self):
"""Check that the networks are still there."""
for res_type in ('networks', 'subnets', 'routers'):
for res in self.res[res_type]:
client = client_for_user(res['owner'])
found = _get_resource_by_name(client.networks, res_type,
res['name'])
self.assertIsNotNone(
found,
"Couldn't find expected resource %s" % res['name'])
#######################
#
# OBJECTS
#
#######################
def _file_contents(fname):
with open(fname, 'r') as f:
return f.read()
def create_objects(objects):
if not objects:
return
LOG.info("Creating objects")
for obj in objects:
LOG.debug("Object %s" % obj)
swift_role = obj.get('swift_role', 'Member')
_assign_swift_role(obj['owner'], swift_role)
client = client_for_user(obj['owner'])
client.containers.create_container(obj['container'])
client.objects.create_object(
obj['container'], obj['name'],
_file_contents(obj['file']))
def destroy_objects(objects):
for obj in objects:
client = client_for_user(obj['owner'])
r, body = client.objects.delete_object(obj['container'], obj['name'])
if not (200 <= int(r['status']) < 299):
raise ValueError("unable to destroy object: [%s] %s" % (r, body))
#######################
#
# IMAGES
#
#######################
def _resolve_image(image, imgtype):
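    """Return the file name and full devstack path of the artifact referenced
    by the image's ``imgtype`` key (for example 'file', 'aki' or 'ari').
    """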
name = image[imgtype]
fname = os.path.join(OPTS.devstack_base, image['imgdir'], name)
return name, fname
def _get_image_by_name(client, name):
body = client.images.list_images()
for image in body:
if name == image['name']:
return image
return None
def create_images(images):
if not images:
return
LOG.info("Creating images")
for image in images:
client = client_for_user(image['owner'])
# DEPRECATED: 'format' was used for ami images
# Use 'disk_format' and 'container_format' instead
if 'format' in image:
LOG.warning("Deprecated: 'format' is deprecated for images "
"description. Please use 'disk_format' and 'container_"
"format' instead.")
image['disk_format'] = image['format']
image['container_format'] = image['format']
# only upload a new image if the name isn't there
if _get_image_by_name(client, image['name']):
LOG.info("Image '%s' already exists" % image['name'])
continue
# special handling for 3 part image
extras = {}
if image['disk_format'] == 'ami':
name, fname = _resolve_image(image, 'aki')
aki = client.images.create_image(
'javelin_' + name, 'aki', 'aki')
client.images.store_image_file(aki.get('id'), open(fname, 'r'))
extras['kernel_id'] = aki.get('id')
name, fname = _resolve_image(image, 'ari')
ari = client.images.create_image(
'javelin_' + name, 'ari', 'ari')
client.images.store_image_file(ari.get('id'), open(fname, 'r'))
extras['ramdisk_id'] = ari.get('id')
_, fname = _resolve_image(image, 'file')
body = client.images.create_image(
image['name'], image['container_format'],
image['disk_format'], **extras)
image_id = body.get('id')
client.images.store_image_file(image_id, open(fname, 'r'))
def destroy_images(images):
if not images:
return
LOG.info("Destroying images")
for image in images:
client = client_for_user(image['owner'])
response = _get_image_by_name(client, image['name'])
if not response:
LOG.info("Image '%s' does not exist" % image['name'])
continue
client.images.delete_image(response['id'])
#######################
#
# NETWORKS
#
#######################
def _get_router_namespace(client, network):
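    """Return the qrouter-<id> namespace of the router plugged into the named
    network, or None if no router interface is found on that network.
    """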
network_id = _get_resource_by_name(client.networks,
'networks', network)['id']
n_body = client.networks.list_routers()
for router in n_body['routers']:
router_id = router['id']
r_body = client.networks.list_router_interfaces(router_id)
for port in r_body['ports']:
if port['network_id'] == network_id:
return "qrouter-%s" % router_id
def _get_resource_by_name(client, resource, name):
get_resources = getattr(client, 'list_%s' % resource)
if get_resources is None:
raise AttributeError("client doesn't have method list_%s" % resource)
# Until all tempest client methods are changed to return only one value,
# we cannot assume they all have the same signature so we need to discard
    # the unused first value of the response if two values are being returned.
body = get_resources()
if type(body) == tuple:
body = body[1]
if isinstance(body, dict):
body = body[resource]
for res in body:
if name == res['name']:
return res
raise ValueError('%s not found in %s resources' % (name, resource))
def create_networks(networks):
LOG.info("Creating networks")
for network in networks:
client = client_for_user(network['owner'])
# only create a network if the name isn't here
body = client.networks.list_networks()
if any(item['name'] == network['name'] for item in body['networks']):
LOG.warning("Duplicated network name: %s" % network['name'])
continue
client.networks.create_network(name=network['name'])
def destroy_networks(networks):
LOG.info("Destroying subnets")
for network in networks:
client = client_for_user(network['owner'])
network_id = _get_resource_by_name(client.networks, 'networks',
network['name'])['id']
client.networks.delete_network(network_id)
def create_subnets(subnets):
LOG.info("Creating subnets")
for subnet in subnets:
client = client_for_user(subnet['owner'])
network = _get_resource_by_name(client.networks, 'networks',
subnet['network'])
ip_version = netaddr.IPNetwork(subnet['range']).version
# ensure we don't overlap with another subnet in the network
try:
client.networks.create_subnet(network_id=network['id'],
cidr=subnet['range'],
name=subnet['name'],
ip_version=ip_version)
except lib_exc.BadRequest as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
def destroy_subnets(subnets):
LOG.info("Destroying subnets")
for subnet in subnets:
client = client_for_user(subnet['owner'])
subnet_id = _get_resource_by_name(client.networks,
'subnets', subnet['name'])['id']
client.networks.delete_subnet(subnet_id)
def create_routers(routers):
LOG.info("Creating routers")
for router in routers:
client = client_for_user(router['owner'])
# only create a router if the name isn't here
body = client.networks.list_routers()
if any(item['name'] == router['name'] for item in body['routers']):
LOG.warning("Duplicated router name: %s" % router['name'])
continue
client.networks.create_router(router['name'])
def destroy_routers(routers):
LOG.info("Destroying routers")
for router in routers:
client = client_for_user(router['owner'])
router_id = _get_resource_by_name(client.networks,
'routers', router['name'])['id']
for subnet in router['subnet']:
subnet_id = _get_resource_by_name(client.networks,
'subnets', subnet)['id']
client.networks.remove_router_interface_with_subnet_id(router_id,
subnet_id)
client.networks.delete_router(router_id)
def add_router_interface(routers):
for router in routers:
client = client_for_user(router['owner'])
router_id = _get_resource_by_name(client.networks,
'routers', router['name'])['id']
for subnet in router['subnet']:
subnet_id = _get_resource_by_name(client.networks,
'subnets', subnet)['id']
# connect routers to their subnets
client.networks.add_router_interface_with_subnet_id(router_id,
subnet_id)
# connect routers to external network if set to "gateway"
if router['gateway']:
if CONF.network.public_network_id:
ext_net = CONF.network.public_network_id
client.networks._update_router(
router_id, set_enable_snat=True,
external_gateway_info={"network_id": ext_net})
else:
raise ValueError('public_network_id is not configured.')
#######################
#
# SERVERS
#
#######################
def _get_server_by_name(client, name):
body = client.servers.list_servers()
for server in body['servers']:
if name == server['name']:
return server
return None
def _get_flavor_by_name(client, name):
body = client.flavors.list_flavors()['flavors']
for flavor in body:
if name == flavor['name']:
return flavor
return None
def create_servers(servers):
if not servers:
return
LOG.info("Creating servers")
for server in servers:
client = client_for_user(server['owner'])
if _get_server_by_name(client, server['name']):
LOG.info("Server '%s' already exists" % server['name'])
continue
image_id = _get_image_by_name(client, server['image'])['id']
flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
# validate neutron is enabled and ironic disabled
kwargs = dict()
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled and server.get('networks')):
get_net_id = lambda x: (_get_resource_by_name(
client.networks, 'networks', x)['id'])
kwargs['networks'] = [{'uuid': get_net_id(network)}
for network in server['networks']]
body = client.servers.create_server(
server['name'], image_id, flavor_id, **kwargs)
server_id = body['id']
client.servers.wait_for_server_status(server_id, 'ACTIVE')
# create security group(s) after server spawning
for secgroup in server['secgroups']:
client.servers.add_security_group(server_id, secgroup)
if CONF.compute.use_floatingip_for_ssh:
floating_ip_pool = server.get('floating_ip_pool')
floating_ip = client.floating_ips.create_floating_ip(
pool_name=floating_ip_pool)['floating_ip']
client.floating_ips.associate_floating_ip_to_server(
floating_ip['ip'], server_id)
def destroy_servers(servers):
if not servers:
return
LOG.info("Destroying servers")
for server in servers:
client = client_for_user(server['owner'])
response = _get_server_by_name(client, server['name'])
if not response:
LOG.info("Server '%s' does not exist" % server['name'])
continue
# TODO(EmilienM): disassociate floating IP from server and release it.
client.servers.delete_server(response['id'])
waiters.wait_for_server_termination(client.servers, response['id'],
ignore_error=True)
def create_secgroups(secgroups):
LOG.info("Creating security groups")
for secgroup in secgroups:
client = client_for_user(secgroup['owner'])
        # only create a security group if the name isn't here
        # i.e. a security group may be used by another server
body = client.secgroups.list_security_groups()['security_groups']
if any(item['name'] == secgroup['name'] for item in body):
LOG.warning("Security group '%s' already exists" %
secgroup['name'])
continue
body = client.secgroups.create_security_group(
name=secgroup['name'],
description=secgroup['description'])['security_group']
secgroup_id = body['id']
# for each security group, create the rules
for rule in secgroup['rules']:
ip_proto, from_port, to_port, cidr = rule.split()
client.secrules.create_security_group_rule(
parent_group_id=secgroup_id, ip_protocol=ip_proto,
from_port=from_port, to_port=to_port, cidr=cidr)
def destroy_secgroups(secgroups):
LOG.info("Destroying security groups")
for secgroup in secgroups:
client = client_for_user(secgroup['owner'])
sg_id = _get_resource_by_name(client.secgroups,
'security_groups',
secgroup['name'])
# sg rules are deleted automatically
client.secgroups.delete_security_group(sg_id['id'])
#######################
#
# VOLUMES
#
#######################
def _get_volume_by_name(client, name):
body = client.volumes.list_volumes()
for volume in body:
if name == volume['display_name']:
return volume
return None
def create_volumes(volumes):
if not volumes:
return
LOG.info("Creating volumes")
for volume in volumes:
client = client_for_user(volume['owner'])
# only create a volume if the name isn't here
if _get_volume_by_name(client, volume['name']):
LOG.info("volume '%s' already exists" % volume['name'])
continue
size = volume['gb']
v_name = volume['name']
body = client.volumes.create_volume(size=size,
display_name=v_name)
client.volumes.wait_for_volume_status(body['id'], 'available')
def destroy_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
volume_id = _get_volume_by_name(client, volume['name'])['id']
client.volumes.detach_volume(volume_id)
client.volumes.delete_volume(volume_id)
def attach_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
server_id = _get_server_by_name(client, volume['server'])['id']
volume_id = _get_volume_by_name(client, volume['name'])['id']
device = volume['device']
client.volumes.attach_volume(volume_id, server_id, device)
#######################
#
# MAIN LOGIC
#
#######################
def create_resources():
LOG.info("Creating Resources")
# first create keystone level resources, and we need to be admin
# for this.
create_tenants(RES['tenants'])
create_users(RES['users'])
collect_users(RES['users'])
# next create resources in a well known order
create_objects(RES['objects'])
create_images(RES['images'])
# validate neutron is enabled and ironic is disabled
if CONF.service_available.neutron and not CONF.baremetal.driver_enabled:
create_networks(RES['networks'])
create_subnets(RES['subnets'])
create_routers(RES['routers'])
add_router_interface(RES['routers'])
create_secgroups(RES['secgroups'])
create_volumes(RES['volumes'])
# Only attempt attaching the volumes if servers are defined in the
# resource file
if 'servers' in RES:
create_servers(RES['servers'])
attach_volumes(RES['volumes'])
def destroy_resources():
LOG.info("Destroying Resources")
# Destroy in inverse order of create
destroy_servers(RES['servers'])
destroy_images(RES['images'])
destroy_objects(RES['objects'])
destroy_volumes(RES['volumes'])
if CONF.service_available.neutron and not CONF.baremetal.driver_enabled:
destroy_routers(RES['routers'])
destroy_subnets(RES['subnets'])
destroy_networks(RES['networks'])
destroy_secgroups(RES['secgroups'])
destroy_users(RES['users'])
destroy_tenants(RES['tenants'])
LOG.warn("Destroy mode incomplete")
def get_options():
global OPTS
parser = argparse.ArgumentParser(
description='Create and validate a fixed set of OpenStack resources')
parser.add_argument('-m', '--mode',
metavar='<create|check|destroy>',
required=True,
help=('One of (create, check, destroy)'))
parser.add_argument('-r', '--resources',
required=True,
metavar='resourcefile.yaml',
help='Resources definition yaml file')
parser.add_argument(
'-d', '--devstack-base',
required=True,
metavar='/opt/stack/old',
help='Devstack base directory for retrieving artifacts')
parser.add_argument(
'-c', '--config-file',
metavar='/etc/tempest.conf',
help='path to javelin2(tempest) config file')
# auth bits, letting us also just source the devstack openrc
parser.add_argument('--os-username',
metavar='<auth-user-name>',
default=os.environ.get('OS_USERNAME'),
help=('Defaults to env[OS_USERNAME].'))
parser.add_argument('--os-password',
metavar='<auth-password>',
default=os.environ.get('OS_PASSWORD'),
help=('Defaults to env[OS_PASSWORD].'))
parser.add_argument('--os-tenant-name',
metavar='<auth-tenant-name>',
default=os.environ.get('OS_TENANT_NAME'),
help=('Defaults to env[OS_TENANT_NAME].'))
OPTS = parser.parse_args()
if OPTS.mode not in ('create', 'check', 'destroy'):
print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
parser.print_help()
sys.exit(1)
if OPTS.config_file:
config.CONF.set_config_path(OPTS.config_file)
def setup_logging():
global LOG
logging.setup(CONF, __name__)
LOG = logging.getLogger(__name__)
def main():
global RES
get_options()
setup_logging()
RES.update(load_resources(OPTS.resources))
if OPTS.mode == 'create':
create_resources()
# Make sure the resources we just created actually work
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'check':
collect_users(RES['users'])
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'destroy':
collect_users(RES['users'])
destroy_resources()
else:
LOG.error('Unknown mode %s' % OPTS.mode)
return 1
LOG.info('javelin2 successfully finished')
return 0
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -8,450,923,955,698,417,000 | 34.9 | 79 | 0.57889 | false |
Weertik/weertik | auth_users/views.py | 1 | 7698 | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.template import RequestContext, loader
from django.contrib import auth
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
import uuid
from .models import *
from .forms import *
from django.contrib.auth.models import Group
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.template import Context
def _send_email(template, context, emails, subject, origin='[email protected]'):
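    """Render the '<template>.txt' and '<template>.html' templates with the
    given context and send the result as a multipart (text + HTML) email.
    """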
plaintext = get_template(template + '.txt')
html = get_template(template + '.html')
text_content = plaintext.render(context)
html_content = html.render(context)
msg = EmailMultiAlternatives(subject,
text_content, origin, emails)
msg.attach_alternative(html_content, "text/html")
msg.send()
def login(request):
if request.user.is_authenticated():
return HttpResponseRedirect(reverse('panel'))
if request.method != 'POST':
return render_to_response('login.html',
{'form': SignupForm()},
context_instance=RequestContext(request))
user = auth.authenticate(username=request.POST.get('username', ''),
password=request.POST.get('password', ''))
if user is not None and user.is_active:
auth.login(request, user)
return HttpResponseRedirect(reverse('panel'))
else:
        error = 'Username or password is not correct'
return render_to_response('login.html',
{'error': error,
'form': SignupForm()},
context_instance=RequestContext(request))
def logout(request):
auth.logout(request)
return HttpResponseRedirect(reverse('login'))
def recovery(request):
if request.method != 'POST':
return render(request, 'recovery.html')
email = request.POST.get('email', '')
user = None
try:
user = User.objects.get(email=email)
token_user = TokenAuth.objects.get(user=user)
except User.DoesNotExist:
return render_to_response('recovery.html',
{'error': True, 'email': email},
context_instance=RequestContext(request))
except TokenAuth.DoesNotExist:
token_user = TokenAuth()
token_user.user = user
token_user.token = uuid.uuid4().hex
token_user.save()
context = Context({'username': user.username,
'name': (str(user.first_name) +
' ' + str(user.last_name)),
'token': token_user.token})
_send_email('recovery_email',
context, [email],
'Weertik - Recuperar contraseña')
return render_to_response('recovery.html',
{'send': True},
context_instance=RequestContext(request))
def change(request, token):
try:
token_user = TokenAuth.objects.get(token=token)
user = token_user.user
except TokenAuth.DoesNotExist:
return render_to_response('change.html',
{'e_token': True},
context_instance=RequestContext(request))
if request.method == 'POST':
password = request.POST.get('password', '1')
password_repeat = request.POST.get('password_repeat', '2')
if password == password_repeat:
user.set_password(password)
if not user.is_active:
user.is_active = True
user.save()
token_user.delete()
context = Context(
{'username': user.username,
'name': (str(user.first_name) +
' ' + str(user.last_name))})
_send_email('change_email', context,
[user.email], 'Weertik - Contraseña modificada')
return render_to_response('change.html',
{'send': True},
context_instance=RequestContext(request))
return render_to_response('change.html',
{'e_pass': True, 'user': user},
context_instance=RequestContext(request))
return render_to_response('change.html',
{'user': user},
context_instance=RequestContext(request))
def signup(request):
if request.method != 'POST':
return render_to_response('signup.html',
{'form': SignupForm()},
context_instance=RequestContext(request))
form = SignupForm(request.POST)
if form.is_valid():
exist = False
send = False
try:
user = User.objects.create_user(
first_name=form.data['first_name'],
last_name=form.data['last_name'],
password=form.data['password'],
username=form.data['email'],
email=form.data['email'])
except Exception, e:
user = User.objects.get(email=form.data['email'])
exist = True
if not exist:
user.is_active = False
user.save()
group = Group.objects.get(name='Free')
group.user_set.add(user)
try:
token_user = TokenAuth.objects.get(user=user)
except TokenAuth.DoesNotExist:
token_user = TokenAuth()
token_user.user = user
token_user.token = uuid.uuid4().hex
token_user.save()
if not user.is_active and not exist:
context = Context(
{'username': user.username,
'name': (str(user.first_name) +
' ' + str(user.last_name)),
'token': token_user.token})
_send_email('signup_email',
context, [user.email],
'Weertik - Activa tu cuenta')
send = True
return render_to_response('signup.html',
{'send': send,
'exist': exist,
'email': user.email,
'form': form},
context_instance=RequestContext(request))
return render_to_response('signup.html',
{'form': form},
context_instance=RequestContext(request))
def active(request, token):
try:
token_user = TokenAuth.objects.get(token=token)
user = token_user.user
except TokenAuth.DoesNotExist:
return render_to_response('active.html',
{'e_token': True},
context_instance=RequestContext(request))
user.is_active = True
user.save()
token_user.delete()
context = Context(
{'username': user.username,
'name': (str(user.first_name) +
' ' + str(user.last_name))})
_send_email('active_email', context,
[user.email], 'Weertik - Cuenta activada')
return render_to_response('active.html',
{'send': True},
context_instance=RequestContext(request))
def delete(request):
    # NOT IMPLEMENTED
return
| gpl-3.0 | 5,643,385,532,282,984,000 | 36.178744 | 79 | 0.529236 | false |
weaverba137/rv | setup.py | 1 | 4541 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys
import ah_bootstrap
from setuptools import setup
# A dirty hack to get around some early import/configuration ambiguities
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
builtins._ASTROPY_SETUP_ = True
from astropy_helpers.setup_helpers import (
register_commands, adjust_compiler, get_debug_option, get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
# Get some values from the setup.cfg
from distutils import config
conf = config.ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')
# Get the long description from the package's docstring
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__
# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '0.0.1.dev'
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
# Adjust the compiler in case the default on this platform is to use a
# broken one.
adjust_compiler(PACKAGENAME)
# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
get_debug_option(PACKAGENAME))
# Treat everything in scripts except README.rst as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
if os.path.basename(fname) != 'README.rst']
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')
# Define entry points for command-line scripts
entry_points = {'console_scripts': []}
entry_point_list = conf.items('entry_points')
for entry_point in entry_point_list:
entry_points['console_scripts'].append('{0} = {1}'.format(entry_point[0],
entry_point[1]))
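# For example (hypothetical values, not taken from this package's setup.cfg):
# a section such as
#     [entry_points]
#     rv_command = rv.main:main
# yields the console_scripts entry 'rv_command = rv.main:main', which
# setuptools turns into an executable wrapper script at install time.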
# Include all .c files, recursively, including those generated by
# Cython, since we cannot do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
for filename in files:
if filename.endswith('.c'):
c_files.append(
os.path.join(
os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
# Note that requires and provides should not be included in the call to
# ``setup``, since these are now deprecated. See this link for more details:
# https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM
setup(name=PACKAGENAME,
version=VERSION,
description=DESCRIPTION,
scripts=scripts,
install_requires=['astropy'],
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
cmdclass=cmdclassd,
zip_safe=False,
use_2to3=True,
entry_points=entry_points,
classifiers = [ 'Development Status :: 3 - Alpha',
'Environment :: Console',
'Framework :: Flask',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Scientific/Engineering :: Astronomy',
],
**package_info
)
| bsd-3-clause | -2,002,494,423,995,351,600 | 33.664122 | 79 | 0.697864 | false |
endlessm/chromium-browser | third_party/catapult/telemetry/telemetry/timeline/chrome_trace_category_filter.py | 1 | 7821 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
def CreateLowOverheadFilter():
"""Returns a filter with the least overhead possible.
This contains no sub-traces of thread tasks, so it's only useful for
capturing the cpu-time spent on threads (as well as needed benchmark
traces).
FIXME: Remove webkit.console when blink.console lands in chromium and
the ref builds are updated. crbug.com/386847
"""
categories = [
"toplevel",
"benchmark",
"webkit.console",
"blink.console",
"trace_event_overhead"
]
return ChromeTraceCategoryFilter(filter_string=','.join(categories))
def CreateDefaultOverheadFilter():
"""Returns a filter with the best-effort amount of overhead.
This matches Chrome tracing's default category filter setting, i.e., enable
all categories except the disabled-by-default-* ones.
We should use '*' instead of '' (empty string) here. On the Chrome side, both
  '*' and '' mean the default category filter setting. However, if someone adds
additional category filters, the behavior becomes different.
For example:
'*': enable all categories except the disabled-by-default-* ones.
'': enable all categories except the disabled-by-default-* ones.
Now add an additional category filter 'abc' to '*' and '':
'*,abc': enable all categories (including 'abc') except the
disabled-by-default-* ones.
'abc': enable only 'abc', and disable all other ones.
"""
return ChromeTraceCategoryFilter(filter_string='*')
def CreateDebugOverheadFilter():
"""Returns a filter with as many traces enabled as is useful."""
return ChromeTraceCategoryFilter(
filter_string='*,disabled-by-default-cc.debug')
_delay_re = re.compile(r'DELAY[(][A-Za-z0-9._;]+[)]')
class ChromeTraceCategoryFilter(object):
"""A set of included and excluded categories that should be traced.
The ChromeTraceCategoryFilter allows fine tuning of what data is traced for
Chrome. Basic choice of which tracers to use is done by TracingConfig.
Providing filter_string=None gives the default category filter, which leaves
what to trace up to the individual trace systems.
"""
def __init__(self, filter_string=None):
self._included_categories = set()
self._excluded_categories = set()
self._disabled_by_default_categories = set()
self._synthetic_delays = set()
self.contains_wildcards = False
self.AddFilterString(filter_string)
def AddFilterString(self, filter_string):
if filter_string is None:
return
filter_set = set([cf.strip() for cf in filter_string.split(',')])
for category in filter_set:
self.AddFilter(category)
def AddFilter(self, category):
if category == '':
return
if ',' in category:
raise ValueError("Invalid category filter name: '%s'" % category)
if '*' in category or '?' in category:
self.contains_wildcards = True
if _delay_re.match(category):
self._synthetic_delays.add(category)
return
if category[0] == '-':
assert not category[1:] in self._included_categories
self._excluded_categories.add(category[1:])
return
if category.startswith('disabled-by-default-'):
self._disabled_by_default_categories.add(category)
return
assert not category in self._excluded_categories
self._included_categories.add(category)
@property
def included_categories(self):
return self._included_categories
@property
def excluded_categories(self):
return self._excluded_categories
@property
def disabled_by_default_categories(self):
return self._disabled_by_default_categories
@property
def synthetic_delays(self):
return self._synthetic_delays
@property
def filter_string(self):
return self._GetFilterString(stable_output=False)
@property
def stable_filter_string(self):
return self._GetFilterString(stable_output=True)
def _GetFilterString(self, stable_output):
# Note: This outputs fields in an order that intentionally matches
# trace_event_impl's CategoryFilter string order.
lists = []
lists.append(self._included_categories)
lists.append(self._disabled_by_default_categories)
lists.append(['-%s' % x for x in self._excluded_categories])
lists.append(self._synthetic_delays)
categories = []
for l in lists:
if stable_output:
l = list(l)
l.sort()
categories.extend(l)
return ','.join(categories)
def GetDictForChromeTracing(self):
INCLUDED_CATEGORIES_PARAM = 'included_categories'
EXCLUDED_CATEGORIES_PARAM = 'excluded_categories'
SYNTHETIC_DELAYS_PARAM = 'synthetic_delays'
result = {}
if self._included_categories or self._disabled_by_default_categories:
result[INCLUDED_CATEGORIES_PARAM] = list(
self._included_categories | self._disabled_by_default_categories)
if self._excluded_categories:
result[EXCLUDED_CATEGORIES_PARAM] = list(self._excluded_categories)
if self._synthetic_delays:
result[SYNTHETIC_DELAYS_PARAM] = list(self._synthetic_delays)
return result
def AddDisabledByDefault(self, category):
assert category.startswith('disabled-by-default-')
self._disabled_by_default_categories.add(category)
def AddIncludedCategory(self, category_glob):
"""Explicitly enables anything matching category_glob."""
assert not category_glob.startswith('disabled-by-default-')
assert not category_glob in self._excluded_categories
self._included_categories.add(category_glob)
def AddExcludedCategory(self, category_glob):
"""Explicitly disables anything matching category_glob."""
assert not category_glob.startswith('disabled-by-default-')
assert not category_glob in self._included_categories
self._excluded_categories.add(category_glob)
def AddSyntheticDelay(self, delay):
assert _delay_re.match(delay)
self._synthetic_delays.add(delay)
def IsSubset(self, other):
""" Determine if filter A (self) is a subset of filter B (other).
Returns True if A is a subset of B, False if A is not a subset of B,
and None if we can't tell for sure.
"""
# We don't handle filters with wildcards in this test.
if self.contains_wildcards or other.contains_wildcards:
return None
# Disabled categories get into a trace if and only if they are contained in
# the 'disabled' set. Return False if A's disabled set is not a subset of
# B's disabled set.
if not self.disabled_by_default_categories <= \
other.disabled_by_default_categories:
return False
# If A defines more or different synthetic delays than B, then A is not a
# subset.
if not self.synthetic_delays <= other.synthetic_delays:
return False
if self.included_categories and other.included_categories:
# A and B have explicit include lists. If A includes something that B
# doesn't, return False.
if not self.included_categories <= other.included_categories:
return False
elif self.included_categories:
# Only A has an explicit include list. If A includes something that B
# excludes, return False.
if self.included_categories.intersection(other.excluded_categories):
return False
elif other.included_categories:
# Only B has an explicit include list. We don't know which categories are
# contained in the default list, so return None.
return None
else:
      # Neither filter has an explicit include list. If B excludes categories
# that A doesn't exclude, return False.
if not other.excluded_categories <= self.excluded_categories:
return False
return True
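def _ExampleCategoryFilterUsage():
  """Informal usage sketch; not called by Telemetry itself.
  The category names below are placeholders chosen for illustration, not a
  recommended Chrome tracing configuration.
  """
  f = ChromeTraceCategoryFilter(filter_string='benchmark,-ipc')
  # Stable output sorts within each group: included, disabled-by-default,
  # excluded (prefixed with '-'), synthetic delays.
  assert f.stable_filter_string == 'benchmark,-ipc'
  assert f.GetDictForChromeTracing() == {'included_categories': ['benchmark'],
                                         'excluded_categories': ['ipc']}
  # A filter that only excludes 'ipc' traces everything f traces, so f is a
  # subset of it; the reverse is undecidable (None) because the categories
  # covered by the default filter cannot be enumerated here.
  broader = ChromeTraceCategoryFilter(filter_string='-ipc')
  assert f.IsSubset(broader) is True
  assert broader.IsSubset(f) is None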
| bsd-3-clause | 8,379,687,264,017,720,000 | 33.606195 | 79 | 0.703746 | false |
dawnpower/nova | nova/network/manager.py | 1 | 96390 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Network Hosts are responsible for allocating ips and setting up network.
There are multiple backend drivers that handle specific types of networking
topologies. All of the network commands are issued to a subclass of
:class:`NetworkManager`.
"""
import collections
import datetime
import functools
import itertools
import math
import re
import uuid
import eventlet
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from nova import conductor
from nova import context
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova import ipv6
from nova import manager
from nova.network import api as network_api
from nova.network import driver
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
from nova.objects import quotas as quotas_obj
from nova.openstack.common import periodic_task
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
network_opts = [
cfg.StrOpt('flat_network_bridge',
help='Bridge for simple network instances'),
cfg.StrOpt('flat_network_dns',
default='8.8.4.4',
help='DNS server for simple network'),
cfg.BoolOpt('flat_injected',
default=False,
help='Whether to attempt to inject network setup into guest'),
cfg.StrOpt('flat_interface',
help='FlatDhcp will bridge into this interface if set'),
cfg.IntOpt('vlan_start',
default=100,
help='First VLAN for private networks'),
cfg.StrOpt('vlan_interface',
help='VLANs will bridge into this interface if set'),
cfg.IntOpt('num_networks',
default=1,
help='Number of networks to support'),
cfg.StrOpt('vpn_ip',
default='$my_ip',
help='Public IP for the cloudpipe VPN servers'),
cfg.IntOpt('vpn_start',
default=1000,
help='First Vpn port for private networks'),
cfg.IntOpt('network_size',
default=256,
help='Number of addresses in each private subnet'),
cfg.StrOpt('fixed_range_v6',
default='fd00::/48',
help='Fixed IPv6 address block'),
cfg.StrOpt('gateway',
help='Default IPv4 gateway'),
cfg.StrOpt('gateway_v6',
help='Default IPv6 gateway'),
cfg.IntOpt('cnt_vpn_clients',
default=0,
help='Number of addresses reserved for vpn clients'),
cfg.IntOpt('fixed_ip_disassociate_timeout',
default=600,
help='Seconds after which a deallocated IP is disassociated'),
cfg.IntOpt('create_unique_mac_address_attempts',
default=5,
help='Number of attempts to create unique mac address'),
cfg.BoolOpt('fake_call',
default=False,
help='If True, skip using the queue and make local calls'),
cfg.BoolOpt('teardown_unused_network_gateway',
default=False,
help='If True, unused gateway devices (VLAN and bridge) are '
'deleted in VLAN network mode with multi hosted '
'networks'),
cfg.BoolOpt('force_dhcp_release',
default=True,
help='If True, send a dhcp release on instance termination'),
cfg.BoolOpt('update_dns_entries',
default=False,
help='If True, when a DNS entry must be updated, it sends a '
'fanout cast to all network hosts to update their DNS '
'entries in multi host mode'),
cfg.IntOpt("dns_update_periodic_interval",
default=-1,
help='Number of seconds to wait between runs of updates to DNS '
'entries.'),
cfg.StrOpt('dhcp_domain',
default='novalocal',
help='Domain to use for building the hostnames'),
cfg.StrOpt('l3_lib',
default='nova.network.l3.LinuxNetL3',
help="Indicates underlying L3 management library"),
]
CONF = cfg.CONF
CONF.register_opts(network_opts)
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('network_topic', 'nova.network.rpcapi')
CONF.import_opt('fake_network', 'nova.network.linux_net')
CONF.import_opt('share_dhcp_address', 'nova.objects.network')
CONF.import_opt('network_device_mtu', 'nova.objects.network')
class RPCAllocateFixedIP(object):
"""Mixin class originally for FlatDCHP and VLAN network managers.
used since they share code to RPC.call allocate_fixed_ip on the
correct network host to configure dnsmasq
"""
servicegroup_api = None
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
green_threads = []
vpn = kwargs.get('vpn')
requested_networks = kwargs.get('requested_networks')
addresses_by_network = {}
if requested_networks is not None:
for request in requested_networks:
addresses_by_network[request.network_id] = request.address
for network in networks:
if 'uuid' in network and network['uuid'] in addresses_by_network:
address = addresses_by_network[network['uuid']]
else:
address = None
# NOTE(vish): if we are not multi_host pass to the network host
# NOTE(tr3buchet): but if we are, host came from instance.host
if not network['multi_host']:
host = network['host']
# NOTE(vish): if there is no network host, set one
if host is None:
network_p = obj_base.obj_to_primitive(network)
host = self.network_rpcapi.set_network_host(context,
network_p)
if host != self.host:
# need to call allocate_fixed_ip to correct network host
green_threads.append(eventlet.spawn(
self.network_rpcapi._rpc_allocate_fixed_ip,
context, instance_id, network['id'], address, vpn,
host))
else:
# i am the correct host, run here
self.allocate_fixed_ip(context, instance_id, network,
vpn=vpn, address=address)
# wait for all of the allocates (if any) to finish
for gt in green_threads:
gt.wait()
def _rpc_allocate_fixed_ip(self, context, instance_id, network_id,
**kwargs):
"""Sits in between _allocate_fixed_ips and allocate_fixed_ip to
perform network lookup on the far side of rpc.
"""
network = self._get_network_by_id(context, network_id)
return self.allocate_fixed_ip(context, instance_id, network, **kwargs)
def deallocate_fixed_ip(self, context, address, host=None, teardown=True,
instance=None):
"""Call the superclass deallocate_fixed_ip if i'm the correct host
otherwise call to the correct host
"""
fixed_ip = objects.FixedIP.get_by_address(
context, address, expected_attrs=['network'])
network = fixed_ip.network
# NOTE(vish): if we are not multi_host pass to the network host
# NOTE(tr3buchet): but if we are, host came from instance.host
if not network.multi_host:
host = network.host
if host == self.host:
# NOTE(vish): deallocate the fixed ip locally
return super(RPCAllocateFixedIP, self).deallocate_fixed_ip(context,
address, instance=instance)
if network.multi_host:
service = objects.Service.get_by_host_and_binary(
context, host, 'nova-network')
if not service or not self.servicegroup_api.service_is_up(service):
# NOTE(vish): deallocate the fixed ip locally but don't
# teardown network devices
return super(RPCAllocateFixedIP, self).deallocate_fixed_ip(
context, address, teardown=False, instance=instance)
self.network_rpcapi.deallocate_fixed_ip(context, address, host,
instance)
class NetworkManager(manager.Manager):
"""Implements common network manager functionality.
This class must be subclassed to support specific topologies.
host management:
hosts configure themselves for networks they are assigned to in the
table upon startup. If there are networks in the table which do not
have hosts, those will be filled in and have hosts configured
as the hosts pick them up one at time during their periodic task.
The one at a time part is to flatten the layout to help scale
"""
target = messaging.Target(version='1.13')
# If True, this manager requires VIF to create a bridge.
SHOULD_CREATE_BRIDGE = False
# If True, this manager requires VIF to create VLAN tag.
SHOULD_CREATE_VLAN = False
# if True, this manager leverages DHCP
DHCP = False
timeout_fixed_ips = True
required_create_args = []
def __init__(self, network_driver=None, *args, **kwargs):
self.driver = driver.load_network_driver(network_driver)
self.instance_dns_manager = importutils.import_object(
CONF.instance_dns_manager)
self.instance_dns_domain = CONF.instance_dns_domain
self.floating_dns_manager = importutils.import_object(
CONF.floating_ip_dns_manager)
self.network_api = network_api.API()
self.network_rpcapi = network_rpcapi.NetworkAPI()
self.conductor_api = conductor.API()
self.servicegroup_api = servicegroup.API()
l3_lib = kwargs.get("l3_lib", CONF.l3_lib)
self.l3driver = importutils.import_object(l3_lib)
self.quotas_cls = objects.Quotas
super(NetworkManager, self).__init__(service_name='network',
*args, **kwargs)
@staticmethod
def _uses_shared_ip(network):
shared = network.get('share_address') or CONF.share_dhcp_address
return not network.get('multi_host') or shared
@utils.synchronized('get_dhcp')
def _get_dhcp_ip(self, context, network_ref, host=None):
"""Get the proper dhcp address to listen on."""
# NOTE(vish): If we are sharing the dhcp_address then we can just
# return the dhcp_server from the database.
if self._uses_shared_ip(network_ref):
return network_ref.get('dhcp_server') or network_ref['gateway']
if not host:
host = self.host
network_id = network_ref['id']
try:
fip = objects.FixedIP.get_by_network_and_host(context,
network_id,
host)
return fip.address
except exception.FixedIpNotFoundForNetworkHost:
elevated = context.elevated()
fip = objects.FixedIP.associate_pool(elevated,
network_id,
host=host)
return fip.address
def get_dhcp_leases(self, ctxt, network_ref):
"""Broker the request to the driver to fetch the dhcp leases."""
LOG.debug('Get DHCP leases for network %s', network_ref['uuid'])
return self.driver.get_dhcp_leases(ctxt, network_ref)
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
# NOTE(vish): Set up networks for which this host already has
# an ip address.
ctxt = context.get_admin_context()
for network in objects.NetworkList.get_by_host(ctxt, self.host):
self._setup_network_on_host(ctxt, network)
if CONF.update_dns_entries:
LOG.debug('Update DNS on network %s for host %s',
network['uuid'], self.host)
dev = self.driver.get_dev(network)
self.driver.update_dns(ctxt, dev, network)
LOG.info(_LI('Configured network %(network)s on host %(host)s'),
{'network': network['uuid'], 'host': self.host})
@periodic_task.periodic_task
def _disassociate_stale_fixed_ips(self, context):
if self.timeout_fixed_ips:
now = timeutils.utcnow()
timeout = CONF.fixed_ip_disassociate_timeout
time = now - datetime.timedelta(seconds=timeout)
num = objects.FixedIP.disassociate_all_by_timeout(context,
self.host,
time)
if num:
LOG.debug('Disassociated %s stale fixed ip(s)', num)
def set_network_host(self, context, network_ref):
"""Safely sets the host of the network."""
if not isinstance(network_ref, obj_base.NovaObject):
network_ref = objects.Network._from_db_object(
context, objects.Network(), network_ref)
LOG.debug('Setting host %s for network %s', self.host,
network_ref['uuid'], context=context)
network_ref.host = self.host
network_ref.save()
return self.host
def _do_trigger_security_group_members_refresh_for_instance(self,
instance_id):
# NOTE(francois.charlier): the instance may have been deleted already
# thus enabling `read_deleted`
admin_context = context.get_admin_context(read_deleted='yes')
instance = objects.Instance.get_by_uuid(admin_context, instance_id)
try:
# NOTE(vish): We need to make sure the instance info cache has been
# updated with new ip info before we trigger the
# security group refresh. This is somewhat inefficient
# but avoids doing some dangerous refactoring for a
# bug fix.
nw_info = self.get_instance_nw_info(admin_context, instance_id,
None, None)
ic = objects.InstanceInfoCache.new(admin_context, instance_id)
ic.network_info = nw_info
ic.save(update_cells=False)
except exception.InstanceInfoCacheNotFound:
pass
groups = instance.security_groups
group_ids = [group.id for group in groups]
self.conductor_api.security_groups_trigger_members_refresh(
admin_context, group_ids)
# NOTE(hanlind): This method can be removed in version 2.0 of the RPC API
def get_instance_uuids_by_ip_filter(self, context, filters):
fixed_ip_filter = filters.get('fixed_ip')
ip_filter = re.compile(str(filters.get('ip')))
ipv6_filter = re.compile(str(filters.get('ip6')))
LOG.debug('Get instance uuids by IP filters. Fixed IP filter: %s. '
'IP filter: %s. IPv6 filter: %s', fixed_ip_filter,
str(filters.get('ip')), str(filters.get('ip6')))
# NOTE(jkoelker) Should probably figure out a better way to do
# this. But for now it "works", this could suck on
# large installs.
vifs = objects.VirtualInterfaceList.get_all(context)
results = []
for vif in vifs:
if vif.instance_uuid is None:
continue
network = self._get_network_by_id(context, vif.network_id)
fixed_ipv6 = None
if network['cidr_v6'] is not None:
fixed_ipv6 = ipv6.to_global(network['cidr_v6'],
vif.address,
context.project_id)
if fixed_ipv6 and ipv6_filter.match(fixed_ipv6):
results.append({'instance_uuid': vif.instance_uuid,
'ip': fixed_ipv6})
fixed_ips = objects.FixedIPList.get_by_virtual_interface_id(
context, vif.id)
for fixed_ip in fixed_ips:
if not fixed_ip or not fixed_ip.address:
continue
if str(fixed_ip.address) == fixed_ip_filter:
results.append({'instance_uuid': vif.instance_uuid,
'ip': fixed_ip.address})
continue
if ip_filter.match(str(fixed_ip.address)):
results.append({'instance_uuid': vif.instance_uuid,
'ip': fixed_ip.address})
continue
for floating_ip in fixed_ip.floating_ips:
if not floating_ip or not floating_ip.address:
continue
if ip_filter.match(str(floating_ip.address)):
results.append({'instance_uuid': vif.instance_uuid,
'ip': floating_ip.address})
continue
return results
def _get_networks_for_instance(self, context, instance_id, project_id,
requested_networks=None):
"""Determine & return which networks an instance should connect to."""
# TODO(tr3buchet) maybe this needs to be updated in the future if
# there is a better way to determine which networks
# a non-vlan instance should connect to
if requested_networks is not None and len(requested_networks) != 0:
network_uuids = [request.network_id
for request in requested_networks]
networks = self._get_networks_by_uuids(context, network_uuids)
else:
try:
networks = objects.NetworkList.get_all(context)
except exception.NoNetworksFound:
return []
# return only networks which are not vlan networks
return [network for network in networks if not network.vlan]
def allocate_for_instance(self, context, **kwargs):
"""Handles allocating the various network resources for an instance.
rpc.called by network_api
"""
instance_uuid = kwargs['instance_id']
if not uuidutils.is_uuid_like(instance_uuid):
instance_uuid = kwargs.get('instance_uuid')
host = kwargs['host']
project_id = kwargs['project_id']
rxtx_factor = kwargs['rxtx_factor']
requested_networks = kwargs.get('requested_networks')
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in requested_networks])
vpn = kwargs['vpn']
macs = kwargs['macs']
admin_context = context.elevated()
networks = self._get_networks_for_instance(context,
instance_uuid, project_id,
requested_networks=requested_networks)
networks_list = [self._get_network_dict(network)
for network in networks]
LOG.debug('Networks retrieved for instance: |%s|',
networks_list, context=context, instance_uuid=instance_uuid)
try:
self._allocate_mac_addresses(admin_context, instance_uuid,
networks, macs)
except Exception:
with excutils.save_and_reraise_exception():
# If we fail to allocate any one mac address, clean up all
# allocated VIFs
objects.VirtualInterface.delete_by_instance_uuid(
context, instance_uuid)
self._allocate_fixed_ips(admin_context, instance_uuid,
host, networks, vpn=vpn,
requested_networks=requested_networks)
if CONF.update_dns_entries:
network_ids = [network['id'] for network in networks]
self.network_rpcapi.update_dns(context, network_ids)
net_info = self.get_instance_nw_info(admin_context, instance_uuid,
rxtx_factor, host)
LOG.info(_LI("Allocated network: '%s' for instance"), net_info,
instance_uuid=instance_uuid,
context=context)
return net_info
def deallocate_for_instance(self, context, **kwargs):
"""Handles deallocating various network resources for an instance.
rpc.called by network_api
kwargs can contain fixed_ips to circumvent another db lookup
"""
# NOTE(francois.charlier): in some cases the instance might be
# deleted before the IPs are released, so we need to get deleted
# instances too
read_deleted_context = context.elevated(read_deleted='yes')
if 'instance' in kwargs:
instance = kwargs['instance']
instance_uuid = instance.uuid
host = instance.host
else:
instance_id = kwargs['instance_id']
if uuidutils.is_uuid_like(instance_id):
instance = objects.Instance.get_by_uuid(
read_deleted_context, instance_id)
else:
instance = objects.Instance.get_by_id(
read_deleted_context, instance_id)
# NOTE(russellb) in case instance_id was an ID and not UUID
instance_uuid = instance.uuid
host = kwargs.get('host')
try:
requested_networks = kwargs.get('requested_networks')
if requested_networks:
# NOTE(obondarev): Temporary and transitional
if isinstance(requested_networks, objects.NetworkRequestList):
requested_networks = requested_networks.as_tuples()
network_ids = set([net_id for (net_id, ip)
in requested_networks])
fixed_ips = [ip for (net_id, ip) in requested_networks if ip]
else:
fixed_ip_list = objects.FixedIPList.get_by_instance_uuid(
read_deleted_context, instance_uuid)
network_ids = set([str(fixed_ip.network_id) for fixed_ip
in fixed_ip_list])
fixed_ips = [str(ip.address) for ip in fixed_ip_list]
except exception.FixedIpNotFoundForInstance:
network_ids = set([])
fixed_ips = []
LOG.debug("Network deallocation for instance",
context=context, instance_uuid=instance_uuid)
# deallocate fixed ips
for fixed_ip in fixed_ips:
self.deallocate_fixed_ip(context, fixed_ip, host=host,
instance=instance)
if CONF.update_dns_entries:
self.network_rpcapi.update_dns(context, list(network_ids))
# deallocate vifs (mac addresses)
objects.VirtualInterface.delete_by_instance_uuid(
read_deleted_context, instance_uuid)
LOG.info(_LI("Network deallocated for instance (fixed ips: '%s')"),
fixed_ips, context=context, instance_uuid=instance_uuid)
@messaging.expected_exceptions(exception.InstanceNotFound)
def get_instance_nw_info(self, context, instance_id, rxtx_factor,
host, instance_uuid=None, **kwargs):
"""Creates network info list for instance.
called by allocate_for_instance and network_api
context needs to be elevated
:returns: network info list [(network,info),(network,info)...]
where network = dict containing pertinent data from a network db object
and info = dict containing pertinent networking data
"""
if not uuidutils.is_uuid_like(instance_id):
instance_id = instance_uuid
instance_uuid = instance_id
LOG.debug('Get instance network info', instance_uuid=instance_uuid)
try:
fixed_ips = objects.FixedIPList.get_by_instance_uuid(
context, instance_uuid)
except exception.FixedIpNotFoundForInstance:
fixed_ips = []
nw_info = network_model.NetworkInfo()
vifs = collections.OrderedDict()
for fixed_ip in fixed_ips:
vif = fixed_ip.virtual_interface
if not vif:
continue
if not fixed_ip.network:
continue
if vif.uuid in vifs:
current = vifs[vif.uuid]
else:
current = {
'id': vif.uuid,
'type': network_model.VIF_TYPE_BRIDGE,
'address': vif.address,
}
vifs[vif.uuid] = current
net_dict = self._get_network_dict(fixed_ip.network)
network = network_model.Network(**net_dict)
subnets = self._get_subnets_from_network(context,
fixed_ip.network,
host)
network['subnets'] = subnets
current['network'] = network
try:
current['rxtx_cap'] = (fixed_ip.network['rxtx_base'] *
rxtx_factor)
except (TypeError, KeyError):
pass
if fixed_ip.network.cidr_v6 and vif.address:
                # NOTE(vish): I strongly suspect the v6 subnet is not used
# anywhere, but support it just in case
# add the v6 address to the v6 subnet
address = ipv6.to_global(fixed_ip.network.cidr_v6,
vif.address,
fixed_ip.network.project_id)
model_ip = network_model.FixedIP(address=address)
current['network']['subnets'][1]['ips'].append(model_ip)
# add the v4 address to the v4 subnet
model_ip = network_model.FixedIP(address=str(fixed_ip.address))
for ip in fixed_ip.floating_ips:
floating_ip = network_model.IP(address=str(ip['address']),
type='floating')
model_ip.add_floating_ip(floating_ip)
current['network']['subnets'][0]['ips'].append(model_ip)
for vif in vifs.values():
nw_info.append(network_model.VIF(**vif))
LOG.debug('Built network info: |%s|', nw_info)
return nw_info
@staticmethod
def _get_network_dict(network):
"""Returns the dict representing necessary and meta network fields."""
# get generic network fields
network_dict = {'id': network['uuid'],
'bridge': network['bridge'],
'label': network['label'],
'tenant_id': network['project_id']}
# get extra information
if network.get('injected'):
network_dict['injected'] = network['injected']
return network_dict
@staticmethod
def _extract_subnets(network):
"""Returns information about the IPv4 and IPv6 subnets
associated with a Neutron Network UUID.
"""
subnet_v4 = {
'network_id': network.uuid,
'cidr': network.cidr,
'gateway': network.gateway,
'dhcp_server': getattr(network, 'dhcp_server'),
'broadcast': network.broadcast,
'netmask': network.netmask,
'version': 4,
'dns1': network.dns1,
'dns2': network.dns2}
# TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4.
# this is probably bad as there is no way to add v6
# dns to nova
subnet_v6 = {
'network_id': network.uuid,
'cidr': network.cidr_v6,
'gateway': network.gateway_v6,
'dhcp_server': None,
'broadcast': None,
'netmask': network.netmask_v6,
'version': 6,
'dns1': None,
'dns2': None}
def ips_to_strs(net):
for key, value in net.items():
if isinstance(value, netaddr.ip.BaseIP):
net[key] = str(value)
return net
return [ips_to_strs(subnet_v4), ips_to_strs(subnet_v6)]
def _get_subnets_from_network(self, context, network, instance_host=None):
"""Returns the 1 or 2 possible subnets for a nova network."""
extracted_subnets = self._extract_subnets(network)
subnets = []
for subnet in extracted_subnets:
subnet_dict = {'cidr': subnet['cidr'],
'gateway': network_model.IP(
address=subnet['gateway'],
type='gateway')}
# deal with dhcp
if self.DHCP:
if network.get('multi_host'):
dhcp_server = self._get_dhcp_ip(context, network,
instance_host)
else:
dhcp_server = self._get_dhcp_ip(context, subnet)
subnet_dict['dhcp_server'] = dhcp_server
subnet_object = network_model.Subnet(**subnet_dict)
# add dns info
for k in ['dns1', 'dns2']:
if subnet.get(k):
subnet_object.add_dns(
network_model.IP(address=subnet[k], type='dns'))
subnet_object['ips'] = []
subnets.append(subnet_object)
return subnets
def _allocate_mac_addresses(self, context, instance_uuid, networks, macs):
"""Generates mac addresses and creates vif rows in db for them."""
# make a copy we can mutate
if macs is not None:
available_macs = set(macs)
for network in networks:
if macs is None:
self._add_virtual_interface(context, instance_uuid,
network['id'])
else:
try:
mac = available_macs.pop()
except KeyError:
raise exception.VirtualInterfaceCreateException()
self._add_virtual_interface(context, instance_uuid,
network['id'], mac)
def _add_virtual_interface(self, context, instance_uuid, network_id,
mac=None):
attempts = 1 if mac else CONF.create_unique_mac_address_attempts
for i in range(attempts):
try:
vif = objects.VirtualInterface(context)
vif.address = mac or utils.generate_mac_address()
vif.instance_uuid = instance_uuid
vif.network_id = network_id
vif.uuid = str(uuid.uuid4())
vif.create()
return vif
except exception.VirtualInterfaceCreateException:
# Try again up to max number of attempts
pass
raise exception.VirtualInterfaceMacAddressException()
def add_fixed_ip_to_instance(self, context, instance_id, host, network_id,
rxtx_factor=None):
"""Adds a fixed ip to an instance from specified network."""
if uuidutils.is_uuid_like(network_id):
network = self.get_network(context, network_id)
else:
network = self._get_network_by_id(context, network_id)
LOG.debug('Add fixed ip on network %s', network['uuid'],
instance_uuid=instance_id)
self._allocate_fixed_ips(context, instance_id, host, [network])
return self.get_instance_nw_info(context, instance_id, rxtx_factor,
host)
# NOTE(russellb) This method can be removed in 2.0 of this API. It is
# deprecated in favor of the method in the base API.
def get_backdoor_port(self, context):
"""Return backdoor port for eventlet_backdoor."""
return self.backdoor_port
def remove_fixed_ip_from_instance(self, context, instance_id, host,
address, rxtx_factor=None):
"""Removes a fixed ip from an instance from specified network."""
LOG.debug('Remove fixed ip %s', address, instance_uuid=instance_id)
fixed_ips = objects.FixedIPList.get_by_instance_uuid(context,
instance_id)
for fixed_ip in fixed_ips:
if str(fixed_ip.address) == address:
self.deallocate_fixed_ip(context, address, host)
# NOTE(vish): this probably isn't a dhcp ip so just
# deallocate it now. In the extremely rare
# case that this is a race condition, we
# will just get a warn in lease or release.
if not fixed_ip.leased:
fixed_ip.disassociate()
return self.get_instance_nw_info(context, instance_id,
rxtx_factor, host)
raise exception.FixedIpNotFoundForSpecificInstance(
instance_uuid=instance_id, ip=address)
def _validate_instance_zone_for_dns_domain(self, context, instance):
if not self.instance_dns_domain:
return True
instance_domain = self.instance_dns_domain
domainref = objects.DNSDomain.get_by_domain(context, instance_domain)
if domainref is None:
LOG.warning(_LW('instance-dns-zone not found |%s|.'),
instance_domain, instance=instance)
return True
dns_zone = domainref.availability_zone
instance_zone = instance.get('availability_zone')
if dns_zone and (dns_zone != instance_zone):
LOG.warning(_LW('instance-dns-zone is |%(domain)s|, '
'which is in availability zone |%(zone)s|. '
'Instance is in zone |%(zone2)s|. '
'No DNS record will be created.'),
{'domain': instance_domain,
'zone': dns_zone,
'zone2': instance_zone},
instance=instance)
return False
else:
return True
def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
# TODO(vish): when this is called by compute, we can associate compute
# with a network, or a cluster of computes with a network
# and use that network here with a method like
# network_get_by_compute_host
address = None
# NOTE(vish) This db query could be removed if we pass az and name
# (or the whole instance object).
instance = objects.Instance.get_by_uuid(context, instance_id)
LOG.debug('Allocate fixed ip on network %s', network['uuid'],
instance=instance)
# A list of cleanup functions to call on error
cleanup = []
# Check the quota; can't put this in the API because we get
# called into from other places
quotas = self.quotas_cls(context=context)
quota_project, quota_user = quotas_obj.ids_from_instance(context,
instance)
try:
quotas.reserve(fixed_ips=1, project_id=quota_project,
user_id=quota_user)
cleanup.append(functools.partial(quotas.rollback, context))
except exception.OverQuota as exc:
usages = exc.kwargs['usages']
used = (usages['fixed_ips']['in_use'] +
usages['fixed_ips']['reserved'])
LOG.warning(_LW("Quota exceeded for project %(pid)s, tried to "
"allocate fixed IP. %(used)s of %(allowed)s are "
"in use or are already reserved."),
{'pid': quota_project, 'used': used,
'allowed': exc.kwargs['quotas']['fixed_ips']},
instance_uuid=instance_id)
raise exception.FixedIpLimitExceeded()
try:
if network['cidr']:
address = kwargs.get('address', None)
if address:
LOG.debug('Associating instance with specified fixed IP '
'%(address)s in network %(network)s on subnet '
'%(cidr)s.' %
{'address': address, 'network': network['id'],
'cidr': network['cidr']},
instance=instance)
fip = objects.FixedIP.associate(context,
str(address),
instance_id,
network['id'])
else:
LOG.debug('Associating instance with fixed IP from pool '
'in network %(network)s on subnet %(cidr)s.' %
{'network': network['id'],
'cidr': network['cidr']},
instance=instance)
fip = objects.FixedIP.associate_pool(
context.elevated(), network['id'], instance_id)
address = str(fip.address)
vif = objects.VirtualInterface.get_by_instance_and_network(
context, instance_id, network['id'])
if vif is None:
LOG.debug('vif for network %(network)s is used up, '
'trying to create new vif',
{'network': network['id']}, instance=instance)
vif = self._add_virtual_interface(context,
instance_id, network['id'])
fip.allocated = True
fip.virtual_interface_id = vif.id
fip.save()
cleanup.append(functools.partial(fip.disassociate, context))
LOG.debug('Refreshing security group members for instance.',
instance=instance)
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
cleanup.append(functools.partial(
self._do_trigger_security_group_members_refresh_for_instance, # noqa
instance_id))
name = instance.display_name
if self._validate_instance_zone_for_dns_domain(context, instance):
self.instance_dns_manager.create_entry(
name, str(fip.address), "A", self.instance_dns_domain)
cleanup.append(functools.partial(
self.instance_dns_manager.delete_entry,
name, self.instance_dns_domain))
self.instance_dns_manager.create_entry(
instance_id, str(fip.address), "A",
self.instance_dns_domain)
cleanup.append(functools.partial(
self.instance_dns_manager.delete_entry,
instance_id, self.instance_dns_domain))
LOG.debug('Setting up network %(network)s on host %(host)s.' %
{'network': network['id'], 'host': self.host},
instance=instance)
self._setup_network_on_host(context, network)
cleanup.append(functools.partial(
self._teardown_network_on_host,
context, network))
quotas.commit()
if address is None:
# TODO(mriedem): should _setup_network_on_host return the addr?
LOG.debug('Fixed IP is setup on network %s but not returning '
'the specific IP from the base network manager.',
network['uuid'], instance=instance)
else:
LOG.debug('Allocated fixed ip %s on network %s', address,
network['uuid'], instance=instance)
return address
except Exception:
with excutils.save_and_reraise_exception():
for f in cleanup:
try:
f()
except Exception:
LOG.warning(_LW('Error cleaning up fixed ip '
'allocation. Manual cleanup may '
'be required.'), exc_info=True)
def deallocate_fixed_ip(self, context, address, host=None, teardown=True,
instance=None):
"""Returns a fixed ip to the pool."""
fixed_ip_ref = objects.FixedIP.get_by_address(
context, address, expected_attrs=['network'])
instance_uuid = fixed_ip_ref.instance_uuid
vif_id = fixed_ip_ref.virtual_interface_id
LOG.debug('Deallocate fixed ip %s', address,
instance_uuid=instance_uuid)
if not instance:
# NOTE(vish) This db query could be removed if we pass az and name
# (or the whole instance object).
# NOTE(danms) We can't use fixed_ip_ref.instance because
# instance may be deleted and the relationship
# doesn't extend to deleted instances
instance = objects.Instance.get_by_uuid(
context.elevated(read_deleted='yes'), instance_uuid)
quotas = self.quotas_cls(context=context)
quota_project, quota_user = quotas_obj.ids_from_instance(context,
instance)
try:
quotas.reserve(fixed_ips=-1, project_id=quota_project,
user_id=quota_user)
except Exception:
LOG.exception(_LE("Failed to update usages deallocating "
"fixed IP"))
try:
self._do_trigger_security_group_members_refresh_for_instance(
instance_uuid)
if self._validate_instance_zone_for_dns_domain(context, instance):
for n in self.instance_dns_manager.get_entries_by_address(
address, self.instance_dns_domain):
self.instance_dns_manager.delete_entry(n,
self.instance_dns_domain)
fixed_ip_ref.allocated = False
fixed_ip_ref.save()
if teardown:
network = fixed_ip_ref.network
if CONF.force_dhcp_release:
dev = self.driver.get_dev(network)
# NOTE(vish): The below errors should never happen, but
# there may be a race condition that is causing
# them per
# https://code.launchpad.net/bugs/968457,
# so we log an error to help track down
# the possible race.
if not vif_id:
LOG.error(_LE("Unable to release %s because vif "
"doesn't exist"), address)
return
vif = objects.VirtualInterface.get_by_id(context, vif_id)
if not vif:
LOG.error(_LE("Unable to release %s because vif "
"object doesn't exist"), address)
return
# NOTE(cfb): Call teardown before release_dhcp to ensure
# that the IP can't be re-leased after a release
# packet is sent.
self._teardown_network_on_host(context, network)
# NOTE(vish): This forces a packet so that the
# release_fixed_ip callback will
# get called by nova-dhcpbridge.
try:
self.driver.release_dhcp(dev, address, vif.address)
except exception.NetworkDhcpReleaseFailed:
LOG.error(_LE("Error releasing DHCP for IP %(address)s"
" with MAC %(mac_address)s"),
{'address': address,
'mac_address': vif.address},
instance=instance)
# NOTE(yufang521247): This is probably a failed dhcp fixed
# ip. DHCPRELEASE packet sent to dnsmasq would not trigger
# dhcp-bridge to run. Thus it is better to disassociate
# such fixed ip here.
fixed_ip_ref = objects.FixedIP.get_by_address(
context, address)
if (instance_uuid == fixed_ip_ref.instance_uuid and
not fixed_ip_ref.leased):
fixed_ip_ref.disassociate()
else:
# We can't try to free the IP address so just call teardown
self._teardown_network_on_host(context, network)
except Exception:
with excutils.save_and_reraise_exception():
try:
quotas.rollback()
except Exception:
LOG.warning(_LW("Failed to rollback quota for "
"deallocate fixed ip: %s"), address,
instance=instance)
# Commit the reservations
quotas.commit()
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
LOG.debug('Leased IP |%s|', address, context=context)
fixed_ip = objects.FixedIP.get_by_address(context, address)
if fixed_ip.instance_uuid is None:
LOG.warning(_LW('IP %s leased that is not associated'), address,
context=context)
return
fixed_ip.leased = True
fixed_ip.save()
if not fixed_ip.allocated:
LOG.warning(_LW('IP |%s| leased that isn\'t allocated'), address,
context=context)
def release_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is released."""
LOG.debug('Released IP |%s|', address, context=context)
fixed_ip = objects.FixedIP.get_by_address(context, address)
if fixed_ip.instance_uuid is None:
LOG.warning(_LW('IP %s released that is not associated'), address,
context=context)
return
if not fixed_ip.leased:
LOG.warning(_LW('IP %s released that was not leased'), address,
context=context)
fixed_ip.leased = False
fixed_ip.save()
if not fixed_ip.allocated:
fixed_ip.disassociate()
@staticmethod
def _convert_int_args(kwargs):
int_args = ("network_size", "num_networks",
"vlan_start", "vpn_start")
for key in int_args:
try:
value = kwargs.get(key)
if value is None:
continue
kwargs[key] = int(value)
except ValueError:
raise exception.InvalidIntValue(key=key)
def create_networks(self, context,
label, cidr=None, multi_host=None, num_networks=None,
network_size=None, cidr_v6=None,
gateway=None, gateway_v6=None, bridge=None,
bridge_interface=None, dns1=None, dns2=None,
fixed_cidr=None, allowed_start=None,
allowed_end=None, **kwargs):
arg_names = ("label", "cidr", "multi_host", "num_networks",
"network_size", "cidr_v6",
"gateway", "gateway_v6", "bridge",
"bridge_interface", "dns1", "dns2",
"fixed_cidr", "allowed_start", "allowed_end")
if 'mtu' not in kwargs:
kwargs['mtu'] = CONF.network_device_mtu
if 'dhcp_server' not in kwargs:
kwargs['dhcp_server'] = gateway
if 'enable_dhcp' not in kwargs:
kwargs['enable_dhcp'] = True
if 'share_address' not in kwargs:
kwargs['share_address'] = CONF.share_dhcp_address
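        # Fold the named keyword arguments of this method into kwargs via
        # locals(), so that the validation below can look everything up in
        # kwargs uniformly (e.g. kwargs['label'] ends up holding the label
        # argument).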
for name in arg_names:
kwargs[name] = locals()[name]
self._convert_int_args(kwargs)
# check for certain required inputs
# NOTE: We can remove this check after v2.0 API code is removed because
# jsonschema has checked already before this.
label = kwargs["label"]
if not label:
raise exception.NetworkNotCreated(req="label")
# Size of "label" column in nova.networks is 255, hence the restriction
# NOTE: We can remove this check after v2.0 API code is removed because
# jsonschema has checked already before this.
if len(label) > 255:
raise exception.LabelTooLong()
# NOTE: We can remove this check after v2.0 API code is removed because
# jsonschema has checked already before this.
if not (kwargs["cidr"] or kwargs["cidr_v6"]):
raise exception.NetworkNotCreated(req="cidr or cidr_v6")
kwargs["bridge"] = kwargs["bridge"] or CONF.flat_network_bridge
kwargs["bridge_interface"] = (kwargs["bridge_interface"] or
CONF.flat_interface)
for fld in self.required_create_args:
if not kwargs[fld]:
raise exception.NetworkNotCreated(req=fld)
if kwargs["cidr_v6"]:
# NOTE(vish): just for validation
try:
netaddr.IPNetwork(kwargs["cidr_v6"])
except netaddr.AddrFormatError:
raise exception.InvalidCidr(cidr=kwargs["cidr_v6"])
if kwargs["cidr"]:
try:
fixnet = netaddr.IPNetwork(kwargs["cidr"])
except netaddr.AddrFormatError:
raise exception.InvalidCidr(cidr=kwargs["cidr"])
kwargs["num_networks"] = kwargs["num_networks"] or CONF.num_networks
if not kwargs["network_size"]:
if kwargs["cidr"]:
each_subnet_size = fixnet.size / kwargs["num_networks"]
if each_subnet_size > CONF.network_size:
subnet = 32 - int(math.log(CONF.network_size, 2))
oversize_msg = _LW(
'Subnet(s) too large, defaulting to /%s.'
' To override, specify network_size flag.') % subnet
LOG.warn(oversize_msg)
kwargs["network_size"] = CONF.network_size
else:
kwargs["network_size"] = fixnet.size
else:
kwargs["network_size"] = CONF.network_size
kwargs["multi_host"] = (
CONF.multi_host
if kwargs["multi_host"] is None
else strutils.bool_from_string(kwargs["multi_host"]))
kwargs["vlan_start"] = kwargs.get("vlan_start") or CONF.vlan_start
kwargs["vpn_start"] = kwargs.get("vpn_start") or CONF.vpn_start
kwargs["dns1"] = kwargs["dns1"] or CONF.flat_network_dns
if kwargs["fixed_cidr"]:
try:
kwargs["fixed_cidr"] = netaddr.IPNetwork(kwargs["fixed_cidr"])
except netaddr.AddrFormatError:
raise exception.InvalidCidr(cidr=kwargs["fixed_cidr"])
# Subnet of fixed IPs must fall within fixed range
if kwargs["fixed_cidr"] not in fixnet:
raise exception.AddressOutOfRange(
address=kwargs["fixed_cidr"].network, cidr=fixnet)
LOG.debug('Create network: |%s|', kwargs)
return self._do_create_networks(context, **kwargs)
@staticmethod
def _index_of(subnet, ip):
try:
start = netaddr.IPAddress(ip)
except netaddr.AddrFormatError:
raise exception.InvalidAddress(address=ip)
index = start.value - subnet.value
if index < 0 or index >= subnet.size:
raise exception.AddressOutOfRange(address=ip, cidr=str(subnet))
return index
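    # Worked example (illustrative): for subnet 192.168.0.0/24, an ip of
    # '192.168.0.5' gives index 5, '192.168.1.5' falls outside the /24
    # (index 261 >= 256) and raises AddressOutOfRange, and a malformed
    # string raises InvalidAddress.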
def _do_create_networks(self, context,
label, cidr, multi_host, num_networks,
network_size, cidr_v6, gateway, gateway_v6, bridge,
bridge_interface, dns1=None, dns2=None,
fixed_cidr=None, mtu=None, dhcp_server=None,
enable_dhcp=None, share_address=None,
allowed_start=None, allowed_end=None, **kwargs):
"""Create networks based on parameters."""
# NOTE(jkoelker): these are dummy values to make sure iter works
# TODO(tr3buchet): disallow carving up networks
fixed_net_v4 = netaddr.IPNetwork('0/32')
fixed_net_v6 = netaddr.IPNetwork('::0/128')
subnets_v4 = []
subnets_v6 = []
if kwargs.get('ipam'):
if cidr_v6:
subnets_v6 = [netaddr.IPNetwork(cidr_v6)]
if cidr:
subnets_v4 = [netaddr.IPNetwork(cidr)]
else:
subnet_bits = int(math.ceil(math.log(network_size, 2)))
if cidr_v6:
fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
prefixlen_v6 = 128 - subnet_bits
# smallest subnet in IPv6 ethernet network is /64
if prefixlen_v6 > 64:
prefixlen_v6 = 64
subnets_v6 = fixed_net_v6.subnet(prefixlen_v6,
count=num_networks)
if cidr:
fixed_net_v4 = netaddr.IPNetwork(cidr)
prefixlen_v4 = 32 - subnet_bits
subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
count=num_networks))
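        # Worked example (illustrative): with network_size=256 and
        # num_networks=2, subnet_bits is 8, so prefixlen_v4 becomes /24 and
        # two /24 subnets are carved from the fixed range; prefixlen_v6 is
        # clamped to /64, the smallest conventional subnet on an IPv6
        # ethernet segment.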
if cidr:
# NOTE(jkoelker): This replaces the _validate_cidrs call and
# prevents looping multiple times
try:
nets = objects.NetworkList.get_all(context)
except exception.NoNetworksFound:
nets = []
num_used_nets = len(nets)
used_subnets = [net.cidr for net in nets]
def find_next(subnet):
next_subnet = subnet.next()
while next_subnet in subnets_v4:
next_subnet = next_subnet.next()
if next_subnet in fixed_net_v4:
return next_subnet
for subnet in list(subnets_v4):
if subnet in used_subnets:
next_subnet = find_next(subnet)
if next_subnet:
subnets_v4.remove(subnet)
subnets_v4.append(next_subnet)
subnet = next_subnet
else:
raise exception.CidrConflict(cidr=subnet,
other=subnet)
for used_subnet in used_subnets:
if subnet in used_subnet:
raise exception.CidrConflict(cidr=subnet,
other=used_subnet)
if used_subnet in subnet:
next_subnet = find_next(subnet)
if next_subnet:
subnets_v4.remove(subnet)
subnets_v4.append(next_subnet)
subnet = next_subnet
else:
raise exception.CidrConflict(cidr=subnet,
other=used_subnet)
networks = objects.NetworkList(context=context, objects=[])
subnets = itertools.izip_longest(subnets_v4, subnets_v6)
for index, (subnet_v4, subnet_v6) in enumerate(subnets):
net = objects.Network(context=context)
net.bridge = bridge
net.bridge_interface = bridge_interface
net.multi_host = multi_host
net.dns1 = dns1
net.dns2 = dns2
net.mtu = mtu
net.enable_dhcp = enable_dhcp
net.share_address = share_address
net.project_id = kwargs.get('project_id')
if num_networks > 1:
net.label = '%s_%d' % (label, index)
else:
net.label = label
bottom_reserved = self._bottom_reserved_ips
top_reserved = self._top_reserved_ips
extra_reserved = []
if cidr and subnet_v4:
current = subnet_v4[1]
if allowed_start:
val = self._index_of(subnet_v4, allowed_start)
current = netaddr.IPAddress(allowed_start)
bottom_reserved = val
if allowed_end:
val = self._index_of(subnet_v4, allowed_end)
top_reserved = subnet_v4.size - 1 - val
net.cidr = str(subnet_v4)
net.netmask = str(subnet_v4.netmask)
net.broadcast = str(subnet_v4.broadcast)
if gateway:
net.gateway = gateway
else:
net.gateway = current
current += 1
if not dhcp_server:
dhcp_server = net.gateway
net.dhcp_start = current
current += 1
if str(net.dhcp_start) == dhcp_server:
net.dhcp_start = current
net.dhcp_server = dhcp_server
extra_reserved.append(str(net.dhcp_server))
extra_reserved.append(str(net.gateway))
if cidr_v6 and subnet_v6:
net.cidr_v6 = str(subnet_v6)
if gateway_v6:
# use a pre-defined gateway if one is provided
net.gateway_v6 = str(gateway_v6)
else:
net.gateway_v6 = str(subnet_v6[1])
net.netmask_v6 = str(subnet_v6.netmask)
if CONF.network_manager == 'nova.network.manager.VlanManager':
vlan = kwargs.get('vlan', None)
if not vlan:
index_vlan = index + num_used_nets
vlan = kwargs['vlan_start'] + index_vlan
used_vlans = [x.vlan for x in nets]
if vlan in used_vlans:
# That vlan is used, try to get another one
used_vlans.sort()
vlan = used_vlans[-1] + 1
net.vpn_private_address = net.dhcp_start
extra_reserved.append(str(net.vpn_private_address))
net.dhcp_start = net.dhcp_start + 1
net.vlan = vlan
net.bridge = 'br%s' % vlan
# NOTE(vish): This makes ports unique across the cloud, a more
                # robust solution would be to make them unique per ip
index_vpn = index + num_used_nets
net.vpn_public_port = kwargs['vpn_start'] + index_vpn
net.create()
networks.objects.append(net)
if cidr and subnet_v4:
self._create_fixed_ips(context, net.id, fixed_cidr,
extra_reserved, bottom_reserved,
top_reserved)
# NOTE(danms): Remove this in RPC API v2.0
return obj_base.obj_to_primitive(networks)
def delete_network(self, context, fixed_range, uuid,
require_disassociated=True):
# Prefer uuid but we'll also take cidr for backwards compatibility
elevated = context.elevated()
if uuid:
network = objects.Network.get_by_uuid(elevated, uuid)
elif fixed_range:
network = objects.Network.get_by_cidr(elevated, fixed_range)
LOG.debug('Delete network %s', network['uuid'])
if require_disassociated and network.project_id is not None:
raise exception.NetworkHasProject(project_id=network.project_id)
network.destroy()
@property
def _bottom_reserved_ips(self):
"""Number of reserved ips at the bottom of the range."""
return 2 # network, gateway
@property
def _top_reserved_ips(self):
"""Number of reserved ips at the top of the range."""
return 1 # broadcast
def _create_fixed_ips(self, context, network_id, fixed_cidr=None,
extra_reserved=None, bottom_reserved=0,
top_reserved=0):
"""Create all fixed ips for network."""
network = self._get_network_by_id(context, network_id)
if extra_reserved is None:
extra_reserved = []
if not fixed_cidr:
fixed_cidr = netaddr.IPNetwork(network['cidr'])
num_ips = len(fixed_cidr)
ips = []
for index in range(num_ips):
address = str(fixed_cidr[index])
if (index < bottom_reserved or num_ips - index <= top_reserved or
address in extra_reserved):
reserved = True
else:
reserved = False
ips.append({'network_id': network_id,
'address': address,
'reserved': reserved})
objects.FixedIPList.bulk_create(context, ips)
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
raise NotImplementedError()
def setup_networks_on_host(self, context, instance_id, host,
teardown=False):
"""calls setup/teardown on network hosts for an instance."""
green_threads = []
if teardown:
call_func = self._teardown_network_on_host
else:
call_func = self._setup_network_on_host
instance = objects.Instance.get_by_id(context, instance_id)
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(
context, instance.uuid)
LOG.debug('Setup networks on host', instance=instance)
for vif in vifs:
network = objects.Network.get_by_id(context, vif.network_id)
if not network.multi_host:
# NOTE (tr3buchet): if using multi_host, host is instance.host
host = network['host']
if self.host == host or host is None:
# at this point i am the correct host, or host doesn't
# matter -> FlatManager
call_func(context, network)
else:
# i'm not the right host, run call on correct host
green_threads.append(eventlet.spawn(
self.network_rpcapi.rpc_setup_network_on_host, context,
network.id, teardown, host))
# wait for all of the setups (if any) to finish
for gt in green_threads:
gt.wait()
def rpc_setup_network_on_host(self, context, network_id, teardown):
if teardown:
call_func = self._teardown_network_on_host
else:
call_func = self._setup_network_on_host
# subcall from original setup_networks_on_host
network = objects.Network.get_by_id(context, network_id)
call_func(context, network)
def _initialize_network(self, network):
if network.enable_dhcp:
is_ext = (network.dhcp_server is not None and
network.dhcp_server != network.gateway)
self.l3driver.initialize_network(network.cidr, is_ext)
self.l3driver.initialize_gateway(network)
def _setup_network_on_host(self, context, network):
"""Sets up network on this host."""
raise NotImplementedError()
def _teardown_network_on_host(self, context, network):
"""Sets up network on this host."""
raise NotImplementedError()
def validate_networks(self, context, networks):
"""check if the networks exists and host
is set to each network.
"""
LOG.debug('Validate networks')
if networks is None or len(networks) == 0:
return
network_uuids = [uuid for (uuid, fixed_ip) in networks]
self._get_networks_by_uuids(context, network_uuids)
for network_uuid, address in networks:
# check if the fixed IP address is valid and
# it actually belongs to the network
if address is not None:
if not netutils.is_valid_ip(address):
raise exception.FixedIpInvalid(address=address)
fixed_ip_ref = objects.FixedIP.get_by_address(
context, address, expected_attrs=['network'])
network = fixed_ip_ref.network
if network.uuid != network_uuid:
raise exception.FixedIpNotFoundForNetwork(
address=address, network_uuid=network_uuid)
if fixed_ip_ref.instance_uuid is not None:
raise exception.FixedIpAlreadyInUse(
address=address,
instance_uuid=fixed_ip_ref.instance_uuid)
def _get_network_by_id(self, context, network_id):
return objects.Network.get_by_id(context, network_id,
project_only='allow_none')
def _get_networks_by_uuids(self, context, network_uuids):
networks = objects.NetworkList.get_by_uuids(
context, network_uuids, project_only="allow_none")
networks.sort(key=lambda x: network_uuids.index(x.uuid))
return networks
def get_vifs_by_instance(self, context, instance_id):
"""Returns the vifs associated with an instance."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
instance = objects.Instance.get_by_id(context, instance_id)
LOG.debug('Get VIFs for instance', instance=instance)
# NOTE(russellb) No need to object-ify this since
# get_vifs_by_instance() is unused and set to be removed.
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
instance.uuid)
for vif in vifs:
if vif.network_id is not None:
network = self._get_network_by_id(context, vif.network_id)
vif.net_uuid = network.uuid
return [dict(vif.iteritems()) for vif in vifs]
def get_instance_id_by_floating_address(self, context, address):
"""Returns the instance id a floating ip's fixed ip is allocated to."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
LOG.debug('Get instance for floating address %s', address)
fixed_ip = objects.FixedIP.get_by_floating_address(context, address)
if fixed_ip is None:
return None
else:
return fixed_ip.instance_uuid
def get_network(self, context, network_uuid):
# NOTE(vish): used locally
return objects.Network.get_by_uuid(context.elevated(), network_uuid)
def get_all_networks(self, context):
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
try:
return obj_base.obj_to_primitive(
objects.NetworkList.get_all(context))
except exception.NoNetworksFound:
return []
def disassociate_network(self, context, network_uuid):
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
network = self.get_network(context, network_uuid)
network.disassociate(context, network.id)
def get_fixed_ip(self, context, id):
"""Return a fixed ip."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
return objects.FixedIP.get_by_id(context, id)
def get_fixed_ip_by_address(self, context, address):
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
return objects.FixedIP.get_by_address(context, address)
def get_vif_by_mac_address(self, context, mac_address):
"""Returns the vifs record for the mac_address."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
# NOTE(russellb) No need to object-ify this since
# get_vifs_by_instance() is unused and set to be removed.
vif = objects.VirtualInterface.get_by_address(context, mac_address)
if vif.network_id is not None:
network = self._get_network_by_id(context, vif.network_id)
vif.net_uuid = network.uuid
return vif
@periodic_task.periodic_task(
spacing=CONF.dns_update_periodic_interval)
def _periodic_update_dns(self, context):
"""Update local DNS entries of all networks on this host."""
networks = objects.NetworkList.get_by_host(context, self.host)
for network in networks:
dev = self.driver.get_dev(network)
self.driver.update_dns(context, dev, network)
def update_dns(self, context, network_ids):
"""Called when fixed IP is allocated or deallocated."""
if CONF.fake_network:
return
LOG.debug('Update DNS for network ids: %s', network_ids)
networks = [network for network in
objects.NetworkList.get_by_host(context, self.host)
if network.multi_host and network.id in network_ids]
for network in networks:
dev = self.driver.get_dev(network)
self.driver.update_dns(context, dev, network)
def add_network_to_project(self, ctxt, project_id, network_uuid):
raise NotImplementedError()
class FlatManager(NetworkManager):
"""Basic network where no vlans are used.
FlatManager does not do any bridge or vlan creation. The user is
responsible for setting up whatever bridges are specified when creating
networks through nova-manage. This bridge needs to be created on all
compute hosts.
The idea is to create a single network for the host with a command like:
nova-manage network create 192.168.0.0/24 1 256. Creating multiple
networks for one manager is currently not supported, but could be
added by modifying allocate_fixed_ip and get_network to get the network
with new logic. Arbitrary lists of addresses in a single network can
be accomplished with manual db editing.
If flat_injected is True, the compute host will attempt to inject network
config into the guest. It attempts to modify /etc/network/interfaces and
currently only works on debian based systems. To support a wider range of
OSes, some other method may need to be devised to let the guest know which
ip it should be using so that it can configure itself. Perhaps an attached
disk or serial device with configuration info.
Metadata forwarding must be handled by the gateway, and since nova does
not do any setup in this mode, it must be done manually. Requests to
169.254.169.254 port 80 will need to be forwarded to the api server.
"""
timeout_fixed_ips = False
required_create_args = ['bridge']
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
requested_networks = kwargs.get('requested_networks')
addresses_by_network = {}
if requested_networks is not None:
for request in requested_networks:
addresses_by_network[request.network_id] = request.address
for network in networks:
if network['uuid'] in addresses_by_network:
address = addresses_by_network[network['uuid']]
else:
address = None
self.allocate_fixed_ip(context, instance_id,
network, address=address)
def deallocate_fixed_ip(self, context, address, host=None, teardown=True,
instance=None):
"""Returns a fixed ip to the pool."""
super(FlatManager, self).deallocate_fixed_ip(context, address, host,
teardown,
instance=instance)
objects.FixedIP.disassociate_by_address(context, address)
def _setup_network_on_host(self, context, network):
"""Setup Network on this host."""
# NOTE(tr3buchet): this does not need to happen on every ip
# allocation, this functionality makes more sense in create_network
# but we'd have to move the flat_injected flag to compute
network.injected = CONF.flat_injected
network.save()
def _teardown_network_on_host(self, context, network):
"""Tear down network on this host."""
pass
# NOTE(justinsb): The floating ip functions are stub-implemented.
# We were throwing an exception, but this was messing up horizon.
# Timing makes it difficult to implement floating ips here, in Essex.
def get_floating_ip(self, context, id):
"""Returns a floating IP as a dict."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
return None
def get_floating_pools(self, context):
"""Returns list of floating pools."""
# NOTE(maurosr) This method should be removed in future, replaced by
# get_floating_ip_pools. See bug #1091668
return {}
def get_floating_ip_pools(self, context):
"""Returns list of floating ip pools."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
return {}
def get_floating_ip_by_address(self, context, address):
"""Returns a floating IP as a dict."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
return None
def get_floating_ips_by_project(self, context):
"""Returns the floating IPs allocated to a project."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
return []
def get_floating_ips_by_fixed_address(self, context, fixed_address):
"""Returns the floating IPs associated with a fixed_address."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
return []
# NOTE(hanlind): This method can be removed in version 2.0 of the RPC API
def allocate_floating_ip(self, context, project_id, pool):
"""Gets a floating ip from the pool."""
return None
# NOTE(hanlind): This method can be removed in version 2.0 of the RPC API
def deallocate_floating_ip(self, context, address,
affect_auto_assigned):
"""Returns a floating ip to the pool."""
return None
# NOTE(hanlind): This method can be removed in version 2.0 of the RPC API
def associate_floating_ip(self, context, floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.
Makes sure everything makes sense then calls _associate_floating_ip,
rpc'ing to correct host if i'm not it.
"""
return None
# NOTE(hanlind): This method can be removed in version 2.0 of the RPC API
def disassociate_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Disassociates a floating ip from its fixed ip.
Makes sure everything makes sense then calls _disassociate_floating_ip,
rpc'ing to correct host if i'm not it.
"""
return None
def migrate_instance_start(self, context, instance_uuid,
floating_addresses,
rxtx_factor=None, project_id=None,
source=None, dest=None):
pass
def migrate_instance_finish(self, context, instance_uuid,
floating_addresses, host=None,
rxtx_factor=None, project_id=None,
source=None, dest=None):
pass
def update_dns(self, context, network_ids):
"""Called when fixed IP is allocated or deallocated."""
pass
class FlatDHCPManager(RPCAllocateFixedIP, floating_ips.FloatingIP,
NetworkManager):
"""Flat networking with dhcp.
FlatDHCPManager will start up one dhcp server to give out addresses.
It never injects network settings into the guest. It also manages bridges.
Otherwise it behaves like FlatManager.
"""
SHOULD_CREATE_BRIDGE = True
DHCP = True
required_create_args = ['bridge']
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
ctxt = context.get_admin_context()
networks = objects.NetworkList.get_by_host(ctxt, self.host)
self.driver.iptables_manager.defer_apply_on()
self.l3driver.initialize(fixed_range=False, networks=networks)
super(FlatDHCPManager, self).init_host()
self.init_host_floating_ips()
self.driver.iptables_manager.defer_apply_off()
def _setup_network_on_host(self, context, network):
"""Sets up network on this host."""
network.dhcp_server = self._get_dhcp_ip(context, network)
self._initialize_network(network)
# NOTE(vish): if dhcp server is not set then don't dhcp
if not CONF.fake_network and network.enable_dhcp:
dev = self.driver.get_dev(network)
# NOTE(dprince): dhcp DB queries require elevated context
elevated = context.elevated()
self.driver.update_dhcp(elevated, dev, network)
if CONF.use_ipv6:
self.driver.update_ra(context, dev, network)
gateway = utils.get_my_linklocal(dev)
network.gateway_v6 = gateway
network.save()
def _teardown_network_on_host(self, context, network):
# NOTE(vish): if dhcp server is not set then don't dhcp
if not CONF.fake_network and network.enable_dhcp:
network['dhcp_server'] = self._get_dhcp_ip(context, network)
dev = self.driver.get_dev(network)
# NOTE(dprince): dhcp DB queries require elevated context
elevated = context.elevated()
self.driver.update_dhcp(elevated, dev, network)
def _get_network_dict(self, network):
"""Returns the dict representing necessary and meta network fields."""
# get generic network fields
network_dict = super(FlatDHCPManager, self)._get_network_dict(network)
# get flat dhcp specific fields
if self.SHOULD_CREATE_BRIDGE:
network_dict['should_create_bridge'] = self.SHOULD_CREATE_BRIDGE
if network.get('bridge_interface'):
network_dict['bridge_interface'] = network['bridge_interface']
if network.get('multi_host'):
network_dict['multi_host'] = network['multi_host']
return network_dict
class VlanManager(RPCAllocateFixedIP, floating_ips.FloatingIP, NetworkManager):
"""Vlan network with dhcp.
VlanManager is the most complicated. It will create a host-managed
vlan for each project. Each project gets its own subnet. The networks
and associated subnets are created with nova-manage using a command like:
nova-manage network create 10.0.0.0/8 3 16. This will create 3 networks
of 16 addresses from the beginning of the 10.0.0.0 range.
A dhcp server is run for each subnet, so each project will have its own.
For this mode to be useful, each project will need a vpn to access the
instances in its subnet.
"""
SHOULD_CREATE_BRIDGE = True
SHOULD_CREATE_VLAN = True
DHCP = True
required_create_args = ['bridge_interface']
def __init__(self, network_driver=None, *args, **kwargs):
super(VlanManager, self).__init__(network_driver=network_driver,
*args, **kwargs)
# NOTE(cfb) VlanManager doesn't enforce quotas on fixed IP addresses
# because a project is assigned an entire network.
self.quotas_cls = objects.QuotasNoOp
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
LOG.debug('Setup network on host %s', self.host)
ctxt = context.get_admin_context()
networks = objects.NetworkList.get_by_host(ctxt, self.host)
self.driver.iptables_manager.defer_apply_on()
self.l3driver.initialize(fixed_range=False, networks=networks)
NetworkManager.init_host(self)
self.init_host_floating_ips()
self.driver.iptables_manager.defer_apply_off()
def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
LOG.debug('Allocate fixed ip on network %s', network['uuid'],
instance_uuid=instance_id)
if kwargs.get('vpn', None):
address = network['vpn_private_address']
fip = objects.FixedIP.associate(context, str(address),
instance_id, network['id'],
reserved=True)
else:
address = kwargs.get('address', None)
if address:
fip = objects.FixedIP.associate(context, str(address),
instance_id,
network['id'])
else:
fip = objects.FixedIP.associate_pool(context,
network['id'],
instance_id)
address = fip.address
vif = objects.VirtualInterface.get_by_instance_and_network(
context, instance_id, network['id'])
if vif is None:
LOG.debug('vif for network %(network)s and instance '
'%(instance_id)s is used up, '
'trying to create new vif',
{'network': network['id'],
'instance_id': instance_id})
vif = self._add_virtual_interface(context,
instance_id, network['id'])
fip.allocated = True
fip.virtual_interface_id = vif.id
fip.save()
if not kwargs.get('vpn', None):
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
# NOTE(vish) This db query could be removed if we pass az and name
# (or the whole instance object).
instance = objects.Instance.get_by_uuid(context, instance_id)
name = instance.display_name
if self._validate_instance_zone_for_dns_domain(context, instance):
self.instance_dns_manager.create_entry(name, address,
"A",
self.instance_dns_domain)
self.instance_dns_manager.create_entry(instance_id, address,
"A",
self.instance_dns_domain)
self._setup_network_on_host(context, network)
LOG.debug('Allocated fixed ip %s on network %s', address,
network['uuid'], instance=instance)
return address
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force adds another network to a project."""
LOG.debug('Add network %s to project %s', network_uuid, project_id)
if network_uuid is not None:
network_id = self.get_network(context, network_uuid).id
else:
network_id = None
objects.Network.associate(context, project_id, network_id, force=True)
def associate(self, context, network_uuid, associations):
"""Associate or disassociate host or project to network."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi to 2.0.
LOG.debug('Associate network %s: |%s|', network_uuid, associations)
network = self.get_network(context, network_uuid)
network_id = network.id
if 'host' in associations:
host = associations['host']
if host is None:
network.disassociate(context, network_id,
host=True, project=False)
else:
network.host = self.host
network.save()
if 'project' in associations:
project = associations['project']
if project is None:
network.disassociate(context, network_id,
host=False, project=True)
else:
network.associate(context, project, network_id, force=True)
def _get_network_by_id(self, context, network_id):
# NOTE(vish): Don't allow access to networks with project_id=None as
# these are networks that haven't been allocated to a
# project yet.
return objects.Network.get_by_id(context, network_id,
project_only=True)
def _get_networks_by_uuids(self, context, network_uuids):
# NOTE(vish): Don't allow access to networks with project_id=None as
# these are networks that haven't been allocated to a
# project yet.
networks = objects.NetworkList.get_by_uuids(
context, network_uuids, project_only=True)
networks.sort(key=lambda x: network_uuids.index(x.uuid))
return networks
def _get_networks_for_instance(self, context, instance_id, project_id,
requested_networks=None):
"""Determine which networks an instance should connect to."""
# get networks associated with project
if requested_networks is not None and len(requested_networks) != 0:
network_uuids = [request.network_id
for request in requested_networks]
networks = self._get_networks_by_uuids(context, network_uuids)
else:
# NOTE(vish): Allocates network on demand so requires admin.
networks = objects.NetworkList.get_by_project(
context.elevated(), project_id)
return networks
def create_networks(self, context, **kwargs):
"""Create networks based on parameters."""
self._convert_int_args(kwargs)
kwargs["vlan_start"] = kwargs.get("vlan_start") or CONF.vlan_start
kwargs["num_networks"] = (kwargs.get("num_networks") or
CONF.num_networks)
kwargs["network_size"] = (kwargs.get("network_size") or
CONF.network_size)
# Check that num_networks + vlan_start is not > 4094, fixes lp708025
if kwargs["num_networks"] + kwargs["vlan_start"] > 4094:
raise ValueError(_('The sum between the number of networks and'
' the vlan start cannot be greater'
' than 4094'))
# Check that vlan is not greater than 4094 or less then 1
vlan_num = kwargs.get("vlan", None)
if vlan_num is not None:
try:
vlan_num = int(vlan_num)
except ValueError:
raise ValueError(_("vlan must be an integer"))
if vlan_num > 4094:
raise ValueError(_('The vlan number cannot be greater than'
' 4094'))
if vlan_num < 1:
raise ValueError(_('The vlan number cannot be less than 1'))
# check that num networks and network size fits in fixed_net
fixed_net = netaddr.IPNetwork(kwargs['cidr'])
if fixed_net.size < kwargs['num_networks'] * kwargs['network_size']:
raise ValueError(_('The network range is not '
'big enough to fit %(num_networks)s networks. Network '
'size is %(network_size)s') % kwargs)
kwargs['bridge_interface'] = (kwargs.get('bridge_interface') or
CONF.vlan_interface)
LOG.debug('Create network: |%s|', kwargs)
return NetworkManager.create_networks(
self, context, vpn=True, **kwargs)
@utils.synchronized('setup_network', external=True)
def _setup_network_on_host(self, context, network):
"""Sets up network on this host."""
if not network.vpn_public_address:
address = CONF.vpn_ip
network.vpn_public_address = address
network.save()
else:
address = network.vpn_public_address
network.dhcp_server = self._get_dhcp_ip(context, network)
self._initialize_network(network)
# NOTE(vish): only ensure this forward if the address hasn't been set
# manually.
if address == CONF.vpn_ip and hasattr(self.driver,
"ensure_vpn_forward"):
self.l3driver.add_vpn(CONF.vpn_ip,
network.vpn_public_port,
network.vpn_private_address)
if not CONF.fake_network:
dev = self.driver.get_dev(network)
# NOTE(dprince): dhcp DB queries require elevated context
if network.enable_dhcp:
elevated = context.elevated()
self.driver.update_dhcp(elevated, dev, network)
if CONF.use_ipv6:
self.driver.update_ra(context, dev, network)
gateway = utils.get_my_linklocal(dev)
network.gateway_v6 = gateway
network.save()
@utils.synchronized('setup_network', external=True)
def _teardown_network_on_host(self, context, network):
if not CONF.fake_network:
network['dhcp_server'] = self._get_dhcp_ip(context, network)
dev = self.driver.get_dev(network)
            # NOTE(ethuleau): For multi-hosted networks, if the network is no
            # longer used on this host and VPN forwarding rules aren't handled
            # by the host, we delete the network gateway.
vpn_address = network['vpn_public_address']
if (CONF.teardown_unused_network_gateway and
network['multi_host'] and vpn_address != CONF.vpn_ip and
not objects.Network.in_use_on_host(context, network['id'],
self.host)):
LOG.debug("Remove unused gateway %s", network['bridge'])
if network.enable_dhcp:
self.driver.kill_dhcp(dev)
self.l3driver.remove_gateway(network)
if not self._uses_shared_ip(network):
fip = objects.FixedIP.get_by_address(context,
network.dhcp_server)
fip.allocated = False
fip.host = None
fip.save()
# NOTE(vish): if dhcp server is not set then don't dhcp
elif network.enable_dhcp:
# NOTE(dprince): dhcp DB queries require elevated context
elevated = context.elevated()
self.driver.update_dhcp(elevated, dev, network)
def _get_network_dict(self, network):
"""Returns the dict representing necessary and meta network fields."""
# get generic network fields
network_dict = super(VlanManager, self)._get_network_dict(network)
# get vlan specific network fields
if self.SHOULD_CREATE_BRIDGE:
network_dict['should_create_bridge'] = self.SHOULD_CREATE_BRIDGE
if self.SHOULD_CREATE_VLAN:
network_dict['should_create_vlan'] = self.SHOULD_CREATE_VLAN
for k in ['vlan', 'bridge_interface', 'multi_host']:
if network.get(k):
network_dict[k] = network[k]
return network_dict
@property
def _bottom_reserved_ips(self):
"""Number of reserved ips at the bottom of the range."""
return super(VlanManager, self)._bottom_reserved_ips + 1 # vpn server
@property
def _top_reserved_ips(self):
"""Number of reserved ips at the top of the range."""
parent_reserved = super(VlanManager, self)._top_reserved_ips
return parent_reserved + CONF.cnt_vpn_clients
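    # Worked example (sketch): for a /24 VlanManager network with
    # CONF.cnt_vpn_clients = 0, the bottom reserves 3 addresses (network,
    # gateway, VPN server) and the top reserves 1 (broadcast), leaving
    # 256 - 4 = 252 addresses available as fixed IPs.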
| apache-2.0 | 375,943,698,876,767,740 | 43.378453 | 89 | 0.55347 | false |
Ruide/angr-dev | angrop/angrop/rop.py | 1 | 16690 | from angr.errors import SimEngineError, SimMemoryError
from angr.analyses.bindiff import differing_constants
from angr import Analysis, register_analysis
import chain_builder
import gadget_analyzer
import common
import pickle
import inspect
import logging
import progressbar
from errors import RopException
from .rop_gadget import RopGadget, StackPivot
from multiprocessing import Pool
l = logging.getLogger('angrop.rop')
_global_gadget_analyzer = None
# global initializer for multiprocessing
def _set_global_gadget_analyzer(rop_gadget_analyzer):
global _global_gadget_analyzer
_global_gadget_analyzer = rop_gadget_analyzer
def run_worker(addr):
return _global_gadget_analyzer.analyze_gadget(addr)
# todo what if we have mov eax, [rsp+0x20]; ret (cache would need to know where it is or at least a min/max)
# todo what if we have pop eax; mov ebx, eax; need to encode that we cannot set them to different values
class ROP(Analysis):
"""
    This class is a semantic-aware ROP gadget finder.
    It is a work in progress, so don't be surprised if something doesn't quite work.
    After calling find_gadgets(), find_gadgets_single_threaded() or load_gadgets(),
    self.gadgets, self.stack_pivots, and self._duplicates are populated.
    Additionally, all public methods from ChainBuilder are copied into ROP.
"""
def __init__(self, only_check_near_rets=True, max_block_size=20, max_sym_mem_accesses=4, fast_mode=None):
"""
Initializes the rop gadget finder
:param only_check_near_rets: If true we skip blocks that are not near rets
:param max_block_size: limits the size of blocks considered, longer blocks are less likely to be good rop
gadgets so we limit the size we consider
:param fast_mode: if set to True sets options to run fast, if set to False sets options to find more gadgets
if set to None makes a decision based on the size of the binary
:return:
"""
# params
self._max_block_size = max_block_size
self._only_check_near_rets = only_check_near_rets
self._max_sym_mem_accesses = max_sym_mem_accesses
a = self.project.arch
self._sp_reg = a.register_names[a.sp_offset]
self._ip_reg = a.register_names[a.ip_offset]
self._base_pointer = a.register_names[a.bp_offset]
# get list of multipurpose registers
self._reg_list = a.default_symbolic_registers
# prune the register list of the instruction pointer and the stack pointer
self._reg_list = filter(lambda r: r != self._sp_reg, self._reg_list)
self._reg_list = filter(lambda r: r != self._ip_reg, self._reg_list)
# get ret locations
self._ret_locations = self._get_ret_locations()
# list of RopGadget's
self.gadgets = []
self.stack_pivots = []
self._duplicates = []
# RopChain settings
self.badbytes = []
self.roparg_filler = None
num_to_check = len(list(self._addresses_to_check()))
# fast mode
if fast_mode is None:
if num_to_check > 20000:
fast_mode = True
l.warning("Enabling fast mode for large binary")
else:
fast_mode = False
self._fast_mode = fast_mode
if self._fast_mode:
self._max_block_size = 12
self._max_sym_mem_accesses = 1
num_to_check = len(list(self._addresses_to_check()))
l.info("There are %d addresses within %d bytes of a ret",
num_to_check, self._max_block_size)
# gadget analyzer
self._gadget_analyzer = gadget_analyzer.GadgetAnalyzer(self.project, self._reg_list, self._max_block_size,
self._fast_mode, self._max_sym_mem_accesses)
# chain builder
self._chain_builder = None
# silence annoying loggers
logging.getLogger('angr.engines.vex.ccall').setLevel(logging.CRITICAL)
logging.getLogger('angr.engines.vex.expressions.ccall').setLevel(logging.CRITICAL)
logging.getLogger('angr.engines.vex.irop').setLevel(logging.CRITICAL)
def find_gadgets(self, processes=4, show_progress=True):
"""
Finds all the gadgets in the binary by calling analyze_gadget on every address near a ret.
Saves gadgets in self.gadgets
Saves stack pivots in self.stack_pivots
:param processes: number of processes to use
"""
self.gadgets = []
pool = Pool(processes=processes, initializer=_set_global_gadget_analyzer, initargs=(self._gadget_analyzer,))
it = pool.imap_unordered(run_worker, self._addresses_to_check_with_caching(show_progress), chunksize=5)
for gadget in it:
if gadget is not None:
if isinstance(gadget, RopGadget):
self.gadgets.append(gadget)
elif isinstance(gadget, StackPivot):
self.stack_pivots.append(gadget)
pool.close()
# fix up gadgets from cache
for g in self.gadgets:
if g.addr in self._cache:
dups = {g.addr}
for addr in self._cache[g.addr]:
dups.add(addr)
g_copy = g.copy()
g_copy.addr = addr
self.gadgets.append(g_copy)
self._duplicates.append(dups)
self.gadgets = sorted(self.gadgets, key=lambda x: x.addr)
self._reload_chain_funcs()
def find_gadgets_single_threaded(self):
"""
Finds all the gadgets in the binary by calling analyze_gadget on every address near a ret
Saves gadgets in self.gadgets
Saves stack pivots in self.stack_pivots
"""
self.gadgets = []
_set_global_gadget_analyzer(self._gadget_analyzer)
for _, addr in enumerate(self._addresses_to_check_with_caching()):
gadget = _global_gadget_analyzer.analyze_gadget(addr)
if gadget is not None:
if isinstance(gadget, RopGadget):
self.gadgets.append(gadget)
elif isinstance(gadget, StackPivot):
self.stack_pivots.append(gadget)
# fix up gadgets from cache
for g in self.gadgets:
if g.addr in self._cache:
dups = {g.addr}
for addr in self._cache[g.addr]:
dups.add(addr)
g_copy = g.copy()
g_copy.addr = addr
self.gadgets.append(g_copy)
self._duplicates.append(dups)
self.gadgets = sorted(self.gadgets, key=lambda x: x.addr)
self._reload_chain_funcs()
def save_gadgets(self, path):
"""
Saves gadgets in a file.
:param path: A path for a file where the gadgets are stored
"""
with open(path, "wb") as f:
pickle.dump(self._get_cache_tuple(), f)
def load_gadgets(self, path):
"""
Loads gadgets from a file.
:param path: A path for a file where the gadgets are loaded
"""
cache_tuple = pickle.load(open(path, "rb"))
self._load_cache_tuple(cache_tuple)
def set_badbytes(self, badbytes):
"""
Define badbytes which should not appear in the generated ropchain.
        :param badbytes: a list of 8-bit integers
"""
if not isinstance(badbytes, list):
print "Require a list, e.g: [0x00, 0x09]"
return
self.badbytes = badbytes
if len(self.gadgets) > 0:
self.chain_builder._set_badbytes(self.badbytes)
def set_roparg_filler(self, roparg_filler):
"""
Define rop gadget filler argument. These will be used if the rop chain needs to pop
useless registers.
If roparg_filler is None, symbolic values will be used and the concrete values will
be whatever the constraint solver chooses (usually 0).
        :param roparg_filler: An integer which is used when popping useless registers, or None.
"""
if not isinstance(roparg_filler, (int, type(None))):
print "Require an integer, e.g: 0x41414141 or None"
return
self.roparg_filler = roparg_filler
if len(self.gadgets) > 0:
self.chain_builder._set_roparg_filler(self.roparg_filler)
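    # For example (sketch, mirroring the hints printed above):
    #   rop.set_badbytes([0x00, 0x09])      # keep NUL/tab bytes out of generated chains
    #   rop.set_roparg_filler(0x41414141)   # pad useless pops with 0x41414141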
def get_badbytes(self):
"""
Returns list of badbytes.
        :returns: the list of badbytes
"""
return self.badbytes
def _get_cache_tuple(self):
return self.gadgets, self.stack_pivots, self._duplicates
def _load_cache_tuple(self, cache_tuple):
self.gadgets, self.stack_pivots, self._duplicates = cache_tuple
self._reload_chain_funcs()
def _reload_chain_funcs(self):
for f_name, f in inspect.getmembers(self.chain_builder, predicate=inspect.ismethod):
if f_name.startswith("_"):
continue
setattr(self, f_name, f)
@property
def chain_builder(self):
if self._chain_builder is not None:
return self._chain_builder
elif len(self.gadgets) > 0:
self._chain_builder = chain_builder.ChainBuilder(self.project, self.gadgets, self._duplicates,
self._reg_list, self._base_pointer, self.badbytes,
self.roparg_filler)
return self._chain_builder
else:
raise Exception("No gadgets available, call find_gadgets() or load_gadgets() if you haven't already.")
def _block_has_ip_relative(self, addr, bl):
"""
Checks if a block has any ip relative instructions
"""
string = bl.bytes
test_addr = 0x41414140 + addr % 0x10
bl2 = self.project.factory.block(test_addr, byte_string=string)
try:
diff_constants = differing_constants(bl, bl2)
except angr.analyses.bindiff.UnmatchedStatementsException:
return True
# check if it changes if we move it
bl_end = addr + bl.size
bl2_end = test_addr + bl2.size
filtered_diffs = []
for d in diff_constants:
if d.value_a < addr or d.value_a >= bl_end or \
d.value_b < test_addr or d.value_b >= bl2_end:
filtered_diffs.append(d)
return len(filtered_diffs) > 0
def _addresses_to_check_with_caching(self, show_progress=True):
num_addrs = len(list(self._addresses_to_check()))
widgets = ['ROP: ', progressbar.Percentage(), ' ',
progressbar.Bar(marker=progressbar.RotatingMarker()),
' ', progressbar.ETA(), ' ', progressbar.FileTransferSpeed()]
progress = progressbar.ProgressBar(widgets=widgets, maxval=num_addrs)
if show_progress:
progress.start()
self._cache = dict()
seen = dict()
for i, a in enumerate(self._addresses_to_check()):
if show_progress:
progress.update(i)
try:
bl = self.project.factory.block(a)
if bl.size > self._max_block_size:
continue
block_data = bl.bytes
except (SimEngineError, SimMemoryError):
continue
if block_data in seen:
self._cache[seen[block_data]].add(a)
continue
else:
if len(bl.vex.constant_jump_targets) == 0 and not self._block_has_ip_relative(a, bl):
seen[block_data] = a
self._cache[a] = set()
yield a
if show_progress:
progress.finish()
def _addresses_to_check(self):
"""
:return: all the addresses to check
"""
if self._only_check_near_rets:
# align block size
alignment = self.project.arch.instruction_alignment
block_size = (self._max_block_size & ((1 << self.project.arch.bits) - alignment)) + alignment
slices = [(addr-block_size, addr) for addr in self._ret_locations]
current_addr = 0
for st, _ in slices:
current_addr = max(current_addr, st)
end_addr = st + block_size + alignment
for i in xrange(current_addr, end_addr, alignment):
segment = self.project.loader.main_object.find_segment_containing(i)
if segment is not None and segment.is_executable:
yield i
current_addr = max(current_addr, end_addr)
else:
for segment in self.project.loader.main_object.segments:
if segment.is_executable:
l.debug("Analyzing segment with address range: 0x%x, 0x%x" % (segment.min_addr, segment.max_addr))
for addr in xrange(segment.min_addr, segment.max_addr):
yield addr
def _get_ret_locations(self):
"""
:return: all the locations in the binary with a ret instruction
"""
try:
return self._get_ret_locations_by_string()
except RopException:
pass
addrs = []
seen = set()
for segment in self.project.loader.main_object.segments:
if segment.is_executable:
num_bytes = segment.max_addr-segment.min_addr
alignment = self.project.arch.instruction_alignment
# hack for arm thumb
if self.project.arch.linux_name == "aarch64" or self.project.arch.linux_name == "arm":
alignment = 1
# iterate through the code looking for rets
for addr in xrange(segment.min_addr, segment.min_addr + num_bytes, alignment):
                    # don't recheck addresses we've seen before
if addr in seen:
continue
try:
block = self.project.factory.block(addr)
                        # if it has a ret, get the return address
if block.vex.jumpkind.startswith("Ijk_Ret"):
ret_addr = block.instruction_addrs[-1]
# hack for mips pipelining
if self.project.arch.linux_name.startswith("mips"):
ret_addr = block.instruction_addrs[-2]
if ret_addr not in seen:
addrs.append(ret_addr)
# save the addresses in the block
seen.update(block.instruction_addrs)
except (SimEngineError, SimMemoryError):
pass
return sorted(addrs)
def _get_ret_locations_by_string(self):
"""
uses a string filter to find the return instructions
:return: all the locations in the binary with a ret instruction
"""
if self.project.arch.linux_name == "x86_64" or self.project.arch.linux_name == "i386":
ret_instructions = {"\xc2", "\xc3", "\xca", "\xcb"}
else:
raise RopException("Only have ret strings for i386 and x86_64")
addrs = []
try:
for segment in self.project.loader.main_object.segments:
if segment.is_executable:
num_bytes = segment.max_addr-segment.min_addr
read_bytes = "".join(self.project.loader.memory.read_bytes(segment.min_addr, num_bytes))
for ret_instruction in ret_instructions:
for loc in common.str_find_all(read_bytes, ret_instruction):
addrs.append(loc + segment.min_addr)
except KeyError:
l.warning("Key error with segment analysis")
# try reading from state
state = self.project.factory.entry_state()
for segment in self.project.loader.main_object.segments:
if segment.is_executable:
num_bytes = segment.max_addr - segment.min_addr
read_bytes = state.se.eval(state.memory.load(segment.min_addr, num_bytes), cast_to=str)
for ret_instruction in ret_instructions:
for loc in common.str_find_all(read_bytes, ret_instruction):
addrs.append(loc + segment.min_addr)
return sorted(addrs)
register_analysis(ROP, 'ROP') | bsd-2-clause | -8,112,325,111,332,007,000 | 39.809291 | 118 | 0.573397 | false |
letops/django-sendgrid-parse | django_sendgrid_parse/migrations/0001_initial.py | 1 | 1575 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-28 05:54
from __future__ import unicode_literals
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(upload_to='')),
],
),
migrations.CreateModel(
name='Email',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('headers', models.TextField()),
('text', models.TextField()),
('html', models.TextField()),
('to', models.TextField()),
('cc', models.TextField()),
('subject', models.TextField()),
('dkim', jsonfield.fields.JSONField()),
('SPF', jsonfield.fields.JSONField()),
('envelope', jsonfield.fields.JSONField()),
('charsets', models.CharField(max_length=255)),
('spam_score', models.FloatField()),
('spam_report', models.TextField()),
('attachments', models.ManyToManyField(related_name='email', to='django_sendgrid_parse.Attachment')),
],
),
]
| mit | -5,382,535,184,245,046,000 | 34.795455 | 117 | 0.530794 | false |
tolysz/tcldis | tcldis.py | 2 | 42652 | from __future__ import print_function
import struct
import itertools
from collections import namedtuple, OrderedDict
import _tcldis
printbc = _tcldis.printbc
def getbc(*args, **kwargs):
"""
Accepts a keyword argument of:
- `tcl_code` - a string of valid Tcl to compile
- `tclobj_ptr` - a raw memory address pointing to a bytecode Tcl
object - VERY DANGEROUS
- `proc_name` - the name of a proc in the interpreter, probably
created by libtclpy
Returns a `BC` object containing information about the bytecode.
"""
bytecode, bcliterals, bclocals, bcauxs = _tcldis.getbc(*args, **kwargs)
bcliterals = [bclit.decode('utf-8') for bclit in bcliterals]
bclocals = [bcloc.decode('utf-8') for bcloc in bclocals]
return BC(bytecode, bcliterals, bclocals, bcauxs)
getbc.__doc__ = _tcldis.getbc.__doc__
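# Example usage (a sketch; requires the _tcldis C extension with a Tcl
# interpreter available, and uses getinsts/_bblock_create defined further below):
#   bc = getbc(tcl_code='set x [expr {1 + 2}]')
#   insts = getinsts(bc)             # decode the bytecode into Inst tuples
#   bblocks = _bblock_create(insts)  # split the instructions into basic blocks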
literal_convert = _tcldis.literal_convert
INSTRUCTIONS = _tcldis.inst_table()
JUMP_INSTRUCTIONS = [
'jump1', 'jump4', 'jumpTrue1', 'jumpTrue4', 'jumpFalse1', 'jumpFalse4'
]
TAG_BLOCK_JOIN = 'block_join'
TAG_BLOCK_RM = 'block_rm'
TAG_FOREACH = 'foreach'
TAG_CATCH = 'catch'
TAG_IF = 'if'
TAG_I_PUSH = 'i_push'
TAG_I_OTHER = 'i_other'
TAG_H_VARIABLE = 'h_variable'
def _getop(optype):
"""
Given a C struct descriptor, return a function which will take the necessary
bytes off the front of a bytearray and return the python value.
"""
def getop_lambda(bc):
# The 'standard' sizes in the struct module match up to what Tcl expects
numbytes = struct.calcsize(optype)
opbytes = ''.join([chr(bc.pop(0)) for _ in range(numbytes)])
return struct.unpack(optype, opbytes)[0]
return getop_lambda
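# For instance, _getop('>I') yields a reader for a 4-byte big-endian unsigned
# int; the reader consumes its bytes from the front of the passed bytearray:
#   getuint4 = _getop('>I')
#   getuint4(bytearray('\x00\x00\x01\x00'))  # -> 256, removing those 4 bytes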
# InstOperandType from tclCompile.h
OPERANDS = [
('NONE', None), # Should never be present
('INT1', _getop('>b')),
('INT4', _getop('>i')),
('UINT1', _getop('>B')),
('UINT4', _getop('>I')),
('IDX4', _getop('>i')),
('LVT1', _getop('>B')),
('LVT4', _getop('>I')),
('AUX4', _getop('>I')),
]
class BC(object):
def __init__(self, bytecode, bcliterals, bclocals, bcauxs):
self._bytecode = bytecode
self._literals = bcliterals
self._locals = bclocals
self._auxs = bcauxs
self._pc = 0
def __repr__(self):
return 'BC(%s,%s,%s,%s,%s)' % tuple([repr(v) for v in [
self._bytecode,
self._literals,
self._locals,
self._auxs,
self._pc,
]])
def __str__(self):
fmtstr = (
'Bytecode with %s bytes of instructions, %s literals, %s locals, ' +
'%s auxs and pc %s'
)
return fmtstr % (
len(self._bytecode),
len(self._literals),
len(self._locals),
len(self._auxs),
self._pc,
)
def __len__(self):
return len(self._bytecode) - self._pc
def literal(self, n):
return self._literals[n]
def local(self, n):
return self._locals[n]
def aux(self, n):
return self._auxs[n]
def peek1(self):
return self._bytecode[self._pc]
def pc(self):
return self._pc
def get(self, n):
oldpc = self._pc
self._pc += n
return self._bytecode[oldpc:self._pc]
def copy(self):
bc = BC(self._bytecode, self._literals, self._locals, self._auxs)
bc.get(self._pc)
return bc
# Tcl bytecode instruction
InstTuple = namedtuple('InstTuple', ['loc', 'name', 'ops', 'targetloc'])
class Inst(InstTuple):
def __new__(cls, bc):
d = {}
d['loc'] = bc.pc()
bytecode = bc.get(INSTRUCTIONS[bc.peek1()]['num_bytes'])
inst_type = INSTRUCTIONS[bytecode.pop(0)]
d['name'] = inst_type['name']
ops = []
for opnum in inst_type['operands']:
optype, getop = OPERANDS[opnum]
if optype in ['INT1', 'INT4', 'UINT1', 'UINT4']:
ops.append(getop(bytecode))
elif optype in ['LVT1', 'LVT4']:
ops.append(bc.local(getop(bytecode)))
elif optype in ['AUX4']:
ops.append(bc.aux(getop(bytecode)))
auxtype, auxdata = ops[-1]
if auxtype == 'ForeachInfo':
auxdata = [
[bc.local(varidx) for varidx in varlist]
for varlist in auxdata
]
else:
assert False
ops[-1] = (auxtype, auxdata)
else:
assert False
d['ops'] = tuple(ops)
# Note that this doesn't get printed on str() so we only see
# the value when it gets reduced to a BCJump class
d['targetloc'] = None
if d['name'] in JUMP_INSTRUCTIONS:
d['targetloc'] = d['loc'] + d['ops'][0]
return super(Inst, cls).__new__(cls, **d)
def __init__(self, bc, *args, **kwargs):
super(Inst, self).__init__(*args, **kwargs)
def __str__(self):
return '<%s: %s %s>' % (
self.loc if self.loc is not None else '?',
self.name,
'(' + ', '.join([repr(o) for o in self.ops]) + ')',
)
#################################################################
# My own representation of anything that can be used as a value #
#################################################################
# The below represent my interpretation of the Tcl stack machine
BCValueTuple = namedtuple('BCValueTuple', ['inst', 'value', 'stackn'])
class BCValue(BCValueTuple):
def __new__(cls, inst, value):
d = {}
d['inst'] = inst
if type(value) is list:
assert all([v.stackn == 1 for v in value if isinstance(v, BCValue)])
value = tuple(value)
elif type(value) is unicode:
pass
else:
assert False
d['value'] = value
d['stackn'] = 1
return super(BCValue, cls).__new__(cls, **d)
def __init__(self, inst, value, *args, **kwargs):
super(BCValue, self).__init__(*args, **kwargs)
def destack(self):
assert self.stackn == 1
return self._replace(stackn=self.stackn-1)
def __repr__(self): assert False
def fmt(self): assert False
class BCLiteral(BCValue):
def __init__(self, *args, **kwargs):
super(BCLiteral, self).__init__(*args, **kwargs)
assert type(self.value) is unicode
def __repr__(self):
return 'BCLiteral(%s)' % (repr(self.value),)
def fmt(self):
val = self.value
if val == '': return u'{}'
if not any([c in val for c in '$[]{}""\f\r\n\t\v ']):
return val
# Can't use simple case, go the hard route
matching_brackets = True
bracket_level = 0
for c in val:
if c == '{': bracket_level += 1
elif c == '}': bracket_level -= 1
if bracket_level < 0:
matching_brackets = False
break
# If we need escape codes we have to use ""
# Note we don't try and match \n or \t - these are probably used
# in multiline strings, so if possible use {} quoting and print
# them literally.
if any([c in val for c in '\f\r\v']) or not matching_brackets:
val = (val
.replace('\\', '\\\\')
.replace('\f', '\\f')
.replace('\r', '\\r')
.replace('\n', '\\n')
.replace('\t', '\\t')
.replace('\v', '\\v')
.replace('}', '\\}')
.replace('{', '\\{')
.replace('"', '\\"')
.replace('"', '\\"')
.replace('[', '\\[')
.replace(']', '\\]')
.replace('$', '\\$')
)
val = u'"%s"' % (val,)
else:
val = u'{%s}' % (val,)
return val
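# A few illustrations of the quoting rules implemented above (sketch):
#   BCLiteral(None, u'hello').fmt()        -> u'hello'
#   BCLiteral(None, u'hello world').fmt()  -> u'{hello world}'
#   BCLiteral(None, u'a}b').fmt()          -> u'"a\\}b"'  (unbalanced brace forces "" quoting)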
class BCVarRef(BCValue):
def __init__(self, *args, **kwargs):
super(BCVarRef, self).__init__(*args, **kwargs)
assert len(self.value) == 1
def __repr__(self):
return 'BCVarRef(%s)' % (repr(self.value),)
def fmt(self):
return u'$' + self.value[0].fmt()
class BCArrayRef(BCValue):
def __init__(self, *args, **kwargs):
super(BCArrayRef, self).__init__(*args, **kwargs)
assert len(self.value) == 2
def __repr__(self):
return 'BCArrayRef(%s)' % (repr(self.value),)
def fmt(self):
return u'$%s(%s)' % (self.value[0].fmt(), self.value[1].fmt())
class BCConcat(BCValue):
def __init__(self, *args, **kwargs):
super(BCConcat, self).__init__(*args, **kwargs)
assert len(self.value) > 1
def __repr__(self):
return 'BCConcat(%s)' % (repr(self.value),)
def fmt(self):
# TODO: this won't always work, need to be careful of
# literals following variables
return u'"%s"' % (u''.join([v.fmt() for v in self.value]),)
class BCProcCall(BCValue):
def __init__(self, *args, **kwargs):
super(BCProcCall, self).__init__(*args, **kwargs)
assert len(self.value) >= 1
def __repr__(self):
return 'BCProcCall(%s)' % (self.value,)
def fmt(self):
args = list(self.value)
if args[0].fmt() == u'::tcl::array::set':
args[0:1] = [BCLiteral(None, 'array'), BCLiteral(None, 'set')]
cmd = u' '.join([arg.fmt() for arg in args])
if self.stackn:
cmd = u'[%s]' % (cmd,)
return cmd
class BCSet(BCProcCall):
def __init__(self, *args, **kwargs):
super(BCSet, self).__init__(*args, **kwargs)
assert len(self.value) == 2
def __repr__(self):
return 'BCSet(%s)' % (self.value,)
def fmt(self):
cmd = u'set %s %s' % tuple([v.fmt() for v in self.value])
if self.stackn:
cmd = u'[%s]' % (cmd,)
return cmd
# This one is odd. inst.ops[0] is the index to the locals table, kv[0]
# is namespace::value, or value if looking at the same namespace (i.e.
# most of the time). For now we only handle the case where they're both
# the same, i.e. looking at the same namespace.
# Additionally, note there is a hack we apply before reducing to recognise
# that Tcl gives variable calls a return value.
class BCVariable(BCProcCall):
def __init__(self, *args, **kwargs):
super(BCVariable, self).__init__(*args, **kwargs)
assert len(self.value) == 1
# self.value[0].fmt() is the fully qualified name, if appropriate
assert self.value[0].fmt().endswith(self.inst.ops[0])
def __repr__(self):
return 'BCVariable(%s)' % (self.value,)
def fmt(self):
cmd = u'variable %s' % (self.value[0].fmt(),)
if self.stackn:
cmd = u'[%s]' % (cmd,)
return cmd
class BCExpr(BCValue):
_exprmap = {
'gt': (u'>', 2),
'lt': (u'<', 2),
'ge': (u'>=', 2),
'le': (u'<=', 2),
'eq': (u'==', 2),
'neq': (u'!=', 2),
'add': (u'+', 2),
'not': (u'!', 1),
}
def __init__(self, *args, **kwargs):
super(BCExpr, self).__init__(*args, **kwargs)
_, nargs = self._exprmap[self.inst.name]
assert len(self.value) == nargs
def __repr__(self):
return 'BCExpr(%s)' % (self.value,)
def expr(self):
op, nargs = self._exprmap[self.inst.name]
if nargs == 1:
expr = u'%s %s' % (op, self.value[0].fmt())
elif nargs == 2:
expr = u'%s %s %s' % (self.value[0].fmt(), op, self.value[1].fmt())
return expr
def fmt(self):
return u'[expr {%s}]' % (self.expr(),)
class BCReturn(BCProcCall):
def __init__(self, *args, **kwargs):
super(BCReturn, self).__init__(*args, **kwargs)
assert len(self.value) == 2
assert self.value[1].value == '' # Options
assert self.inst.ops[0] == 0 # Code
assert self.inst.ops[1] == 1 # Level
def __repr__(self):
return 'BCReturn(%s)' % (repr(self.value),)
def fmt(self):
if self.value[0].value == '': return u'return'
return u'return %s' % (self.value[0].fmt(),)
# TODO: I'm totally unsure about where this goes. tclCompile.c says it has a -1
# stack effect, which means it doesn't put anything back on the stack. But
# sometimes it's used instead of an actual return, which does put something on
# the stack (after consuming two items). The overall stack effect is the same,
# but the end value is different...
class BCDone(BCProcCall):
def __init__(self, *args, **kwargs):
super(BCDone, self).__init__(*args, **kwargs)
# Unfortunately cannot be sure this is a BCProcCall as done is sometimes
# used for the return call (i.e. tcl throws away the information that we've
        # written 'return').
assert len(self.value) == 1
def __repr__(self):
return 'BCDone(%s)' % (repr(self.value),)
def fmt(self):
# In the general case it's impossible to guess whether 'return' was written.
if isinstance(self.value[0], BCProcCall):
return self.value[0].destack().fmt()
return u'return %s' % (self.value[0].fmt(),)
# self.value contains two bblocks, self.inst contains two jumps
class BCIf(BCProcCall):
def __init__(self, *args, **kwargs):
super(BCIf, self).__init__(*args, **kwargs)
assert len(self.value) == len(self.inst) == 2
assert all([isinstance(jump, BCJump) for jump in self.inst])
assert self.inst[0].on in (True, False) and self.inst[1].on is None
def __repr__(self):
return 'BCIf(%s)' % (self.value,)
def fmt(self):
value = list(self.value)
# An if condition takes 'ownership' of the values returned in any
# of its branches
for i, bblock in enumerate(self.value):
inst = bblock.insts[-1]
if isinstance(inst, BCLiteral):
assert inst.value == ''
value[i] = bblock.popinst()
elif isinstance(inst, BCProcCall):
value[i] = bblock.replaceinst(len(bblock.insts)-1, [inst.destack()])
else:
assert False
if isinstance(self.inst[0].value[0], BCExpr):
conditionstr = self.inst[0].value[0].expr()
if self.inst[0].on is True:
conditionstr = u'!(%s)' % (conditionstr,)
else:
conditionstr = self.inst[0].value[0].fmt()
if self.inst[0].on is True:
conditionstr = '!%s' % (conditionstr,)
cmd = (
u'if {%s} {\n\t%s\n}' % (conditionstr, value[0].fmt().replace('\n', '\n\t'))
)
if len(value[1].insts) > 0:
cmd += (
u' else {\n\t%s\n}' % (value[1].fmt().replace('\n', '\n\t'),)
)
if self.stackn:
cmd = u'[%s]' % (cmd,)
return cmd
class BCCatch(BCProcCall):
def __init__(self, *args, **kwargs):
super(BCCatch, self).__init__(*args, **kwargs)
assert len(self.value) == 3
assert all([isinstance(v, BBlock) for v in self.value])
begin, middle, end = self.value
# Make sure we recognise the overall structure of this catch
assert (all([
len(begin.insts) >= 4, # beginCatch4, code, return code, jump
len(middle.insts) == 2,
len(end.insts) == 4,
]) and all([
isinstance(begin.insts[-3], BCProcCall),
isinstance(begin.insts[-2], BCLiteral),
isinstance(begin.insts[-1], BCJump),
]) and all([
middle.insts[0].name == 'pushResult',
middle.insts[1].name == 'pushReturnCode',
end.insts[0].name == 'endCatch',
end.insts[1].name == 'reverse', end.insts[1].ops[0] == 2,
end.insts[2].name == 'storeScalar1',
end.insts[3].name == 'pop',
]))
def __repr__(self):
return 'BCCatch(%s)' % (self.value,)
def fmt(self):
begin, _, end = self.value
# Nail down the details and move things around to our liking
begin = begin.replaceinst((-3, -2), [begin.insts[-3].destack()])
begin = begin.popinst().popinst().replaceinst(0, [])
catchblock = begin.fmt()
varname = end.insts[2].ops[0]
cmd = u'catch {%s} %s' % (catchblock, varname)
if self.stackn:
cmd = u'[%s]' % (cmd,)
return cmd
class BCForeach(BCProcCall):
def __init__(self, *args, **kwargs):
super(BCForeach, self).__init__(*args, **kwargs)
assert len(self.value) == 4
assert all([isinstance(v, BBlock) for v in self.value[:3]])
begin, step, code, lit = self.value
# Make sure we recognise the overall structure of foreach
assert (all([
len(begin.insts) == 2, # list temp var, foreach start
len(step.insts) == 2, # foreach step, jumpfalse
len(code.insts) > 1,
]) and all([
isinstance(begin.insts[0], BCSet),
isinstance(begin.insts[1], Inst),
isinstance(step.insts[0], Inst),
isinstance(step.insts[1], Inst),
isinstance(code.insts[-1], BCJump),
isinstance(lit, BCLiteral),
]) and all([
begin.insts[1].name == 'foreach_start4',
step.insts[0].name == 'foreach_step4',
step.insts[1].name == 'jumpFalse1',
]))
# Nail down the details and move things around to our liking
assert begin.insts[1].ops[0] == step.insts[0].ops[0]
assert len(begin.insts[1].ops[0][1]) == 1
def __repr__(self):
return 'BCForeach(%s)' % (self.value,)
def fmt(self):
value = list(self.value)
value[2] = value[2].popinst()
# TODO: this is lazy
fevars = ' '.join(value[0].insts[1].ops[0][1][0])
felist = value[0].insts[0].value[1].fmt()
feblock = '\n\t' + value[2].fmt().replace('\n', '\n\t') + '\n'
cmd = u'foreach {%s} %s {%s}' % (fevars, felist, feblock)
if self.stackn:
cmd = u'[%s]' % (cmd,)
return cmd
####################################################################
# My own representation of anything that cannot be used as a value #
####################################################################
class BCNonValue(object):
def __init__(self, inst, value, *args, **kwargs):
super(BCNonValue, self).__init__(*args, **kwargs)
self.inst = inst
self.value = value
def __repr__(self): assert False
def fmt(self): assert False
class BCJump(BCNonValue):
def __init__(self, on, *args, **kwargs):
super(BCJump, self).__init__(*args, **kwargs)
assert len(self.value) == 0 if on is None else 1
self.on = on
self.targetloc = self.inst.targetloc
def __repr__(self):
condition = ''
if self.on is not None:
condition = '(%s==%s)' % (self.on, self.value)
return 'BCJump%s->%s' % (condition, self.inst.targetloc)
def fmt(self):
#return 'JUMP%s(%s)' % (self.on, self.value[0].fmt())
return unicode(self)
# Just a formatting container for the form a(x)
class BCArrayElt(BCNonValue):
def __init__(self, *args, **kwargs):
super(BCArrayElt, self).__init__(*args, **kwargs)
assert len(self.value) == 2
def __repr__(self):
return 'BCArrayElt(%s)' % (repr(self.value),)
def fmt(self):
return u'%s(%s)' % (self.value[0].fmt(), self.value[1].fmt())
##############################
# Any basic block structures #
##############################
# Basic block, containing a linear flow of logic
class BBlock(object):
def __init__(self, insts, loc, *args, **kwargs):
super(BBlock, self).__init__(*args, **kwargs)
assert type(insts) is list
assert type(loc) is int
self.insts = tuple(insts)
self.loc = loc
def __repr__(self):
return 'BBlock(at %s, %s insts)' % (self.loc, len(self.insts))
def replaceinst(self, ij, replaceinsts):
newinsts = list(self.insts)
if type(ij) is not tuple:
assert ij >= 0
ij = (ij, ij+1)
assert type(replaceinsts) is list
newinsts[ij[0]:ij[1]] = replaceinsts
return BBlock(newinsts, self.loc)
def appendinsts(self, insts):
return self.replaceinst((len(self.insts), len(self.insts)), insts)
def popinst(self):
return self.replaceinst(len(self.insts)-1, [])
def fmt_insts(self):
fmt_list = []
for inst in self.insts:
if isinstance(inst, Inst):
fmt_str = unicode(inst)
elif (isinstance(inst, BCValue) and not isinstance(inst, BCDone) and
inst.stackn == 1):
# BCDone is an odd one - it leaves something on the stack.
# That's ok, it's usually because we've compiled a proc body
# and the stack value is the return value - so we don't want to
# display a stack indicator, but we do want to leave stackn as 1
# for programmatic inspection.
# >> symbol
fmt_str = u'\u00bb %s' % (inst.fmt(),)
else:
fmt_str = inst.fmt()
fmt_list.append(fmt_str)
return fmt_list
def fmt(self):
return u'\n'.join(self.fmt_insts())
########################
# Functions start here #
########################
def getinsts(bc):
"""
    Given a BC object, return a list of Inst objects.
"""
bc = bc.copy()
insts = []
while len(bc) > 0:
insts.append(Inst(bc))
return insts
def _bblock_create(insts):
"""
Given a list of Inst objects, split them up into basic blocks.
"""
# Identify the beginnings and ends of all basic blocks
starts = set()
ends = set()
newstart = True
for i, inst in enumerate(insts):
if newstart:
starts.add(inst.loc)
newstart = False
if inst.targetloc is not None:
ends.add(inst.loc)
starts.add(inst.targetloc)
newstart = True
# inst before target inst is end of a bblock
# search through instructions for instruction before the target
if inst.targetloc != 0:
instbeforeidx = 0
while True:
if insts[instbeforeidx+1].loc == inst.targetloc: break
instbeforeidx += 1
instbefore = insts[instbeforeidx]
ends.add(instbefore.loc)
elif inst.name in ['beginCatch4', 'endCatch']:
starts.add(inst.loc)
if inst.loc != 0:
ends.add(insts[i-1].loc)
ends.add(insts[-1].loc)
# Create the basic blocks
assert len(starts) == len(ends)
bblocks = []
bblocks_insts = insts[:]
for start, end in zip(sorted(list(starts)), sorted(list(ends))):
bbinsts = []
assert bblocks_insts[0].loc == start
while bblocks_insts[0].loc < end:
bbinsts.append(bblocks_insts.pop(0))
assert bblocks_insts[0].loc == end
bbinsts.append(bblocks_insts.pop(0))
bblocks.append(BBlock(bbinsts, bbinsts[0].loc))
return bblocks
def _inst_reductions():
"""
Define how each instruction is reduced to one of my higher level
representations.
"""
def N(n): return lambda _: n
firstop = lambda inst: inst.ops[0]
def lit(s): return BCLiteral(None, s)
def is_simple(arg):
return any([
isinstance(arg, bctype)
for bctype in [BCLiteral, BCVarRef, BCArrayRef]
])
def getargsgen(nargs_fn, checkargs_fn=None):
def getargsfn(inst, bblock, i):
nargs = nargs_fn(inst)
arglist = []
argis = []
for argi, arg in reversed(list(enumerate(bblock.insts[:i]))):
if len(arglist) == nargs:
break
if not isinstance(arg, BCValue):
break
if arg.stackn < 1:
continue
if checkargs_fn and not checkargs_fn(arg):
break
arglist.append(arg)
argis.append(argi)
arglist.reverse()
if len(arglist) != nargs: return None
return arglist
return getargsfn
# nargs, redfn, checkfn
inst_reductions = {
# Callers
'invokeStk1': [[firstop], BCProcCall],
'invokeStk4': [[firstop], BCProcCall],
'list':[[firstop], lambda inst, kv: BCProcCall(inst, [lit(u'list')] + kv)],
'listLength': [[N(1)], lambda inst, kv: BCProcCall(inst, [lit(u'llength'), kv[0]])],
'incrStkImm': [[N(1)], lambda inst, kv: BCProcCall(inst, [lit(u'incr'), kv[0]] + ([lit(unicode(inst.ops[0]))] if inst.ops[0] != 1 else []))],
'incrScalar1Imm': [[N(0)], lambda inst, kv: BCProcCall(inst, [lit(u'incr'), lit(inst.ops[0])] + ([lit(unicode(inst.ops[1]))] if inst.ops[1] != 1 else []))],
'incrScalarStkImm': [[N(1)], lambda inst, kv: BCProcCall(inst, [lit(u'incr'), kv[0]] + ([lit(unicode(inst.ops[0]))] if inst.ops[0] != 1 else []))],
'variable': [[N(1)], BCVariable],
# Jumps
'jump1': [[N(0)], lambda i, v: BCJump(None, i, v)],
'jumpFalse1': [[N(1)], lambda i, v: BCJump(False, i, v)],
'jumpTrue1': [[N(1)], lambda i, v: BCJump(True, i, v)],
# Variable references
'loadStk': [[N(1)], BCVarRef],
'loadScalarStk': [[N(1)], BCVarRef],
'loadArrayStk': [[N(2)], BCArrayRef],
'loadScalar1': [[N(0)], lambda inst, kv: BCVarRef(inst, [lit(inst.ops[0])])],
'loadArray1': [[N(1)], lambda inst, kv: BCArrayRef(inst, [lit(inst.ops[0]), kv[0]])],
# Variable sets
'storeStk': [[N(2)], BCSet],
'storeScalarStk': [[N(2)], BCSet],
'storeArrayStk': [[N(3)], lambda inst, kv: BCSet(inst, [BCArrayElt(None, kv[:2]), kv[2]])],
'storeScalar1': [[N(1)], lambda inst, kv: BCSet(inst, [lit(inst.ops[0]), kv[0]])],
'storeArray1': [[N(2)], lambda inst, kv: BCSet(inst, [BCArrayElt(None, [lit(inst.ops[0]), kv[0]]), kv[1]])],
# Expressions
'gt': [[N(2)], BCExpr],
'lt': [[N(2)], BCExpr],
'ge': [[N(2)], BCExpr],
'le': [[N(2)], BCExpr],
'eq': [[N(2)], BCExpr],
'neq': [[N(2)], BCExpr],
'add': [[N(2)], BCExpr],
'not': [[N(1)], BCExpr],
# Misc
'concat1': [[firstop], BCConcat],
'pop': [[N(1), lambda arg: isinstance(arg, BCProcCall)], lambda i, v: v[0].destack()],
'dup': [[N(1), is_simple], lambda i, v: [v[0], v[0]]],
'done': [[N(1)], BCDone],
'returnImm': [[N(2)], BCReturn],
# Useless
'tryCvtToNumeric': [[N(0)], lambda _1, _2: []], # Theoretically does something...
'nop': [[N(0)], lambda _1, _2: []],
'startCommand': [[N(0)], lambda _1, _2: []],
}
for inst, (getargsgen_args, redfn) in inst_reductions.items():
inst_reductions[inst] = {
'getargsfn': getargsgen(*getargsgen_args),
'redfn': redfn,
}
return inst_reductions
INST_REDUCTIONS = _inst_reductions()
def _bblock_hack(bc, bblock):
"""
The Tcl compiler has some annoying implementation details which must be
recognised before any reduction.
"""
# 'variable' does not push a result so the Tcl compiler inserts a push.
variableis = []
changes = []
for i, inst in enumerate(bblock.insts):
if not isinstance(inst, Inst): continue
if not inst.name == 'variable': continue
assert bblock.insts[i+1].name in ['push1', 'push4']
assert bc.literal(bblock.insts[i+1].ops[0]) == ''
variableis.append(i)
for i in reversed(variableis):
bblock = bblock.replaceinst(i+1, [])
changes.append((TAG_H_VARIABLE, (i+1, i+2), (i+1, i+1)))
return bblock, changes
def _bblock_reduce(bc, bblock):
"""
For the given basic block, attempt to reduce all instructions to my higher
level representations.
"""
changes = []
for i, inst in enumerate(bblock.insts):
if not isinstance(inst, Inst): continue
if inst.name in ['push1', 'push4']:
bblock = bblock.replaceinst(i, [BCLiteral(inst, bc.literal(inst.ops[0]))])
changes.append((TAG_I_PUSH, (i, i+1), (i, i+1)))
elif inst.name in INST_REDUCTIONS:
IRED = INST_REDUCTIONS[inst.name]
getargsfn = IRED['getargsfn']
redfn = IRED['redfn']
arglist = getargsfn(inst, bblock, i)
if arglist is None: continue
newinsts = redfn(inst, arglist)
if type(newinsts) is not list:
newinsts = [newinsts]
irange = (i-len(arglist), i+1)
bblock = bblock.replaceinst(irange, newinsts)
changes.append((TAG_I_OTHER, irange, (irange[0], irange[0]+len(newinsts))))
else:
continue # No change, continue scanning basic block
break
return bblock, changes
def _get_targets(bblocks):
targets = [target for target in [
(lambda jump: jump and jump.targetloc)(_get_jump(src_bblock))
for src_bblock in bblocks
] if target is not None]
inst_targets = [bblock.insts for bblock in bblocks]
inst_targets = [i for i in itertools.chain(*inst_targets)]
inst_targets = [i for i in inst_targets if isinstance(i, Inst)]
inst_targets = [i.targetloc for i in inst_targets if i.targetloc is not None]
return targets + inst_targets
def _get_jump(bblock):
if len(bblock.insts) == 0: return None
jump = bblock.insts[-1]
if not isinstance(jump, BCJump): return None
return jump
def _is_catch_begin(bblock):
if len(bblock.insts) == 0: return False
catch = bblock.insts[0]
if not isinstance(catch, Inst): return False
return catch.name == 'beginCatch4'
def _is_catch_end(bblock):
if len(bblock.insts) == 0: return False
catch = bblock.insts[0]
if not isinstance(catch, Inst): return False
return catch.name == 'endCatch'
def _bblock_flow(bblocks):
# Recognise a basic if.
# Observe that we don't try and recognise a basic if with no else branch -
# it turns out that tcl implicitly inserts the else to provide all
# execution branches with a value. TODO: this is an implementation detail
# and should be handled more generically.
# The overall structure consists of 4 basic blocks, arranged like so:
# [if] -> [ifcode] [elsecode] -> [unrelated code after if]
# |---------|----------^ ^ <- conditional jump to else
# |---------------------| <- unconditional jump to end
# We only care about the end block for checking that everything does end up
# there. The other three blocks end up 'consumed' by a BCIf object.
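# Rough sketch (names abbreviated): Tcl source such as
#   if {$x == 1} { A } else { B }
# compiles to [cond + jumpFalse1] -> [A + jump1] [B] -> [after], which is the
# four-block shape matched below.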
for i in range(len(bblocks)):
if len(bblocks[i:i+4]) < 4:
continue
jump0 = _get_jump(bblocks[i+0])
jump1 = _get_jump(bblocks[i+1])
jump2 = _get_jump(bblocks[i+2])
if jump0 is None or jump0.on is None: continue
if jump1 is None or jump1.on is not None: continue
if jump2 is not None: continue
if jump0.targetloc != bblocks[i+2].loc: continue
if jump1.targetloc != bblocks[i+3].loc: continue
if any([
isinstance(inst, Inst) for inst in
bblocks[i+1].insts + bblocks[i+2].insts
]):
continue
targets = _get_targets(bblocks)
if targets.count(bblocks[i+1].loc) > 0: continue
if targets.count(bblocks[i+2].loc) > 1: continue
# Looks like an 'if', apply the bblock transformation
changestart = ((i, 0), (i+2, len(bblocks[i+2].insts)))
jumps = [bblocks[i+0].insts[-1], bblocks[i+1].insts[-1]]
bblocks[i+0] = bblocks[i+0].popinst()
bblocks[i+1] = bblocks[i+1].popinst()
assert jumps == [jump0, jump1]
bblocks[i] = bblocks[i].appendinsts([BCIf(jumps, bblocks[i+1:i+3])])
bblocks[i+1:i+3] = []
changeend = ((i, 0), (i, len(bblocks[i].insts)))
return [(TAG_IF, changestart, changeend)]
# Recognise a catch
# The overall structure consists of 3 basic blocks, arranged like so:
# [beginCatch+code] [oncatch] [endCatch+unrelated code after catch]
# |----------------------------^ <- unconditional jump to endCatch
# The oncatch block is a series of instructions for handling when the code
# throws an exception - note there is no direct execution path to them. We
# make a number of assertions about them in case the bytecode compiler ever
# does something unexpected with them. All blocks are 'consumed' and replaced
# with a single BCCatch.
# TODO: because we steal instructions from the endCatch block, the bblock 'loc'
# is no longer correct!
for i in range(len(bblocks)):
if len(bblocks[i:i+3]) < 3:
continue
begin = bblocks[i+0]
middle = bblocks[i+1]
end = bblocks[i+2]
if not _is_catch_begin(begin): continue
if not _is_catch_end(end): continue
assert not (_is_catch_end(begin) or _is_catch_begin(end))
assert not (_is_catch_end(middle) or _is_catch_begin(middle))
if any([isinstance(inst, Inst) for inst in begin.insts[1:]]):
continue
# Looks like a 'catch', apply the bblock transformation
changestart = ((i, 0), (i+2, 4))
endcatchinst = end.insts[0]
end = end.replaceinst(0, [])
endcatch = BBlock([endcatchinst], endcatchinst.loc)
if (len(end.insts) > 2 and
isinstance(end.insts[0], Inst) and
isinstance(end.insts[1], Inst) and
isinstance(end.insts[2], Inst) and
end.insts[0].name == 'reverse' and
end.insts[1].name == 'storeScalar1' and
end.insts[2].name == 'pop'
):
endcatch = endcatch.appendinsts(list(end.insts[0:3]))
end = end.replaceinst((0, 3), [])
else:
assert False
bccatch = BCCatch(None, [begin, middle, endcatch])
bblocks[i] = begin.replaceinst((0, len(begin.insts)), [bccatch])
bblocks[i+2] = end
bblocks[i+1:i+2] = []
changeend = ((i, 0), (i, len(bblocks[i].insts)))
return [(TAG_CATCH, changestart, changeend)]
# Recognise a foreach.
# The overall structure consists of 4 basic blocks, arranged like so:
# [unrelated code+fe start] -> [fe step] [fe code] -> [unrelated code to fe]
# ^ |--------|-----------^ <- conditional jump to end
# |-----------| <- unconditional jump to fe step
# We only care about the end block for checking that everything does end up
# there. The other three blocks end up 'consumed' by a BCForEach object.
# If possible, we try and consume the BCLiteral sitting in the first instruction of
# end, though it may already have been consumed by a return call.
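# Rough sketch: Tcl source such as
#   foreach v $lst { A }
# compiles to [... foreach start] -> [foreach step + jumpFalse1] [A + jump1 back] -> [after],
# which is the four-block shape matched below.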
for i in range(len(bblocks)):
if len(bblocks[i:i+4]) < 4:
continue
jump0 = _get_jump(bblocks[i+0])
jump1 = bblocks[i+1].insts[-1]
jump2 = _get_jump(bblocks[i+2])
if jump0 is not None: continue
# Unreduced because jumps don't know how to consume foreach_step
if not isinstance(jump1, Inst) or jump1.name != 'jumpFalse1': continue
if jump2 is None or jump2.on is not None: continue
if jump1.targetloc is not bblocks[i+3].loc: continue
if jump2.targetloc is not bblocks[i+1].loc: continue
if any([isinstance(inst, Inst) for inst in bblocks[i+2].insts]): continue
if not isinstance(bblocks[i+3].insts[0], BCLiteral): continue
targets = _get_targets(bblocks)
if targets.count(bblocks[i+1].loc) > 1: continue
if targets.count(bblocks[i+2].loc) > 0: continue
if targets.count(bblocks[i+3].loc) > 1: continue
# Looks like a 'foreach', apply the bblock transformation
changestart = ((i, len(bblocks[i].insts)-1), (i+3, 1))
foreach_start = bblocks[i].insts[-1]
bblocks[i] = bblocks[i].popinst()
numvarlists = len(foreach_start.ops[0][1])
varlists = []
# use a throwaway loop variable so the enclosing bblock index i is not clobbered
for _ in range(numvarlists):
varlists.append(bblocks[i].insts[-1])
bblocks[i] = bblocks[i].popinst()
# TODO: Location isn't actually correct...do we care?
begin = BBlock(varlists + [foreach_start], foreach_start.loc)
end = bblocks[i+3].insts[0]
bblocks[i+3] = bblocks[i+3].replaceinst(0, [])
foreach = BCForeach(None, [begin] + bblocks[i+1:i+3] + [end])
bblocks[i] = bblocks[i].appendinsts([foreach])
bblocks[i+1:i+3] = []
changeend = ((i, len(bblocks[i].insts)-1), (i, len(bblocks[i].insts)))
return [(TAG_FOREACH, changestart, changeend)]
return []
def _bblock_join(bblocks):
# Remove empty unused blocks
# TODO: unknown if this is needed
for i, bblock in enumerate(bblocks):
if len(bblock.insts) > 0: continue
targets = _get_targets(bblocks)
if bblock.loc in targets: continue
bblocks[i:i+1] = []
previ = 0 if i == 0 else i-1
previlen = len(bblocks[previ].insts)
return [(TAG_BLOCK_RM, ((i, 0), (i, 0)), ((previ, previlen), (previ, previlen)))]
# Join together blocks if possible
for i in range(len(bblocks)):
if len(bblocks[i:i+2]) < 2:
continue
bblock1, bblock2 = bblocks[i:i+2]
targets = _get_targets(bblocks)
# If the end of bblock1 or the beginning of bblock2 should remain as
# bblock boundaries, do not join them.
if _get_jump(bblock1) is not None:
continue
# Unreduced jumps
if any([isinstance(inst, Inst) and inst.targetloc is not None
for inst in bblock1.insts[-1:]]):
continue
if bblock2.loc in targets:
continue
if _is_catch_begin(bblock2):
continue
if _is_catch_end(bblock2):
continue
changestart = ((i, 0), (i+1, len(bblocks[i+1].insts)))
bblocks[i] = bblock1.appendinsts(list(bblock2.insts))
bblocks[i+1:i+2] = []
changeend = ((i, 0), (i, len(bblocks[i].insts)))
return [(TAG_BLOCK_JOIN, changestart, changeend)]
return False
def _bblocks_operation(bblock_op, bc, bblocks):
"""
Take a per-bblock operation and wrap it to add the correct location information
indicating the ith bblock. Returns a list of bblocks and a flat list of changes.
"""
operbblocks = []
operchanges = []
operlist = [bblock_op(bc, bblock) for bblock in bblocks]
for bbi, (operbblock, bblockchanges) in enumerate(operlist):
operbblocks.append(operbblock)
operchanges.extend([
(tag, ((bbi, lfrom1), (bbi, lfrom2)), ((bbi, lto1), (bbi, lto2)))
for tag, (lfrom1, lfrom2), (lto1, lto2) in bblockchanges
])
return operbblocks, operchanges
def _decompile(bc):
"""
Given some bytecode and literals, attempt to decompile to tcl.
"""
assert isinstance(bc, BC)
insts = getinsts(bc)
bblocks = _bblock_create(insts)
yield bblocks[:], []
# Reduce bblock logic
hackedbblocks, changes = _bblocks_operation(_bblock_hack, bc, bblocks)
if changes:
bblocks = hackedbblocks
yield bblocks[:], changes
changes = True
while changes:
changes = []
if not changes:
bblocks, changes = _bblocks_operation(_bblock_reduce, bc, bblocks)
if not changes:
changes = _bblock_join(bblocks)
if not changes:
changes = _bblock_flow(bblocks)
if changes: yield bblocks[:], changes
def _bblocks_fmt(bblocks):
outstr = ''
for bblock in bblocks:
#outstr += '===========%s\n' % (bblock)
outstr += bblock.fmt()
outstr += '\n'
return outstr
def decompile(bc):
bblocks = None
for bblocks, _ in _decompile(bc):
pass
return _bblocks_fmt(bblocks)
def decompile_steps(bc):
"""
Given some bytecode, returns a tuple of `(steps, changes)` for decompilation.
`steps` is a list of 'snapshot's of each stage of the decompilation.
Each 'snapshot' is a list of 'basic block's in the program in that snapshot.
Each 'basic block' is a list of 'line's in that bblock.
Each 'line' is a string. The definition of a line is flexible - it might be
a call to `foreach` (with all nested instructions), it might be a single
bytecode instruction.
In summary, `steps` is a list of lists of lists of strings.
`changes` is a list of 'change descriptor's.
Each change descriptor looks like
{
'step': si,
'from': ((bbfrom1, lfrom1), (bbfrom2, lfrom2)),
'to': ((bbto1, lto1), (bbto2, lto2)),
'tag': tag,
}
- si is the index of the step this change applies to
- bbfrom1 is the index of the start block of the source changed lines
- lfrom1 is the slice index of the start of the source changed lines
- bbfrom2 is the index of the end block of the source changed lines
- lfrom2 is the slice index of the end of the source changed lines
- bbto1 is the index of the start block of the target changed lines
- lto1 is the slice index of the start of the target changed lines
- bbto2 is the index of the end block of the target changed lines
- lto2 is the slice index of the end of the target changed lines
- tag is some string identifier of the type of change made
Note that these are *slice* indexes, i.e. like python. So if lto1 and lto2
are the same, it means the source lines have been reduced to a line of
width 0 (i.e. have been removed entirely).
"""
steps = []
changes = []
for si, (sbblocks, schanges) in enumerate(_decompile(bc)):
step = []
for sbblock in sbblocks:
step.append(sbblock.fmt_insts())
for schange in schanges:
tag, lfrom, lto = schange
changes.append({
'step': si-1,
'from': lfrom,
'to': lto,
'tag': tag,
})
steps.append(step)
return steps, changes
| bsd-3-clause | -78,312,617,112,609,150 | 38.23827 | 164 | 0.550314 | false |
rchristie/mapclientplugins.simplevizstep | mapclientplugins/simplevizstep/model/simplevizmodel.py | 1 | 10659 | '''
Created on Aug 12, 2015
@author: Richard Christie
'''
import os, sys
from opencmiss.zinc.context import Context
from opencmiss.zinc.scenecoordinatesystem import SCENECOORDINATESYSTEM_NORMALISED_WINDOW_FIT_LEFT
from opencmiss.zinc.status import OK as ZINC_OK
from mapclientplugins.simplevizstep.utils import zinc as zincutils
class SimpleVizModel(object):
'''
Model class holding the Zinc context, root region, time, tessellation and
spectrum settings for the simple visualisation step.
'''
def __init__(self):
'''
Constructor
'''
self._location = None
self.initialise()
def initialise(self):
'''
Ensure scene or other objects from old context are not in use before calling
'''
self._context = Context("SimpleViz")
# set up standard materials and glyphs so we can use them elsewhere
materialmodule = self._context.getMaterialmodule()
materialmodule.defineStandardMaterials()
glyphmodule = self._context.getGlyphmodule()
glyphmodule.defineStandardGlyphs()
self._rootRegion = self._context.createRegion()
def getContext(self):
return self._context
def setLocation(self, location):
self._location = location
def getRootRegion(self):
return self._rootRegion
def loadScript(self, inputScriptFileName):
'''
Load model via python script file implementing loadModel(region).
'''
# set current directory to path from file, to support scripts and fieldml with external resources
path = os.path.dirname(inputScriptFileName)
os.chdir(path)
sys.path.append(path)
_, filename = os.path.split(inputScriptFileName)
mod_name, _ = os.path.splitext(filename)
import importlib.util
spec = importlib.util.spec_from_file_location(mod_name, inputScriptFileName)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
success = foo.loadModel(self._rootRegion)
if not success:
raise ValueError('Could not load ' + inputScriptFileName)
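# Illustrative example of a script usable with loadScript(): it must define a
# module-level loadModel(region) returning True on success. (The file name and
# the Region.readFile call below are assumptions, not part of this module.)
#
# def loadModel(region):
#     result = region.readFile('model.exfile')
#     return result == ZINC_OK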
# ----- Graphics Settings -----
# ----- View Settings -----
# ----- Time Settings -----
def autorangeTime(self):
'''
Set time min/max to time range of finite element field parameters.
'''
rootRegion = self.getRootRegion()
minimum, maximum = zincutils.getRegionTreeTimeRange(rootRegion)
if minimum is None:
minimum = 0.0
maximum = 0.0
timekeepermodule = self._context.getTimekeepermodule()
timekeeper = timekeepermodule.getDefaultTimekeeper()
timekeeper.setMinimumTime(minimum)
timekeeper.setMaximumTime(maximum)
currentTime = timekeeper.getTime()
if currentTime < minimum:
timekeeper.setTime(minimum)
elif currentTime > maximum:
timekeeper.setTime(maximum)
def getMinimumTime(self):
timekeeper = self._context.getTimekeepermodule().getDefaultTimekeeper()
return timekeeper.getMinimumTime()
def setMinimumTime(self, minimumTime):
timekeeper = self._context.getTimekeepermodule().getDefaultTimekeeper()
timekeeper.setMinimumTime(minimumTime)
def getMaximumTime(self):
timekeeper = self._context.getTimekeepermodule().getDefaultTimekeeper()
return timekeeper.getMaximumTime()
def setMaximumTime(self, maximumTime):
timekeeper = self._context.getTimekeepermodule().getDefaultTimekeeper()
timekeeper.setMaximumTime(maximumTime)
def getTime(self):
timekeeper = self._context.getTimekeepermodule().getDefaultTimekeeper()
return timekeeper.getTime()
def setTime(self, time):
timekeeper = self._context.getTimekeepermodule().getDefaultTimekeeper()
timekeeper.setTime(time)
# ----- Rendering Settings -----
def checkTessellationDivisions(self, minimumDivisions, refinementFactors):
'''
Check total divisions not too high or get user confirmation
Call with both of the vectors set, each must have at least one component.
Returns True if within limits, False if need user confirmation.
'''
limit = 100000 # max elements*totalsize for each dimension
min = 1
ref = 1
totalDivisions = [1,1,1]
totalSize3d = 1
for i in range(3):
if i < len(minimumDivisions):
min = minimumDivisions[i]
if i < len(refinementFactors):
ref = refinementFactors[i]
totalDivisions[i] = min*ref
totalSize3d = totalSize3d*min*ref
totalSize2d = totalDivisions[0]*totalDivisions[1]
if totalDivisions[1]*totalDivisions[2] > totalSize2d:
totalSize2d = totalDivisions[1]*totalDivisions[2]
if totalDivisions[2]*totalDivisions[0] > totalSize2d:
totalSize2d = totalDivisions[2]*totalDivisions[0]
totalSize1d = totalDivisions[0]
if totalDivisions[1] > totalSize1d:
totalSize1d = totalDivisions[1]
if totalDivisions[2] > totalSize1d:
totalSize1d = totalDivisions[2]
meshSize3d = zincutils.getRegionTreeMeshSize(self._rootRegion, 3)
limit3d = limit
if limit3d < meshSize3d:
limit3d = meshSize3d
overLimit3d = totalSize3d*meshSize3d > limit3d
meshSize2d = zincutils.getRegionTreeMeshSize(self._rootRegion, 2)
limit2d = limit
if limit2d < meshSize2d:
limit2d = meshSize2d
overLimit2d = totalSize2d*meshSize2d > limit2d
meshSize1d = zincutils.getRegionTreeMeshSize(self._rootRegion, 1)
limit1d = limit
if limit1d < meshSize1d:
limit1d = meshSize1d
overLimit1d = totalSize1d*meshSize1d > limit1d
if not (overLimit1d or overLimit2d or overLimit3d):
return True
return False
def getTessellationMinimumDivisions(self):
tessellation = self._context.getTessellationmodule().getDefaultTessellation()
result, minimumDivisions = tessellation.getMinimumDivisions(3)
return minimumDivisions
def setTessellationMinimumDivisions(self, minimumDivisions):
tessellation = self._context.getTessellationmodule().getDefaultTessellation()
return ZINC_OK == tessellation.setMinimumDivisions(minimumDivisions)
def getTessellationRefinementFactors(self):
tessellation = self._context.getTessellationmodule().getDefaultTessellation()
result, refinementFactors = tessellation.getRefinementFactors(3)
return refinementFactors
def setTessellationRefinementFactors(self, refinementFactors):
tessellation = self._context.getTessellationmodule().getDefaultTessellation()
return ZINC_OK == tessellation.setRefinementFactors(refinementFactors)
def getTessellationCircleDivisions(self):
tessellation = self._context.getTessellationmodule().getDefaultTessellation()
return tessellation.getCircleDivisions()
def setTessellationCircleDivisions(self, circleDivisions):
tessellationmodule = self._context.getTessellationmodule()
# set circle divisions for all tessellations in tessellationmodule
result = ZINC_OK
tessellationmodule.beginChange()
iter = tessellationmodule.createTessellationiterator()
tessellation = iter.next()
while tessellation.isValid():
result = tessellation.setCircleDivisions(circleDivisions)
if ZINC_OK != result:
break # can't raise here otherwise no call to endChange()
tessellation = iter.next()
tessellationmodule.endChange()
return result
# ----- Data Colouring Settings -----
def spectrumAutorange(self, scenefilter):
'''
Set spectrum min/max to fit range of data in scene graphics filtered by scenefilter.
'''
spectrummodule = self._context.getSpectrummodule()
spectrum = spectrummodule.getDefaultSpectrum()
scene = self._rootRegion.getScene()
result, minimum, maximum = scene.getSpectrumDataRange(scenefilter, spectrum, 1)
if result >= 1: # result is number of components with range, can exceed 1
spectrummodule.beginChange()
spectrumcomponent = spectrum.getFirstSpectrumcomponent()
spectrumcomponent.setRangeMinimum(minimum)
spectrumcomponent.setRangeMaximum(maximum)
spectrummodule.endChange()
def getSpectrumMinimum(self):
spectrum = self._context.getSpectrummodule().getDefaultSpectrum()
spectrumcomponent = spectrum.getFirstSpectrumcomponent()
return spectrumcomponent.getRangeMinimum()
def setSpectrumMinimum(self, minimum):
spectrum = self._context.getSpectrummodule().getDefaultSpectrum()
spectrumcomponent = spectrum.getFirstSpectrumcomponent()
return spectrumcomponent.setRangeMinimum(minimum)
def getSpectrumMaximum(self):
spectrum = self._context.getSpectrummodule().getDefaultSpectrum()
spectrumcomponent = spectrum.getFirstSpectrumcomponent()
return spectrumcomponent.getRangeMaximum()
def setSpectrumMaximum(self, maximum):
spectrum = self._context.getSpectrummodule().getDefaultSpectrum()
spectrumcomponent = spectrum.getFirstSpectrumcomponent()
return spectrumcomponent.setRangeMaximum(maximum)
def addSpectrumColourBar(self):
'''
Add an overlay graphics showing the default spectrum colour bar.
'''
colourbarName = 'colourbar'
scene = self._rootRegion.getScene()
scene.beginChange()
spectrummodule = scene.getSpectrummodule()
spectrum = spectrummodule.getDefaultSpectrum()
glyphmodule = scene.getGlyphmodule()
glyphmodule.beginChange()
colourbar = glyphmodule.findGlyphByName(colourbarName)
if not colourbar.isValid():
colourbar = glyphmodule.createGlyphColourBar(spectrum)
colourbar.setName(colourbarName)
glyphmodule.endChange()
graphics = scene.findGraphicsByName(colourbarName)
if graphics.isValid():
scene.removeGraphics(graphics)
graphics = scene.createGraphicsPoints()
graphics.setName(colourbarName)
graphics.setScenecoordinatesystem(SCENECOORDINATESYSTEM_NORMALISED_WINDOW_FIT_LEFT)
pointattributes = graphics.getGraphicspointattributes()
pointattributes.setGlyph(colourbar)
pointattributes.setBaseSize([1.0,1.0,1.0])
pointattributes.setGlyphOffset([-0.9,0.0,0.0])
scene.endChange()
# ----- Output Settings -----
| apache-2.0 | 4,297,409,633,384,326,700 | 38.332103 | 105 | 0.677831 | false |
RPGOne/Skynet | pytorch-master/torch/legacy/nn/SpatialSubtractiveNormalization.py | 1 | 4655 | import math
import torch
from .Module import Module
from .Sequential import Sequential
from .SpatialZeroPadding import SpatialZeroPadding
from .SpatialConvolution import SpatialConvolution
from .SpatialConvolutionMap import SpatialConvolutionMap
from .Replicate import Replicate
from .CSubTable import CSubTable
from .CDivTable import CDivTable
from .utils import clear
class SpatialSubtractiveNormalization(Module):
def __init__(self, nInputPlane=1, kernel=None):
super(SpatialSubtractiveNormalization, self).__init__()
# get args
self.nInputPlane = nInputPlane
if kernel is None:
kernel = torch.Tensor(9, 9).fill_(1)
self.kernel = kernel
kdim = self.kernel.ndimension()
# check args
if kdim != 2 and kdim != 1:
raise ValueError('SpatialSubtractiveNormalization averaging kernel must be 2D or 1D')
if (self.kernel.size(0) % 2) == 0 or (kdim == 2 and (self.kernel.size(1) % 2) == 0):
raise ValueError('SpatialSubtractiveNormalization averaging kernel must have ODD dimensions')
# normalize kernel
self.kernel.div_(self.kernel.sum() * self.nInputPlane)
# padding values
padH = int(math.floor(self.kernel.size(0) / 2))
padW = padH
if kdim == 2:
padW = int(math.floor(self.kernel.size(1) / 2))
# create convolutional mean extractor
self.meanestimator = Sequential()
self.meanestimator.add(SpatialZeroPadding(padW, padW, padH, padH))
if kdim == 2:
self.meanestimator.add(SpatialConvolution(self.nInputPlane, 1, self.kernel.size(1), self.kernel.size(0)))
else:
# TODO: map
self.meanestimator.add(SpatialConvolutionMap(
SpatialConvolutionMap.maps.oneToOne(self.nInputPlane), self.kernel.size(0), 1))
self.meanestimator.add(SpatialConvolution(self.nInputPlane, 1, 1, self.kernel.size(0)))
self.meanestimator.add(Replicate(self.nInputPlane, 0))
# set kernel and bias
if kdim == 2:
for i in range(self.nInputPlane):
self.meanestimator.modules[1].weight[0][i] = self.kernel
self.meanestimator.modules[1].bias.zero_()
else:
for i in range(self.nInputPlane):
self.meanestimator.modules[1].weight[i].copy_(self.kernel)
self.meanestimator.modules[2].weight[0][i].copy_(self.kernel)
self.meanestimator.modules[1].bias.zero_()
self.meanestimator.modules[2].bias.zero_()
# other operation
self.subtractor = CSubTable()
self.divider = CDivTable()
# coefficient array, to adjust side effects
self.coef = torch.Tensor(1, 1, 1)
self.ones = None
self._coef = None
def updateOutput(self, input):
# compute side coefficients
dim = input.dim()
if (input.dim() + 1 != self.coef.dim() or
(input.size(dim - 1) != self.coef.size(dim - 1)) or
(input.size(dim - 2) != self.coef.size(dim - 2))):
if self.ones is None:
self.ones = input.new()
if self._coef is None:
self._coef = self.coef.new()
self.ones.resize_as_(input[0:1]).fill_(1)
coef = self.meanestimator.updateOutput(self.ones).squeeze(0)
self._coef.resize_as_(coef).copy_(coef) # make contiguous for view
size = list(coef.size())
size = [input.size(0)] + size
self.coef = self._coef.view(1, *self._coef.size()).expand(*size)
# compute mean
self.localsums = self.meanestimator.updateOutput(input)
self.adjustedsums = self.divider.updateOutput([self.localsums, self.coef])
self.output = self.subtractor.updateOutput([input, self.adjustedsums])
return self.output
def updateGradInput(self, input, gradOutput):
# resize grad
self.gradInput.resize_as_(input).zero_()
# backprop through all modules
gradsub = self.subtractor.updateGradInput([input, self.adjustedsums], gradOutput)
graddiv = self.divider.updateGradInput([self.localsums, self.coef], gradsub[1])
size = self.meanestimator.updateGradInput(input, graddiv[0]).size()
self.gradInput.add_(self.meanestimator.updateGradInput(input, graddiv[0]))
self.gradInput.add_(gradsub[0])
return self.gradInput
def clearState(self):
clear(self, 'ones', '_coef')
self.meanestimator.clearState()
return super(SpatialSubtractiveNormalization, self).clearState()
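# Minimal usage sketch (illustrative; follows the legacy nn conventions used above):
# norm = SpatialSubtractiveNormalization(3, torch.ones(7, 7))
# out = norm.updateOutput(torch.randn(3, 32, 32)) # input minus its local 7x7 mean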
| bsd-3-clause | -6,849,874,772,937,031,000 | 38.117647 | 117 | 0.627068 | false |
schenc3/InteractiveROSETTA | InteractiveROSETTA/scripts/protocols_truncated.py | 1 | 26633 | import wx
import os
import os.path
import platform
import subprocess
import multiprocessing
import psutil
from daemon import daemonLoop
from tools import resizeTextControlForUNIX
from tools import logInfo
from tools import icon
from selection import SelectPanel
from superimposition import SuperimpositionPanel
from minimization import MinimizationPanel
from ensemblebrowser import EnsembleBrowserPanel
from fixbb import FixbbPanel
try:
# This uses openbabel to convert PDBs to MOL2 files, which makes it way easier
# to add new parameters files since we can just read the PDB that the user is trying
# to load directly
from advresiduecreator import ResidueCreatorPanel
except:
# Maybe the user couldn't compile openbabel, so we'll default to the original version
# that requires the user to generate their own MOL2 files
print "A Python OpenBabel installation was not detected on your system."
print "Although OpenBabel is not required, it can greatly simplify parameterizing new residues"
print "On Windows, you need to install the main OpenBabel as well: http://openbabel.org/wiki/Category:Installation"
print "On Debian, Python OpenBabel can be installed using apt-get: sudo apt-get install python-openbabel"
print "On Mac and RedHat, you need to compile from source: http://open-babel.readthedocs.org/en/latest/Installation/install.html#compiling-open-babel"
from residuecreator import ResidueCreatorPanel
from pointmutations import PointMutationsPanel
from kic import KICPanel
from docking import DockingPanel
from msd import MSDPanel
from pmutscan import PointMutantScanPanel
from surfaces import SurfacesPanel
from ensemblegen import EnsembleGenPanel
class ProtocolsPanel(wx.Panel):
def __init__(self, parent, W, H):
wx.Panel.__init__(self, parent, id=-1, pos=(0, 0), size=(350, H-265), name="ProtocolsPanel")
self.W = W
self.H = H
self.parent = parent
self.SetBackgroundColour("#333333")
self.Show()
if (platform.system() == "Windows"):
self.label = wx.StaticText(self, -1, "Protocols", (5, 5), (340, 25), wx.ALIGN_CENTRE)
self.label.SetFont(wx.Font(12, wx.DEFAULT, wx.ITALIC, wx.BOLD))
elif (platform.system() == "Darwin"):
self.label = wx.StaticBitmap(self, -1, wx.Image(self.parent.scriptdir + "/images/osx/lblProtocols.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(5, 5), size=(340, 25))
else:
self.label = wx.StaticText(self, -1, "Protocols", style=wx.ALIGN_CENTRE)
self.label.SetFont(wx.Font(12, wx.DEFAULT, wx.ITALIC, wx.BOLD))
resizeTextControlForUNIX(self.label, 0, self.GetSize()[0])
self.label.SetForegroundColour("#FFFFFF")
self.protocols = ["Docking",
"Energy Minimization",
"Ensemble Browser",
"Ensemble Generation",
"Loop Modeling (KIC)",
"Molecular Surfaces",
"Point Mutant Scan",
"Point Mutations",
"Protein Design (Fixbb)",
"Protein Design (MSD)",
"Residue/Ligand Creator",
"Superimposition"]
if (platform.system() == "Darwin"):
self.protMenu = wx.ComboBox(self, pos=(5, 30), size=(230, 25), choices=self.protocols, style=wx.CB_READONLY)
else:
self.protMenu = wx.ComboBox(self, pos=(5, 30), size=(230, 25), choices=self.protocols, style=wx.CB_READONLY | wx.CB_SORT)
self.protMenu.SetSelection(self.protocols.index("Superimposition"))
self.protMenu.SetToolTipString("List of currently available protocols")
if (platform.system() == "Darwin"):
self.GoBtn = wx.BitmapButton(self, id=-1, bitmap=wx.Image(self.parent.scriptdir + "/images/osx/GoBtn.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap(), pos=(240, 30), size=(100, 25))
else:
self.GoBtn = wx.Button(self, id=-1, label="Go!", pos=(240, 30), size=(100, 25))
self.GoBtn.SetForegroundColour("#000000")
self.GoBtn.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD))
self.GoBtn.Bind(wx.EVT_BUTTON, self.changeProtocol)
self.GoBtn.SetToolTipString("Change to the selected protocol")
self.currentProtocol = "Superimposition"
self.protPanel = SuperimpositionPanel(self, W, H)
def changeProtocol(self, event):
logInfo("Go button clicked")
selectedProtocol = self.protMenu.GetStringSelection()
if (selectedProtocol != self.currentProtocol and selectedProtocol != ""):
# Destroy the old panel so we can free memory up and create the new protocol panel
if (self.currentProtocol == "Superimposition"):
self.protPanel.Destroy()
del self.protPanel
elif (self.currentProtocol == "Energy Minimization"):
# Check to see if the user has accepted the minimization
# If not, ask if they really want to proceed
if (self.protPanel.buttonState == "Finalize!"):
dlg = wx.MessageDialog(self, "You have not accepted your minimization and the results will be lost if you proceed. Proceed anyway?", "Minimization Not Accepted", wx.YES_NO | wx.ICON_EXCLAMATION | wx.CENTRE)
if (dlg.ShowModal() == wx.ID_NO):
logInfo("Go cancelled due to unaccepted minimization")
dlg.Destroy()
return
dlg.Destroy()
logInfo("Minimization job rejected")
# Try to delete the minimized model if the user doesn't want to accept it
try:
self.cmd.remove("minimized_view")
self.cmd.delete("minimized_view")
except:
pass
self.cmd.label("all", "")
self.protPanel.Destroy()
del self.protPanel
elif (self.currentProtocol == "Ensemble Browser"):
self.protPanel.Destroy()
del self.protPanel
elif (self.currentProtocol == "Ensemble Generation"):
# Check to see if the ensemble generation job has finished
# If not, ask if they really want to proceed
if (self.protPanel.buttonState == "Cancel!"):
dlg = wx.MessageDialog(self, "Your ensemble is not finished and the results will be lost if you proceed. Proceed anyway?", "Design Not Accepted", wx.YES_NO | wx.ICON_EXCLAMATION | wx.CENTRE)
if (dlg.ShowModal() == wx.ID_NO):
logInfo("Go cancelled due to unfinished ensemblegen job")
dlg.Destroy()
return
logInfo("Ensemblegen job rejected")
dlg.Destroy()
self.protPanel.Destroy()
del self.protPanel
elif (self.currentProtocol == "Protein Design (Fixbb)"):
# Check to see if the user has accepted the design
# If not, ask if they really want to proceed
if (self.protPanel.buttonState == "Finalize!"):
dlg = wx.MessageDialog(self, "You have not accepted your design and the results will be lost if you proceed. Proceed anyway?", "Design Not Accepted", wx.YES_NO | wx.ICON_EXCLAMATION | wx.CENTRE)
if (dlg.ShowModal() == wx.ID_NO):
logInfo("Go cancelled due to unaccepted fixbb job")
dlg.Destroy()
return
logInfo("Fixbb job rejected")
dlg.Destroy()
# Try to delete the design model if the user doesn't want to accept it
try:
self.cmd.remove("designed_view")
self.cmd.delete("designed_view")
except:
pass
self.cmd.label("all", "")
self.protPanel.Destroy()
del self.protPanel
elif (self.currentProtocol == "Residue/Ligand Creator"):
try:
self.cmd.remove("params")
self.cmd.delete("params")
except:
pass
self.protPanel.Destroy()
del self.protPanel
elif (self.currentProtocol == "Point Mutations"):
try:
self.cmd.remove("rotamer_view")
self.cmd.delete("rotamer_view")
except:
pass
self.protPanel.Destroy()
del self.protPanel
elif (self.currentProtocol == "Point Mutant Scan"):
self.protPanel.Destroy()
del self.protPanel
elif (self.currentProtocol == "Loop Modeling (KIC)"):
# Check to see if the user has accepted the loop model
# If not, ask if they really want to proceed
if (self.protPanel.buttonState == "Finalize!"):
dlg = wx.MessageDialog(self, "You have not accepted your loop model and the results will be lost if you proceed. Proceed anyway?", "KIC Not Accepted", wx.YES_NO | wx.ICON_EXCLAMATION | wx.CENTRE)
if (dlg.ShowModal() == wx.ID_NO):
logInfo("Go cancelled due to unaccepted KIC job")
dlg.Destroy()
return
logInfo("KIC job rejected")
dlg.Destroy()
# Try to delete the design model if the user doesn't want to accept it
try:
self.cmd.remove("kic_view")
self.cmd.delete("kic_view")
except:
pass
self.cmd.label("all", "")
self.protPanel.Destroy()
elif (self.currentProtocol == "Docking"):
# Check to see if the user has accepted the docking model
# If not, ask if they really want to proceed
if (self.protPanel.buttonState != "Dock!"):
dlg = wx.MessageDialog(self, "You have not accepted your docking model and the results will be lost if you proceed. Proceed anyway?", "Docking Not Accepted", wx.YES_NO | wx.ICON_EXCLAMATION | wx.CENTRE)
if (dlg.ShowModal() == wx.ID_NO):
logInfo("Go cancelled due to unaccepted docking job")
dlg.Destroy()
return
logInfo("Docking job rejected")
dlg.Destroy()
# Try to delete the design model if the user doesn't want to accept it
try:
self.cmd.remove("dock_view")
self.cmd.delete("dock_view")
except:
pass
self.cmd.label("all", "")
self.protPanel.Destroy()
elif (self.currentProtocol == "Structure Prediction (Comparative Modeling)"):
# Check to see if the user has accepted the comparative modeling structure
# If not, ask if they really want to proceed
if (self.protPanel.buttonState == "Finalize!"):
dlg = wx.MessageDialog(self, "You have not accepted your comparative modeling structure and the results will be lost if you proceed. Proceed anyway?", "Docking Not Accepted", wx.YES_NO | wx.ICON_EXCLAMATION | wx.CENTRE)
if (dlg.ShowModal() == wx.ID_NO):
logInfo("Go cancelled due to unaccepted comparative modeling job")
dlg.Destroy()
return
logInfo("Comparative modeling job rejected")
dlg.Destroy()
# Try to delete the design model if the user doesn't want to accept it
try:
self.cmd.remove("thread_view")
self.cmd.delete("thread_view")
except:
pass
self.cmd.label("all", "")
self.protPanel.Destroy()
elif (self.currentProtocol == "Protein Design (MSD)"):
self.protPanel.Destroy()
del self.protPanel
elif (self.currentProtocol == "Antibody Modeling"):
self.protPanel.Destroy()
del self.protPanel
elif (self.currentProtocol == "Molecular Surfaces"):
try:
self.cmd.delete("curr_surf_recp")
except:
pass
try:
self.cmd.delete("curr_surf_lig")
except:
pass
self.cmd.hide("surface", "all")
if (self.parent.Selection.showSurf):
self.currentProtocol = "n/a"
self.parent.Selection.displaySurfaces()
self.protPanel.Destroy()
del self.protPanel
elif (self.currentProtocol == "Flexible Peptide Docking"):
self.protPanel.Destroy()
del self.protPanel
self.currentProtocol = selectedProtocol
self.seqWin.cannotDelete = False
# Restart the Rosetta daemon to clear its memory up
self.parent.restartDaemon()
logInfo("Changed protocol to " + selectedProtocol)
if (selectedProtocol == "Superimposition"):
self.protPanel = SuperimpositionPanel(self, self.W, self.H)
elif (selectedProtocol == "Energy Minimization"):
self.protPanel = MinimizationPanel(self, self.W, self.H)
self.protPanel.setSelectWin(self.selectWin)
elif (selectedProtocol == "Ensemble Browser"):
self.protPanel = EnsembleBrowserPanel(self, self.W, self.H)
elif (selectedProtocol == "Ensemble Generation"):
self.protPanel = EnsembleGenPanel(self, self.W, self.H)
self.protPanel.setSelectWin(self.selectWin)
elif (selectedProtocol == "Protein Design (Fixbb)"):
self.protPanel = FixbbPanel(self, self.W, self.H)
self.protPanel.setSelectWin(self.selectWin)
elif (selectedProtocol == "Residue/Ligand Creator"):
self.protPanel = ResidueCreatorPanel(self, self.W, self.H)
elif (selectedProtocol == "Point Mutations"):
self.protPanel = PointMutationsPanel(self, self.W, self.H)
self.protPanel.setSelectWin(self.selectWin)
elif (selectedProtocol == "Point Mutant Scan"):
self.protPanel = PointMutantScanPanel(self, self.W, self.H)
self.protPanel.setSelectWin(self.selectWin)
elif (selectedProtocol == "Loop Modeling (KIC)"):
self.protPanel = KICPanel(self, self.W, self.H)
self.protPanel.setSelectWin(self.selectWin)
elif (selectedProtocol == "Docking"):
self.protPanel = DockingPanel(self, self.W, self.H)
self.protPanel.setSelectWin(self.selectWin)
elif (selectedProtocol == "Structure Prediction (Comparative Modeling)"):
self.protPanel = CompModelPanel(self, self.W, self.H)
self.protPanel.setSelectWin(self.selectWin)
elif (selectedProtocol == "Protein Design (MSD)"):
self.protPanel = MSDPanel(self, self.W, self.H)
self.protPanel.setSelectWin(self.selectWin)
elif (selectedProtocol == "Antibody Modeling"):
self.protPanel = AntibodyPanel(self, self.W, self.H)
elif (selectedProtocol == "Molecular Surfaces"):
self.protPanel = SurfacesPanel(self, self.W, self.H)
self.cmd.hide("surface", "all")
elif (selectedProtocol == "Flexible Peptide Docking"):
self.protPanel = FlexPepDockPanel(self, self.W, self.H)
self.protPanel.setSelectWin(self.selectWin)
self.protPanel.setSeqWin(self.seqWin)
self.protPanel.setPyMOL(self.pymol)
self.protPanel.activate()
self.seqWin.setProtocolPanel(self.protPanel)
def setSeqWin(self, seqWin):
self.seqWin = seqWin
self.protPanel.setSeqWin(seqWin)
self.seqWin.setProtocolPanel(self.protPanel)
def setPyMOL(self, pymol):
self.pymol = pymol
self.cmd = pymol.cmd
self.stored = pymol.stored
self.protPanel.setPyMOL(pymol)
def setSelectWin(self, selectWin):
self.selectWin = selectWin
def activate(self):
self.cmd.enable("seqsele")
self.protPanel.activate()
class ProtocolsWin(wx.Frame):
def __init__(self, W, H, scriptdir):
if (platform.system() == "Darwin"):
self.stdwinx = 0; self.stdwiny = 24
else:
self.stdwinx = 0; self.stdwiny = 0
self.stdwinw = 370; self.stdwinh = H-40
self.screenH = H; self.screenW = W
winx = self.stdwinx; winy = self.stdwiny
winw = self.stdwinw; winh = self.stdwinh
self.scriptdir = scriptdir
homedir = os.path.expanduser("~")
# Try to get the save values from the cfg file
try:
if (platform.system() == "Windows"):
f = open(homedir + "\\InteractiveROSETTA\\protwindow.cfg", "r")
else:
f = open(homedir + "/InteractiveROSETTA/protwindow.cfg", "r")
for aline in f:
if (aline.find("[OFFSET X]") >= 0):
winx = winx + int(aline.split()[len(aline.split())-1])
elif (aline.find("[OFFSET Y]") >= 0):
winy = winy + int(aline.split()[len(aline.split())-1])
elif (aline.find("[OFFSET WIDTH]") >= 0):
winw = winw + int(aline.split()[len(aline.split())-1])
elif (aline.find("[OFFSET HEIGHT]") >= 0):
winh = winh + int(aline.split()[len(aline.split())-1])
f.close()
except:
pass
if (winx > self.screenW - 100):
winx = self.stdwinx
if (winy > self.screenH - 100):
winy = self.stdwiny
# Maybe the screen resolution has changed and the saved dimensions put the windows in
# weird places, so default them to better positions and the user can change them later
#if (winw < 350):
# winw = 370
#elif (winw > W):
# winw = 370
#if (winx < 0):
# winx = 0
#elif (winx > W-winw):
# winx = W-winw
#if (winh > H - 40):
# winh = H - 40
#if (winy < 0):
# winy = 0
#elif (winy > H-winh):
# winh = H-40
wx.Frame.__init__(self, None, -1, "InteractiveROSETTA - Protocols", size=(winw, winh))
self.SetPosition((winx, winy))
self.SetBackgroundColour("#333333")
self.SetSizeHints(330, 560, 370, H)
self.SetIcon(icon.GetIcon())
self.Show()
self.Protocols = ProtocolsPanel(self, winw, winh)
self.Selection = SelectPanel(self, winw, winh)
self.Protocols.setSelectWin(self.Selection)
self.Selection.setProtPanel(self.Protocols)
self.saveTimer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.saveWindowData, self.saveTimer)
self.Bind(wx.EVT_SIZE, self.windowGeometryChange)
self.Bind(wx.EVT_MOTION, self.windowGeometryChange)
self.Bind(wx.EVT_ACTIVATE, self.focusEvent)
# Start the Rosetta daemon that will run in the background looking for job input files generated
# by the main GUI
# It could be the case that the user was in the middle of a protocol, then quits suddenly so the
# daemon doesn't terminate itself because it's in the middle of a protocol
# Then the user restarts the GUI before the daemon has a chance to finish the protocol
# This checks to see if the daemon is already active and doesn't spawn a new one if one is already
# running
stillrunning = False
count = 0
for proc in psutil.process_iter():
try:
if (platform.system() == "Windows"):
if (len(proc.cmdline()) >= 3 and proc.cmdline()[0].find("python") >= 0 and proc.cmdline()[2].find("from multiprocessing.forking") >= 0):
stillrunning = True
break
else:
# On Unix systems you just have to make sure two instances of python are running
# because there isn't any forking information in the daemon's instance of python
if (len(proc.cmdline()) >= 2 and proc.cmdline()[0].find("python") >= 0 and proc.cmdline()[1].find("InteractiveROSETTA.py") >= 0):
count = count + 1
except:
# In Windows it will crash if you try to read process information for the Administrator
# Doesn't matter though since InteractiveROSETTA is run by a non-Administrator
# But we need to catch these errors since we don't know which processes are admin ones
pass
if (platform.system() != "Windows" and count == 2):
stillrunning = True
if (not(stillrunning)):
print "Starting Rosetta protocol daemon..."
#if (platform.system() == "Darwin"):
#self.daemon_process = subprocess.Popen(args=["/usr/bin/python", self.scriptdir + "/scripts/daemon.py"], shell=False)
#else:
self.daemon_process = multiprocessing.Process(target=daemonLoop)
self.daemon_process.start()
def restartDaemon(self):
# This function kills the current daemon process and starts a new one
# This function is called whenever the protocol panel changes to a different protocol
# This function is necessary because on Windows PyRosetta can start to use well over
# 4GB of memory if the daemon has loaded memory from multiple protocols
# Killing the daemon and restarting it clears up that memory so the user's computer
# doesn't slow down as it moves a lot of stuff into swap space
savedir = os.getcwd()
os.chdir(self.scriptdir)
self.daemon_process.terminate()
print "Restarting Rosetta protocol daemon..."
self.daemon_process = multiprocessing.Process(target=daemonLoop)
self.daemon_process.start()
os.chdir(savedir)
def focusEvent(self, event):
if (event.GetActive()):
# Protocols read selection information from the sequence window, so update the sequence window
# If we're going from PyMOL->Protocols, since updates usually happen on the sequence window focus event
self.seqWin.selectUpdate(False) # Update PyMOL changes in sequence
self.Protocols.activate()
event.Skip()
def setSeqWin(self, seqWin):
self.seqWin = seqWin
self.Protocols.setSeqWin(seqWin)
self.Selection.setSeqWin(seqWin)
def windowGeometryChange(self, event):
# This function starts a timer that will write out the size and position of this window to a cfg file
# so the orientation is saved and can be loaded the next time InteractiveROSETTA is started
if (not(self.saveTimer.IsRunning())):
self.saveTimer.Start(5000)
# We have to do some finagling if this window gets resized because the minimum height
# of the specific protocol panel needs to be at least 300
# If the window is shrunk, the protocol panel will shrink down to 300 but then the select
# panel needs to shrink
(w, h) = self.GetSize()
return
if (h > 560):
try:
self.Protocols.protPanel.SetSize((w, h-330))
self.Protocols.protPanel.SetScrollbars(1, 1, 320, 800)
except:
pass
(x, y) = self.Selection.GetPosition()
self.Selection.SetPosition((x, h-270))
self.Selection.SetSize((w-20, self.Selection.GetSize()[1]))
event.Skip()
def saveWindowData(self, event):
homedir = os.path.expanduser("~")
data = []
try:
if (platform.system() == "Windows"):
f = open(homedir + "\\InteractiveROSETTA\\protwindow.cfg", "r")
else:
f = open(homedir + "/InteractiveROSETTA/protwindow.cfg", "r")
for aline in f:
data.append(aline)
f.close()
except:
pass
if (platform.system() == "Windows"):
f = open(homedir + "\\InteractiveROSETTA\\protwindow.cfg", "w")
else:
f = open(homedir + "/InteractiveROSETTA/protwindow.cfg", "w")
itemsFound = [False, False, False, False] # [offX, offY, offW, offH]
(x, y) = self.GetPosition()
(w, h) = self.GetSize()
for aline in data:
if (aline.find("[OFFSET X]") >= 0):
itemsFound[0] = True
f.write("[OFFSET X] " + str(x-self.stdwinx) + "\n")
elif (aline.find("[OFFSET Y]") >= 0):
itemsFound[1] = True
f.write("[OFFSET Y] " + str(y-self.stdwiny) + "\n")
elif (aline.find("[OFFSET WIDTH]") >= 0):
itemsFound[2] = True
f.write("[OFFSET WIDTH] " + str(w-self.stdwinw) + "\n")
elif (aline.find("[OFFSET HEIGHT]") >= 0):
itemsFound[3] = True
f.write("[OFFSET HEIGHT] " + str(h-self.stdwinh) + "\n")
else:
f.write(aline)
for i in range(0, len(itemsFound)):
if (not(itemsFound[i])):
if (i == 0):
f.write("[OFFSET X] " + str(x-self.stdwinx) + "\n")
elif (i == 1):
f.write("[OFFSET Y] " + str(y-self.stdwiny) + "\n")
elif (i == 2):
f.write("[OFFSET WIDTH] " + str(w-self.stdwinw) + "\n")
elif (i == 3):
f.write("[OFFSET HEIGHT] " + str(h-self.stdwinh) + "\n")
f.close() | gpl-2.0 | -8,787,999,662,965,035,000 | 50.120921 | 240 | 0.564037 | false |
edgeware/python-circuit | circuit/breaker.py | 1 | 6837 | # Copyright 2012 Edgeware AB.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionality for managing errors when interacting with a remote
service.
The circuit breaker monitors the communication and in the case of a
high error rate may break the circuit and not allow further
communication for a short period. After a while the breaker will let
through a single request to probe to see if the service feels better.
If not, it will open the circuit again.
Note the optional parameters backoff_cap and with_jitter. If back-off on
retries is desired, set backoff_cap to the maximum back-off value.
Empirical data (http://www.awsarchitectureblog.com/2015/03/backoff.html)
indicates adding jitter (randomness) to back-off strategies can lead to an
increased throughput for a system experiencing contention for a shared
resource. If using a L{CircuitBreaker} with a contended resource it may be
beneficial to use back-off with jitter.
A L{CircuitBreakerSet} can handle the state for multiple interactions
at the same time. Use the C{context} method to pick which interaction
to track:
try:
with circuit_breaker.context('x'):
# something that generates errors
pass
except CircuitOpenError:
# the circuit was open so we did not even try to communicate
# with the remote service.
pass
"""
import random
class CircuitOpenError(Exception):
"""The circuit breaker is open."""
class CircuitBreaker(object):
"""A single circuit with breaker logic."""
def __init__(self, clock, log, error_types, maxfail, reset_timeout,
time_unit, backoff_cap=None, with_jitter=False):
self.clock = clock
self.log = log
self.error_types = error_types
self.maxfail = maxfail
self.reset_timeout = reset_timeout
self.time_unit = time_unit
self.state = 'closed'
self.last_change = None
self.backoff_cap = backoff_cap
self.test_fail_count = 0
self.with_jitter = with_jitter
self.errors = []
def reset(self):
"""Reset the breaker after a successful transaction."""
self.log.info('closing circuit')
self.state = 'closed'
self.test_fail_count = 0
def open(self, err=None):
self.log.error('got error %r - opening circuit' % (err,))
self.state = 'open'
self.last_change = self.clock()
def error(self, err=None):
"""Update the circuit breaker with an error event."""
if self.state == 'half-open':
self.test_fail_count = min(self.test_fail_count + 1, 16)
self.errors.append(self.clock())
if len(self.errors) > self.maxfail:
time = self.clock() - self.errors.pop(0)
if time < self.time_unit:
if time == 0:
time = 0.0001
self.log.debug('error rate: %f errors per second' % (
float(self.maxfail) / time))
self.open(err)
def test(self):
"""Check state of the circuit breaker.
@raise CircuitOpenError: if the circuit is still open
"""
if self.state == 'open':
delta = self.clock() - self.last_change
delay_time = self.reset_timeout
if self.backoff_cap:
delay_time = self.reset_timeout * (2 ** self.test_fail_count)
delay_time = min(delay_time, self.backoff_cap)
if self.with_jitter:
# Add jitter, see:
# http://www.awsarchitectureblog.com/2015/03/backoff.html
delay_time = random.random() * delay_time
if delta < delay_time:
raise CircuitOpenError()
self.state = 'half-open'
self.log.debug('half-open - letting one through')
return self.state
def success(self):
if self.state == 'half-open':
self.reset()
def __enter__(self):
"""Context enter."""
self.test()
return self
def __exit__(self, exc_type, exc_val, tb):
"""Context exit."""
if exc_type is None:
self.success()
elif exc_type in self.error_types:
self.error(exc_val)
return False
class CircuitBreakerSet(object):
"""Controller for a set of circuit breakers.
@ivar clock: A callable that takes no arguments and return the
current time in seconds.
@ivar log: A L{logging.Logger} object that is used for the circuit
breakers.
@ivar maxfail: The maximum number of allowed errors over the
last minute. If the breaker detects more errors than this, the
circuit will open.
@ivar reset_timeout: Number of seconds to have the circuit open
before it moves into C{half-open}.
"""
def __init__(self, clock, log, maxfail=3, reset_timeout=10,
time_unit=60, backoff_cap=None, with_jitter=False,
factory=CircuitBreaker):
self.clock = clock
self.log = log
self.maxfail = maxfail
self.reset_timeout = reset_timeout
self.time_unit = time_unit
self.backoff_cap = backoff_cap
self.with_jitter = with_jitter
self.circuits = {}
self.error_types = []
self.factory = factory
def handle_error(self, err_type):
"""Register error C{err_type} with the circuit breakers so
that it will be handled as an error.
"""
self.error_types.append(err_type)
def handle_errors(self, err_types):
"""Register errors C{err_types} with the circuit breakers so
that it will be handled as an error.
"""
self.error_types.extend(err_types)
def context(self, id):
"""Return a circuit breaker for the given ID."""
if id not in self.circuits:
self.circuits[id] = self.factory(self.clock, self.log.getChild(id),
self.error_types, self.maxfail,
self.reset_timeout,
self.time_unit,
backoff_cap=self.backoff_cap,
with_jitter=self.with_jitter)
return self.circuits[id]
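# Illustrative wiring (the logger name and call_backend() are assumptions):
#
# import time, logging
# breakers = CircuitBreakerSet(time.time, logging.getLogger('circuit'))
# breakers.handle_error(IOError)
# try:
#     with breakers.context('backend-x'):
#         call_backend()
# except CircuitOpenError:
#     pass # circuit is open; skip the remote call for now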
| apache-2.0 | 4,636,226,752,008,011,000 | 34.795812 | 79 | 0.608893 | false |
guillempalou/scikit-cv | skcv/multiview/two_views/tests/test_triangulation.py | 1 | 1727 | import numpy as np
import pickle
import os
from numpy.testing import assert_array_almost_equal
from skcv import data_dir
from skcv.multiview.two_views.fundamental_matrix import *
from skcv.multiview.util.points_functions import *
from skcv.multiview.util.camera import *
from skcv.multiview.two_views import triangulation
def test_triangulation_hartley():
projections_file = os.path.join(data_dir, 'two_view_projections.dat')
(x1e, x2e) = pickle.load(open(projections_file, 'rb'))
#add gaussian noise to x1e and x2e
dev = 0.1
x1e += np.random.normal(0, dev, size=x1e.shape)
x2e += np.random.normal(0, dev, size=x2e.shape)
x1h = euclidean_to_homogeneous(x1e)
x2h = euclidean_to_homogeneous(x2e)
f_matrix = robust_f_estimation(x1h, x2h)
p1, p2 = canonical_cameras_from_f(f_matrix)
X = triangulation.optimal_triangulation(x1h, x2h, f_matrix, cameras=(p1,p2), method='Hartley')
x1p = np.dot(p1, X)
x2p = np.dot(p2, X)
ratio1 = x1p / x1h
ratio2 = x2p / x2h
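    # The reprojections x1p/x2p should equal the noisy measurements up to a
    # per-point scale factor, so each column of ratio1/ratio2 is roughly constant.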
def test_triangulation_kanatani():
projections_file = os.path.join(data_dir, 'two_view_projections.dat')
(x1e, x2e) = pickle.load(open(projections_file, 'rb'))
#add gaussian noise to x1e and x2e
dev = 0.1
x1e += np.random.normal(0, dev, size=x1e.shape)
x2e += np.random.normal(0, dev, size=x2e.shape)
x1h = euclidean_to_homogeneous(x1e)
x2h = euclidean_to_homogeneous(x2e)
f_matrix = robust_f_estimation(x1h, x2h)
p1, p2 = canonical_cameras_from_f(f_matrix)
X = triangulation.optimal_triangulation(x1h, x2h, f_matrix, cameras=(p1,p2), method='Kanatani')
x1p = np.dot(p1, X)
x2p = np.dot(p2, X)
ratio1 = x1p / x1h
ratio2 = x2p / x2h
| bsd-3-clause | -2,687,161,321,226,213,000 | 25.984375 | 99 | 0.676896 | false |
kyper-data/python-highcharts | highcharts/highstock/highstock_types.py | 1 | 19610 | # -*- coding: UTF-8 -*-
from past.builtins import basestring
import json, datetime
from .common import Formatter, Events, Position, ContextButton, Options3d, ResetZoomButton, DataGrouping, \
Labels, Marker, Point, PlotBands, States, Tooltip, Title, Zones, Levels, Shadow, \
JSfunction, ColorObject, CSSObject, SVGObject, \
CommonObject, ArrayObject
PLOT_OPTION_ALLOWED_ARGS = {
"common": {
"allowPointSelect": bool,
"animation": bool,
"color": (ColorObject, basestring, dict),
"cursor": basestring,
"dataGrouping": (DataGrouping, dict),
"dataLabels": (Labels, dict),
"enableMouseTracking": bool,
"events": (Events, dict),
"id": basestring,
"index": [float, int],
"name": basestring,
"point": (Point, dict),
"selected": bool,
"showCheckbox": bool,
"showInLegend": bool,
"states": (States, dict),
"stickyTracking": bool,
"tooltip": (Tooltip, dict),
"visible": bool,
"xAxis": [int, basestring],
"yAxis": [int, basestring],
"zIndex": int,
"zoneAxis": basestring,
"zones": (Zones, dict),
},
"area": {
"compare": basestring,
"connectNulls": bool,
"cropThreshold": int,
"dashStyle": basestring,
"fillColor": (ColorObject, basestring, dict),
"fillOpacity": float,
"gapSize": [int, float],
"keys": list,
"legendIndex": [int, float],
"lineColor": (ColorObject, basestring, dict),
"lineWidth": int,
"linkedTo": basestring,
"marker": (Marker, dict),
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPlacement": [basestring, int, float],
"pointStart": (int,basestring, datetime.datetime),
"shadow": [bool, dict],
"stack": basestring,
"stacking": basestring,
"step": bool,
"threshold": [int, type(None)],
"trackByArea": bool,
"turboThreshold": int,
},
"arearange": {
"compare": basestring,
"connectNulls": bool,
"cropThreshold": int,
"dashStyle": basestring,
"fillColor": (ColorObject, basestring, dict),
"fillOpacity": float,
"gapSize": [int, float],
"keys": list,
"legendIndex": [int, float],
"lineColor": (ColorObject, basestring, dict),
"lineWidth": int,
"linkedTo": basestring,
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPlacement": [basestring, int, float],
"pointStart": [int,basestring,datetime.datetime],
"shadow": [bool, dict],
"trackByArea": bool,
"turboThreshold": int,
},
"areaspline": {
"cropThreshold": int,
"compare": basestring,
"connectNulls": bool,
"dashStyle": basestring,
"fillColor": (ColorObject, basestring, dict),
"fillOpacity": float,
"gapSize": [int, float],
"keys": list,
"legendIndex": [int, float],
"lineColor": (ColorObject, basestring, dict),
"lineWidth": int,
"linkedTo": basestring,
"marker": (Marker, dict),
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPlacement": [basestring, int, float],
"pointStart": [int,basestring,datetime.datetime],
"shadow": [bool, dict],
"stack": basestring,
"stacking": basestring,
"threshold": [int, type(None)],
"turboThreshold": int,
"trackByArea": bool,
},
"areasplinerange": {
"cropThreshold": int,
"compare": basestring,
"connectNulls": bool,
"dashStyle": basestring,
"fillColor": (ColorObject, basestring, dict),
"fillOpacity": float,
"gapSize": [int, float],
"keys": list,
"legendIndex": [int, float],
"lineColor": (ColorObject, basestring, dict),
"lineWidth": int,
"linkedTo": basestring,
"marker": (Marker, dict),
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPlacement": [basestring, int, float],
"pointStart": [int,basestring,datetime.datetime],
"shadow": [bool, dict],
"stack": basestring,
"stacking": basestring,
"threshold": [int, type(None)],
"turboThreshold": int,
"trackByArea": bool,
},
"candlestick": {
"colors": list,
"cropThreshold": int,
"connectNulls": bool,
"dashStyle": basestring,
"fillColor": (ColorObject, basestring, dict),
"fillOpacity": float,
"groupPadding": [int, float],
"grouping": bool,
"keys": list,
"legendIndex": [int, float],
"lineColor": (ColorObject, basestring, dict),
"lineWidth": int,
"linkedTo": basestring,
"marker": (Marker, dict),
"minPointLength": [int, float],
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPlacement": [basestring, int, float],
"pointRange": [int, float],
"pointStart": [int,basestring,datetime.datetime],
"pointWidth": [int, float],
"shadow": [bool, dict],
"stack": basestring,
"upColor": (ColorObject, basestring, dict),
"upLineColor": (ColorObject, basestring, dict),
"turboThreshold": int,
"trackByArea": bool,
},
"column": {
"borderColor": (ColorObject, basestring, dict),
"borderRadius": int,
"borderWidth": [int, basestring],
"colorByPoint": bool,
"colors": list,
"compare": basestring,
"cropThreshold": int,
"groupPadding": [float, int],
"grouping": bool,
"keys": list,
"linkedTo": basestring,
"minPointLength": int,
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPadding": [float, int],
"pointPlacement": [basestring, int, float],
"pointRange": int,
"pointStart": [int,basestring,datetime.datetime],
"pointWidth": [int, float],
"shadow": [bool, dict],
"stack": basestring,
"stacking": basestring,
"turboThreshold": int,
},
"columnrange": {
"borderColor": (ColorObject, basestring, dict),
"borderRadius": int,
"borderWidth": [int, basestring],
"colorByPoint": bool,
"colors": list,
"compare": basestring,
"cropThreshold": int,
"groupPadding": [float, int],
"grouping": bool,
"keys": list,
"linkedTo": basestring,
"minPointLength": int,
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPadding": [float, int],
"pointPlacement": [basestring, int, float],
"pointRange": int,
"pointStart": [int,basestring,datetime.datetime],
"pointWidth": [int, float],
"shadow": [bool, dict],
"stack": basestring,
"stacking": basestring,
"turboThreshold": int,
},
"flags": {
"colors": list,
"cropThreshold": int,
"keys": list,
"legendIndex": [int, float],
"lineColor": (ColorObject, basestring, dict),
"lineWidth": int,
"linkedTo": basestring,
"onSeries": basestring,
"pointIntervalUnit": basestring,
"shadow": [bool, dict],
"shape": basestring,
"stack": basestring,
"stackDistance": [int, float],
"style": (CSSObject, dict),
"y": [int, float],
"useHTML": bool,
},
"line": {
"compare": basestring,
"connectNulls": bool,
"cropThreshold": int,
"dashStyle": basestring,
"gapSize": [int, float],
"keys": list,
"legendIndex": [int, float],
"lineWidth": int,
"linecap": basestring,
"linkedTo": basestring,
"marker": (Marker, dict),
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPlacement": [basestring, int, float],
"pointStart": [int,basestring,datetime.datetime],
"shadow": [bool, dict],
"stack": basestring,
"stacking": basestring,
"step": basestring,
"turboThreshold": int,
},
"ohlc": {
"colorByPoint": bool,
"colors": list,
"compare": basestring,
"cropThreshold": int,
"groupPadding": [float, int],
"grouping": bool,
"keys": list,
"legendIndex": [int, float],
"lineWidth": int,
"linkedTo": basestring,
"minPointLength": int,
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPadding": [float, int],
"pointPlacement": [basestring, int, float],
"pointRange": int,
"pointStart": [int,basestring,datetime.datetime],
"pointWidth": [int, float],
"shadow": [bool, dict],
"stack": basestring,
"stacking": basestring,
"turboThreshold": int,
},
"polygon": {
"compare": basestring,
"cropThreshold": int,
"dashStyle": basestring,
"keys": list,
"legendIndex": [int, float],
"lineWidth": int,
"linkedTo": basestring,
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPlacement": [basestring, int, float],
"pointRange": int,
"pointStart": [int,basestring,datetime.datetime],
"shadow": [bool, dict],
"stacking": basestring,
"turboThreshold": int,
},
"scatter": {
"compare": basestring,
"cropThreshold": int,
"dashStyle": basestring,
"keys": list,
"legendIndex": [int, float],
"lineWidth": int,
"linkedTo": basestring,
"marker": (Marker, dict),
"negativeColor": (ColorObject, basestring, dict),
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPlacement": [basestring, int, float],
"pointRange": int,
"pointStart": [int,basestring,datetime.datetime],
"shadow": [bool, dict],
"stacking": basestring,
"turboThreshold": int,
},
"series": {
"compare": basestring,
"connectNulls": bool,
"cropThreshold": int,
"dashStyle": basestring,
"gapSize": [int, float],
"keys": list,
"legendIndex": [int, float],
"lineWidth": int,
"linkedTo": basestring,
"marker": (Marker, dict),
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPlacement": [basestring, int, float],
"pointRange": int,
"pointStart": [int,basestring,datetime.datetime],
"shadow": [bool, dict],
"stacking": basestring,
"turboThreshold": int,
},
"spline": {
"compare": basestring,
"connectNulls": bool,
"cropThreshold": int,
"dashStyle": basestring,
"gapSize": [int, float],
"keys": list,
"legendIndex": [int, float],
"lineWidth": int,
"linkedTo": basestring,
"marker": (Marker, dict),
"pointInterval": int,
"pointIntervalUnit": basestring,
"pointPlacement": [basestring, int, float],
"pointRange": int,
"pointStart": [int,basestring,datetime.datetime],
"shadow": [bool, dict],
"stacking": basestring,
"turboThreshold": int,
},
}
DATA_SERIES_ALLOWED_OPTIONS = {
"color": (ColorObject, basestring, dict),
"connectEnds": bool,
"connectNulls": bool,
"dataLabels": (Labels, dict),
"dataParser": NotImplemented,
"dataURL": NotImplemented,
"drilldown": basestring,
"events": (Events, dict),
"high": [int, float],
"id": basestring,
"index": int,
"legendIndex": int,
"name": basestring,
"marker": (Marker, dict),
"selected": bool,
"sliced": bool,
"showInLegend": bool,
"type": basestring,
"visible": bool,
"x": [int, float, datetime.datetime],
"xAxis": int,
"yAxis": int,
}
DEFAULT_OPTIONS = {
}
class OptionTypeError(Exception):
def __init__(self,*args):
self.args = args
class SeriesOptions(object):
"""Class for plotOptions"""
def __init__(self,series_type="line",supress_errors=False,**kwargs):
self.load_defaults(series_type)
self.process_kwargs(kwargs,series_type=series_type,supress_errors=supress_errors)
@staticmethod
def __validate_options__(k,v,ov):
if isinstance(ov,list):
if isinstance(v,tuple(ov)): return True
else:
raise OptionTypeError("Option Type Currently Not Supported: %s" % k)
else:
if ov == NotImplemented: raise OptionTypeError("Option Type Currently Not Supported: %s" % k)
if isinstance(v,ov): return True
else: return False
def __options__(self):
return self.__dict__
def __jsonable__(self):
return self.__dict__
    def update(self,series_type, supress_errors=False, **kwargs):
allowed_args = PLOT_OPTION_ALLOWED_ARGS[series_type]
allowed_args.update(PLOT_OPTION_ALLOWED_ARGS["common"])
for k, v in kwargs.items():
if k in allowed_args:
if SeriesOptions.__validate_options__(k,v,allowed_args[k]):
if isinstance(allowed_args[k], tuple) and isinstance(allowed_args[k][0](), CommonObject):
# re-construct input dict with existing options in objects
if self.__getattr__(k):
if isinstance(v, dict):
self.__options__()[k].update(v)
else:
self.__options__()[k].__options__().update(v)
else:
self.__options__().update({k:allowed_args[k][0](**v)})
elif isinstance(allowed_args[k], tuple) and isinstance(allowed_args[k][0](), ArrayObject):
# update array
if isinstance(v, dict):
self.__dict__[k].append(allowed_args[k][0](**v))
elif isinstance(v, list):
for item in v:
self.__dict__[k].append(allowed_args[k][0](**item))
else:
                            raise OptionTypeError("Not An Accepted Input Type: %s" % type(v))
elif isinstance(allowed_args[k], tuple) and \
(isinstance(allowed_args[k][0](), CSSObject) or isinstance(allowed_args[k][0](), SVGObject)):
if self.__getattr__(k):
for key, value in v.items():
self.__dict__[k].__options__().update({key:value})
else:
self.__dict__.update({k:allowed_args[k][0](**v)})
v = self.__dict__[k].__options__()
                        # updating object
if isinstance(v, dict):
self.__dict__.update({k:allowed_args[k][0](**v)})
else:
self.__dict__.update({k:allowed_args[k][0](v)})
elif isinstance(allowed_args[k], tuple) and (isinstance(allowed_args[k][0](), JSfunction) or
isinstance(allowed_args[k][0](), Formatter) or isinstance(allowed_args[k][0](), ColorObject)):
if isinstance(v, dict):
self.__dict__.update({k:allowed_args[k][0](**v)})
else:
self.__dict__.update({k:allowed_args[k][0](v)})
else:
self.__dict__.update({k:v})
else:
print(k,v)
if not supress_errors: raise OptionTypeError("Option Type Mismatch: Expected: %s" % allowed_args[k])
def process_kwargs(self,kwargs,series_type,supress_errors=False):
allowed_args = PLOT_OPTION_ALLOWED_ARGS[series_type]
allowed_args.update(PLOT_OPTION_ALLOWED_ARGS["common"])
for k, v in kwargs.items():
if k in allowed_args:
if SeriesOptions.__validate_options__(k,v,allowed_args[k]):
if isinstance(allowed_args[k], tuple):
if isinstance(v, dict):
self.__dict__.update({k:allowed_args[k][0](**v)})
elif isinstance(v, list):
if len(v) == 1:
self.__dict__.update({k:allowed_args[k][0](**v[0])})
else:
self.__dict__.update({k:allowed_args[k][0](**v[0])})
for item in v[1:]:
self.__dict__[k].update(item)
elif isinstance(v, CommonObject) or isinstance(v, ArrayObject) or \
isinstance(v, CSSObject) or isinstance(v, SVGObject) or isinstance(v, ColorObject) or \
isinstance(v, JSfunction) or isinstance(v, Formatter) or isinstance(v, datetime.datetime):
self.__dict__.update({k:v})
else:
self.__dict__.update({k:allowed_args[k][0](v)})
else:
self.__dict__.update({k:v})
else:
print(k,v)
if not supress_errors: raise OptionTypeError("Option Type Mismatch: Expected: %s" % allowed_args[k])
def load_defaults(self,series_type): # not in use
self.process_kwargs(DEFAULT_OPTIONS.get(series_type,{}),series_type)
def __getattr__(self,item):
if not item in self.__dict__:
return None # Attribute Not Set
else:
return True
class Series(object):
"""Series class for input data """
def __init__(self,data,series_type="line",supress_errors=False,**kwargs):
# List of dictionaries. Each dict contains data and properties,
        # which need to be handled before constructing the series object.
if isinstance(data, list):
for item in data:
if isinstance(item, dict):
for k, v in item.items():
if k in DATA_SERIES_ALLOWED_OPTIONS:
if SeriesOptions.__validate_options__(k,v,DATA_SERIES_ALLOWED_OPTIONS[k]):
if isinstance(DATA_SERIES_ALLOWED_OPTIONS[k], tuple):
if isinstance(v, dict):
item.update({k:DATA_SERIES_ALLOWED_OPTIONS[k][0](**v)})
elif isinstance(v, datetime.datetime):
item.update({k:v})
else:
item.update({k:DATA_SERIES_ALLOWED_OPTIONS[k][0](v)})
else:
item.update({k:v})
self.__dict__.update({
"data": data,
"type": series_type,
})
        # Series properties can be passed as kwargs; they are handled here.
for k, v in kwargs.items():
if k in DATA_SERIES_ALLOWED_OPTIONS:
if SeriesOptions.__validate_options__(k,v,DATA_SERIES_ALLOWED_OPTIONS[k]):
if isinstance(DATA_SERIES_ALLOWED_OPTIONS[k], tuple):
if isinstance(v, dict):
self.__dict__.update({k:DATA_SERIES_ALLOWED_OPTIONS[k][0](**v)})
elif isinstance(v, CommonObject) or isinstance(v, ArrayObject) or \
isinstance(v, CSSObject) or isinstance(v, SVGObject) or isinstance(v, ColorObject) or \
isinstance(v, JSfunction) or isinstance(v, Formatter) or isinstance(v, datetime.datetime):
self.__dict__.update({k:v})
else:
self.__dict__.update({k:DATA_SERIES_ALLOWED_OPTIONS[k][0](v)})
else:
self.__dict__.update({k:v})
else:
if not supress_errors: raise OptionTypeError("Option Type Mismatch: Expected: %s" % DATA_SERIES_ALLOWED_OPTIONS[k])
def __options__(self):
return self.__dict__
def __jsonable__(self):
return self.__dict__
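# --- Illustrative usage (sketch, not part of the library). The data values are
# made up; only plain-typed options are used so no classes from .common are assumed. ---
def _example_series_usage():
    data = [
        {'x': 1483228800000, 'y': 101.2, 'name': 'point-1'},
        {'x': 1483315200000, 'y': 103.7, 'name': 'point-2'},
    ]
    series = Series(data, series_type='line', name='Example series', yAxis=0)
    options = SeriesOptions(series_type='line', lineWidth=2, stacking='normal')
    return series.__jsonable__(), options.__jsonable__()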
| mit | -7,985,119,377,433,008,000 | 34.080501 | 135 | 0.54615 | false |
qunying/gps | share/support/core/console_utils.py | 1 | 1813 | """
This plugin provides highlighting of text in Messages View
matched by user defined regular expressions.
"""
import GPS
from gps_utils import hook
# number of regexps
cu_count = 5
# Initialize preference for regexp with number num
def cu_create_preference(num):
cu_regexp = GPS.Preference(
"Messages:Custom Highlighting " + str(num) + "/regexp")
cu_style = GPS.Preference(
"Messages:Custom Highlighting " + str(num) + "/variant")
cu_regexp.create(
"Regexp to highlight",
"string",
"Enter a regular expression to highlight in the Messages View.",
"")
cu_style.create_style(
label="Regexp style",
doc="")
def cu_load_preference(num):
cu_regexp = GPS.Preference(
"Messages:Custom Highlighting " + str(num) + "/regexp")
cu_style = GPS.Preference(
"Messages:Custom Highlighting " + str(num) + "/variant")
if cu_regexp.get() == "":
return
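    # The style ("variant") preference encodes "<font>@<foreground>@<background>",
    # so split it on '@' before building the console link below.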
style_value = cu_style.get().split('@')
try:
GPS.Console().create_link(regexp=cu_regexp.get(),
on_click=lambda x: None,
foreground=style_value[1],
background=style_value[2],
underline=False,
font=style_value[0])
except GPS.Exception:
return
@hook('preferences_changed')
def on_preferences_changed(reload=True):
cu_load_preferences()
def on_gps_started(hook):
cu_load_preferences()
@hook('gps_started')
def __on_gps_started():
cu_load_preferences()
def cu_load_preferences():
GPS.Console().delete_links()
for j in range(cu_count):
cu_load_preference(j + 1)
for j in range(cu_count):
cu_create_preference(j + 1)
| gpl-3.0 | -6,870,264,987,213,448,000 | 23.5 | 72 | 0.583012 | false |
tylerlaberge/PyPattyrn | pypattyrn/behavioral/mediator.py | 1 | 1661 | from collections import defaultdict
class Mediator(object):
"""
Mediator class as part of the Mediator design pattern.
- External Usage documentation: U{https://github.com/tylerlaberge/PyPattyrn#mediator-pattern}
- External Mediator Pattern documentation: U{https://en.wikipedia.org/wiki/Mediator_pattern}
"""
def __init__(self):
"""
Initialize a new Mediator instance.
"""
self.signals = defaultdict(list)
def signal(self, signal_name, *args, **kwargs):
"""
Send a signal out to all connected handlers.
@param signal_name: The name of the signal.
@type signal_name: Str
@param args: Positional arguments to send with the signal.
@param kwargs: Keyword arguments to send with the signal.
"""
for handler in self.signals[signal_name]:
handler(*args, **kwargs)
def connect(self, signal_name, receiver):
"""
Connect a receiver to a signal.
@param signal_name: The name of the signal to connect the receiver to.
@type signal_name: str
@param receiver: A handler to call when the signal is sent out.
"""
self.signals[signal_name].append(receiver)
def disconnect(self, signal_name, receiver):
"""
Disconnect a receiver from a signal.
@param signal_name: The name of the signal to disconnect the receiver from.
@type signal_name: str
@param receiver: The receiver to disconnect from the signal.
"""
try:
self.signals[signal_name].remove(receiver)
except ValueError:
pass
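# --- Illustrative usage (sketch, not part of the original module). ---
def _example_mediator_usage():
    mediator = Mediator()
    received = []
    def on_greeting(name):
        received.append('hello %s' % name)
    mediator.connect('greeting', on_greeting)
    mediator.signal('greeting', 'world')  # received becomes ['hello world']
    mediator.disconnect('greeting', on_greeting)
    return received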
| mit | -2,050,017,796,419,365,400 | 32.22 | 97 | 0.625527 | false |
aritter/twitter_nlp | python/ner/Features.py | 1 | 7652 | import sys
import re
import os
import string
import subprocess
#BASE_DIR = '/home/aritter/twitter_nlp'
#BASE_DIR = os.environ['HOME'] + '/twitter_nlp'
#BASE_DIR = '/homes/gws/aritter/twitter_nlp'
BASE_DIR = 'twitter_nlp.jar'
if os.environ.has_key('TWITTER_NLP'):
BASE_DIR = os.environ['TWITTER_NLP']
#sys.path.append('%s/python/' % (BASE_DIR))
#sys.path.append('%s/python/cap/' % (BASE_DIR))
#sys.path.append('../cap/')
#import cap_classifier
def Brown2Bits(bits):
bitstring = ""
for i in range(20):
if bits & (1 << i):
bitstring += '1'
else:
bitstring += '0'
return bitstring
def GetOrthographicFeatures(word, goodCap):
features = []
#Don't include these features for usernames
features.append("word=%s" % word)
features.append("word_lower=%s" % word.lower())
if(len(word) >= 4):
features.append("prefix=%s" % word[0:1].lower())
features.append("prefix=%s" % word[0:2].lower())
features.append("prefix=%s" % word[0:3].lower())
features.append("suffix=%s" % word[len(word)-1:len(word)].lower())
features.append("suffix=%s" % word[len(word)-2:len(word)].lower())
features.append("suffix=%s" % word[len(word)-3:len(word)].lower())
#Substring features (don't seem to help)
#for i in range(1,len(word)-2):
# for j in range(i+1,len(word)-1):
# features.append("substr=%s" % word[i:j])
if re.search(r'^[A-Z]', word):
features.append('INITCAP')
if re.search(r'^[A-Z]', word) and goodCap:
features.append('INITCAP_AND_GOODCAP')
if re.match(r'^[A-Z]+$', word):
features.append('ALLCAP')
if re.match(r'^[A-Z]+$', word) and goodCap:
features.append('ALLCAP_AND_GOODCAP')
if re.match(r'.*[0-9].*', word):
features.append('HASDIGIT')
if re.match(r'[0-9]', word):
features.append('SINGLEDIGIT')
if re.match(r'[0-9][0-9]', word):
features.append('DOUBLEDIGIT')
if re.match(r'.*-.*', word):
features.append('HASDASH')
if re.match(r'[.,;:?!-+\'"]', word):
features.append('PUNCTUATION')
return features
class DictionaryFeatures:
def __init__(self, dictDir):
self.brownClusters = None
self.word2dictionaries = {}
self.dictionaries = []
i = 0
for d in os.listdir(dictDir):
self.dictionaries.append(d)
if d == '.svn':
continue
for line in open(dictDir + "/" + d):
word = line.rstrip('\n')
word = word.strip(' ').lower()
                if not self.word2dictionaries.has_key(word): #Tab-separated string is more memory efficient than a list?
self.word2dictionaries[word] = str(i)
else:
self.word2dictionaries[word] += "\t%s" % i
i += 1
def AddBrownClusters(self, brownFile):
self.brownClusters = {}
for line in open(brownFile):
line = line.rstrip('\n')
(word, bits) = line.split(' ')
bits = int(bits)
self.brownClusters[word] = bits
MAX_WINDOW_SIZE=6
def GetDictFeatures(self, words, i):
features = []
for window in range(self.MAX_WINDOW_SIZE):
for start in range(max(i-window+1, 0), i+1):
end = start + window
phrase = ' '.join(words[start:end]).lower().strip(string.punctuation)
if self.word2dictionaries.has_key(phrase):
for j in self.word2dictionaries[phrase].split('\t'):
features.append('DICT=%s' % self.dictionaries[int(j)])
if window > 1:
features.append('DICTWIN=%s' % window)
if self.brownClusters and self.brownClusters.has_key(words[i].lower()):
for j in [4, 8, 12]:
bitstring = Brown2Bits(self.brownClusters[words[i].lower()])
features.append('BROWN=%s' % bitstring[0:j+1])
return list(set(features))
class DictionaryFeatures2(DictionaryFeatures):
def __init__(self, dictFile):
self.word2dictionaries = {}
for line in open(dictFile):
(word, dictionary) = line.rstrip('\n').split('\t')
if re.search(r'^/(common|user|type|freebase|base)/', dictionary):
continue
if not self.word2dictionaries.has_key(word):
self.word2dictionaries[word] = []
self.word2dictionaries[word].append(dictionary)
def GetQuotes(words):
string = ' '.join(words)
quoted = []
string = re.sub(r"' ([^']+) '", r"' |||[ \1 ]||| '", string)
string = re.sub(r'" ([^"]+) "', r'" |||[ \1 ]||| "', string)
isquoted = False
words = string.split(' ')
for i in range(len(words)):
if words[i] == "|||[":
isquoted = True
elif words[i] == "]|||":
isquoted = False
else:
quoted.append(isquoted)
return quoted
class FeatureExtractor:
def __init__(self, dictDir="%s/data/dictionaries" % (BASE_DIR), brownFile="%s/data/brown_clusters/60K_clusters.txt" % (BASE_DIR)):
self.df = DictionaryFeatures(dictDir)
if brownFile:
self.df.AddBrownClusters(brownFile)
LEFT_RIGHT_WINDOW=3
def Extract(self, words, pos, chunk, i, goodCap=True):
features = GetOrthographicFeatures(words[i], goodCap) + self.df.GetDictFeatures(words, i) + ["goodCap=%s" % goodCap]
for j in range(i-self.LEFT_RIGHT_WINDOW,i+self.LEFT_RIGHT_WINDOW):
if j > 0 and j < i:
features.append('LEFT_WINDOW=%s' % words[j])
elif j < len(words) and j > i:
features.append('RIGHT_WINDOW=%s' % words[j])
if pos:
features.append('POS=%s' % pos[i])
features.append('POS=%s' % pos[i][0:1])
features.append('POS=%s' % pos[i][0:2])
if chunk:
features.append('CHUNK=%s' % chunk[i])
if i == 0:
features.append('BEGIN')
if pos:
features.append('POS=%s_X_%s' % ('_'.join(pos[i-1:i]),'_'.join(pos[i+1:i+2])))
if chunk:
features.append('CHUNK=%s_X_%s' % ('_'.join(chunk[i-1:i]),'_'.join(chunk[i+1:i+2])))
if i > 0:
features += ["p1=%s" % x for x in GetOrthographicFeatures(words[i-1], goodCap) + self.df.GetDictFeatures(words, i-1)]
if pos:
features.append('PREV_POS=%s' % pos[i-1])
features.append('PREV_POS=%s' % pos[i-1][0:1])
features.append('PREV_POS=%s' % pos[i-1][0:2])
if i > 1:
if pos:
features.append('PREV_POS=%s_%s' % (pos[i-1], pos[i-2]))
features.append('PREV_POS=%s_%s' % (pos[i-1][0:1], pos[i-2][0:1]))
features.append('PREV_POS=%s_%s' % (pos[i-1][0:2], pos[i-2][0:2]))
if i < len(words)-1:
features += ["n1=%s" % x for x in GetOrthographicFeatures(words[i+1], goodCap) + self.df.GetDictFeatures(words, i+1)]
if pos:
features.append('NEXT_POS=%s' % pos[i+1])
features.append('NEXT_POS=%s' % pos[i+1][0:1])
features.append('NEXT_POS=%s' % pos[i+1][0:2])
if i < len(words)-2:
if pos:
features.append('NEXT_POS=%s_%s' % (pos[i+1], pos[i+2]))
features.append('NEXT_POS=%s_%s' % (pos[i+1][0:1], pos[i+2][0:1]))
features.append('NEXT_POS=%s_%s' % (pos[i+1][0:2], pos[i+2][0:2]))
return features
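# --- Illustrative usage (sketch). The token/POS/chunk lists are made up, and the
# default dictionary and Brown-cluster paths must exist under $TWITTER_NLP. ---
def _example_feature_extraction():
    fe = FeatureExtractor()
    words = ['Justin', 'Bieber', 'lives', 'in', 'Seattle']
    pos = ['NNP', 'NNP', 'VBZ', 'IN', 'NNP']
    chunk = ['B-NP', 'I-NP', 'B-VP', 'B-PP', 'B-NP']
    # Features for the first token; goodCap=True means capitalization is informative.
    return fe.Extract(words, pos, chunk, 0, goodCap=True)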
| gpl-3.0 | 5,359,250,534,318,683,000 | 38.040816 | 134 | 0.531887 | false |
aholkner/PoliticalRPG | odsimport.py | 1 | 1309 | from odf.opendocument import load
from odf.table import Table, TableRow, TableCell
from odf.namespaces import TABLENS
from odf.text import P
def import_ods(path):
doc = load(path)
db = {}
tables = doc.spreadsheet.getElementsByType(Table)
for table in tables:
db_table = []
db[table.getAttribute('name')] = db_table
for row in table.getElementsByType(TableRow):
db_row = []
db_table.append(db_row)
for cell in row.getElementsByType(TableCell):
db_value = '\n'.join(map(str, cell.getElementsByType(P))).decode('utf-8')
db_value = db_value.strip()
try:
db_value = float(db_value)
except:
db_value = db_value.replace(u'\u2026', '...')
db_value = db_value.replace(u'\u200b', '')
db_value = db_value.encode('utf-8')
try:
repeat_count = int(cell.getAttribute('numbercolumnsrepeated'))
except:
repeat_count = 1
if not cell.nextSibling:
repeat_count = 1
for i in range(repeat_count):
db_row.append(db_value)
return db
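# Illustrative usage (sketch; 'campaign.ods' is a placeholder file name):
#   db = import_ods('campaign.ods')
#   for sheet_name, rows in db.items():
#       print(sheet_name, len(rows))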
| bsd-2-clause | 4,306,628,085,065,820,700 | 31.75 | 89 | 0.509549 | false |
digitalocean/netbox | netbox/dcim/migrations/0093_device_component_ordering.py | 1 | 5321 | from django.db import migrations
import utilities.fields
import utilities.ordering
def _update_model_names(model):
# Update each unique field value in bulk
for name in model.objects.values_list('name', flat=True).order_by('name').distinct():
model.objects.filter(name=name).update(_name=utilities.ordering.naturalize(name, max_length=100))
def naturalize_consoleports(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'ConsolePort'))
def naturalize_consoleserverports(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'ConsoleServerPort'))
def naturalize_powerports(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'PowerPort'))
def naturalize_poweroutlets(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'PowerOutlet'))
def naturalize_frontports(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'FrontPort'))
def naturalize_rearports(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'RearPort'))
def naturalize_devicebays(apps, schema_editor):
_update_model_names(apps.get_model('dcim', 'DeviceBay'))
class Migration(migrations.Migration):
dependencies = [
('dcim', '0092_fix_rack_outer_unit'),
]
operations = [
migrations.AlterModelOptions(
name='consoleport',
options={'ordering': ('device', '_name')},
),
migrations.AlterModelOptions(
name='consoleserverport',
options={'ordering': ('device', '_name')},
),
migrations.AlterModelOptions(
name='devicebay',
options={'ordering': ('device', '_name')},
),
migrations.AlterModelOptions(
name='frontport',
options={'ordering': ('device', '_name')},
),
migrations.AlterModelOptions(
name='inventoryitem',
options={'ordering': ('device__id', 'parent__id', '_name')},
),
migrations.AlterModelOptions(
name='poweroutlet',
options={'ordering': ('device', '_name')},
),
migrations.AlterModelOptions(
name='powerport',
options={'ordering': ('device', '_name')},
),
migrations.AlterModelOptions(
name='rearport',
options={'ordering': ('device', '_name')},
),
migrations.AddField(
model_name='consoleport',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='consoleserverport',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='devicebay',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='frontport',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='inventoryitem',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='poweroutlet',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='powerport',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.AddField(
model_name='rearport',
name='_name',
field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
),
migrations.RunPython(
code=naturalize_consoleports,
reverse_code=migrations.RunPython.noop
),
migrations.RunPython(
code=naturalize_consoleserverports,
reverse_code=migrations.RunPython.noop
),
migrations.RunPython(
code=naturalize_powerports,
reverse_code=migrations.RunPython.noop
),
migrations.RunPython(
code=naturalize_poweroutlets,
reverse_code=migrations.RunPython.noop
),
migrations.RunPython(
code=naturalize_frontports,
reverse_code=migrations.RunPython.noop
),
migrations.RunPython(
code=naturalize_rearports,
reverse_code=migrations.RunPython.noop
),
migrations.RunPython(
code=naturalize_devicebays,
reverse_code=migrations.RunPython.noop
),
]
| apache-2.0 | 7,556,244,809,131,868,000 | 35.197279 | 143 | 0.614922 | false |
Kamalheib/senty | senty/modules/rdma_dev.py | 1 | 1174 | #!/usr/bin/env python
"""
Senty Project
Copyright(c) 2017 Senty.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Kamal Heib <[email protected]>
"""
class RDMADev(object):
def __init__(self, name, id, ports=[]):
self._id = id
self._name = name
self._ports = ports
def get_id(self):
return self._id
def get_name(self):
return self._name
def get_ports(self):
return self._ports
ID = property(get_id)
Name = property(get_name)
Ports = property(get_ports)
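# Illustrative usage (sketch; the device name, id and port list are made up):
#   dev = RDMADev('mlx5_0', 0, ports=[1, 2])
#   print(dev.Name, dev.ID, dev.Ports)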
| gpl-2.0 | 8,281,828,205,370,899,000 | 25.681818 | 73 | 0.706985 | false |
ev3dev/u-boot | tools/genboardscfg.py | 2 | 14695 | #!/usr/bin/env python2
# SPDX-License-Identifier: GPL-2.0+
#
# Author: Masahiro Yamada <[email protected]>
#
"""
Converter from Kconfig and MAINTAINERS to a board database.
Run 'tools/genboardscfg.py' to create a board database.
Run 'tools/genboardscfg.py -h' for available options.
Python 2.6 or later, but not Python 3.x is necessary to run this script.
"""
import errno
import fnmatch
import glob
import multiprocessing
import optparse
import os
import sys
import tempfile
import time
sys.path.append(os.path.join(os.path.dirname(__file__), 'buildman'))
import kconfiglib
### constant variables ###
OUTPUT_FILE = 'boards.cfg'
CONFIG_DIR = 'configs'
SLEEP_TIME = 0.03
COMMENT_BLOCK = '''#
# List of boards
# Automatically generated by %s: don't edit
#
# Status, Arch, CPU, SoC, Vendor, Board, Target, Options, Maintainers
''' % __file__
### helper functions ###
def try_remove(f):
"""Remove a file ignoring 'No such file or directory' error."""
try:
os.remove(f)
except OSError as exception:
# Ignore 'No such file or directory' error
if exception.errno != errno.ENOENT:
raise
def check_top_directory():
"""Exit if we are not at the top of source directory."""
for f in ('README', 'Licenses'):
if not os.path.exists(f):
sys.exit('Please run at the top of source directory.')
def output_is_new(output):
"""Check if the output file is up to date.
Returns:
True if the given output file exists and is newer than any of
*_defconfig, MAINTAINERS and Kconfig*. False otherwise.
"""
try:
ctime = os.path.getctime(output)
except OSError as exception:
if exception.errno == errno.ENOENT:
# return False on 'No such file or directory' error
return False
else:
raise
for (dirpath, dirnames, filenames) in os.walk(CONFIG_DIR):
for filename in fnmatch.filter(filenames, '*_defconfig'):
if fnmatch.fnmatch(filename, '.*'):
continue
filepath = os.path.join(dirpath, filename)
if ctime < os.path.getctime(filepath):
return False
for (dirpath, dirnames, filenames) in os.walk('.'):
for filename in filenames:
if (fnmatch.fnmatch(filename, '*~') or
not fnmatch.fnmatch(filename, 'Kconfig*') and
not filename == 'MAINTAINERS'):
continue
filepath = os.path.join(dirpath, filename)
if ctime < os.path.getctime(filepath):
return False
# Detect a board that has been removed since the current board database
# was generated
with open(output) as f:
for line in f:
if line[0] == '#' or line == '\n':
continue
defconfig = line.split()[6] + '_defconfig'
if not os.path.exists(os.path.join(CONFIG_DIR, defconfig)):
return False
return True
### classes ###
class KconfigScanner:
"""Kconfig scanner."""
### constant variable only used in this class ###
_SYMBOL_TABLE = {
'arch' : 'SYS_ARCH',
'cpu' : 'SYS_CPU',
'soc' : 'SYS_SOC',
'vendor' : 'SYS_VENDOR',
'board' : 'SYS_BOARD',
'config' : 'SYS_CONFIG_NAME',
'options' : 'SYS_EXTRA_OPTIONS'
}
def __init__(self):
"""Scan all the Kconfig files and create a Config object."""
# Define environment variables referenced from Kconfig
os.environ['srctree'] = os.getcwd()
os.environ['UBOOTVERSION'] = 'dummy'
os.environ['KCONFIG_OBJDIR'] = ''
self._conf = kconfiglib.Config(print_warnings=False)
def __del__(self):
"""Delete a leftover temporary file before exit.
        The scan() method of this class creates a temporary file and deletes
        it on success. If the scan() method throws an exception on the way,
the temporary file might be left over. In that case, it should be
deleted in this destructor.
"""
if hasattr(self, '_tmpfile') and self._tmpfile:
try_remove(self._tmpfile)
def scan(self, defconfig):
"""Load a defconfig file to obtain board parameters.
Arguments:
defconfig: path to the defconfig file to be processed
Returns:
A dictionary of board parameters. It has a form of:
{
'arch': <arch_name>,
'cpu': <cpu_name>,
'soc': <soc_name>,
'vendor': <vendor_name>,
'board': <board_name>,
'target': <target_name>,
'config': <config_header_name>,
'options': <extra_options>
}
"""
# strip special prefixes and save it in a temporary file
fd, self._tmpfile = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
for line in open(defconfig):
colon = line.find(':CONFIG_')
if colon == -1:
f.write(line)
else:
f.write(line[colon + 1:])
warnings = self._conf.load_config(self._tmpfile)
if warnings:
for warning in warnings:
print '%s: %s' % (defconfig, warning)
try_remove(self._tmpfile)
self._tmpfile = None
params = {}
# Get the value of CONFIG_SYS_ARCH, CONFIG_SYS_CPU, ... etc.
# Set '-' if the value is empty.
for key, symbol in self._SYMBOL_TABLE.items():
value = self._conf.get_symbol(symbol).get_value()
if value:
params[key] = value
else:
params[key] = '-'
defconfig = os.path.basename(defconfig)
params['target'], match, rear = defconfig.partition('_defconfig')
assert match and not rear, '%s : invalid defconfig' % defconfig
# fix-up for aarch64
if params['arch'] == 'arm' and params['cpu'] == 'armv8':
params['arch'] = 'aarch64'
# fix-up options field. It should have the form:
# <config name>[:comma separated config options]
if params['options'] != '-':
params['options'] = params['config'] + ':' + \
params['options'].replace(r'\"', '"')
elif params['config'] != params['target']:
params['options'] = params['config']
return params
def scan_defconfigs_for_multiprocess(queue, defconfigs):
"""Scan defconfig files and queue their board parameters
This function is intended to be passed to
multiprocessing.Process() constructor.
Arguments:
queue: An instance of multiprocessing.Queue().
The resulting board parameters are written into it.
defconfigs: A sequence of defconfig files to be scanned.
"""
kconf_scanner = KconfigScanner()
for defconfig in defconfigs:
queue.put(kconf_scanner.scan(defconfig))
def read_queues(queues, params_list):
"""Read the queues and append the data to the paramers list"""
for q in queues:
while not q.empty():
params_list.append(q.get())
def scan_defconfigs(jobs=1):
"""Collect board parameters for all defconfig files.
This function invokes multiple processes for faster processing.
Arguments:
jobs: The number of jobs to run simultaneously
"""
all_defconfigs = []
for (dirpath, dirnames, filenames) in os.walk(CONFIG_DIR):
for filename in fnmatch.filter(filenames, '*_defconfig'):
if fnmatch.fnmatch(filename, '.*'):
continue
all_defconfigs.append(os.path.join(dirpath, filename))
total_boards = len(all_defconfigs)
processes = []
queues = []
for i in range(jobs):
defconfigs = all_defconfigs[total_boards * i / jobs :
total_boards * (i + 1) / jobs]
q = multiprocessing.Queue(maxsize=-1)
p = multiprocessing.Process(target=scan_defconfigs_for_multiprocess,
args=(q, defconfigs))
p.start()
processes.append(p)
queues.append(q)
# The resulting data should be accumulated to this list
params_list = []
    # Data in the queues should be retrieved periodically.
# Otherwise, the queues would become full and subprocesses would get stuck.
while any([p.is_alive() for p in processes]):
read_queues(queues, params_list)
# sleep for a while until the queues are filled
time.sleep(SLEEP_TIME)
# Joining subprocesses just in case
# (All subprocesses should already have been finished)
for p in processes:
p.join()
# retrieve leftover data
read_queues(queues, params_list)
return params_list
class MaintainersDatabase:
"""The database of board status and maintainers."""
def __init__(self):
"""Create an empty database."""
self.database = {}
def get_status(self, target):
"""Return the status of the given board.
The board status is generally either 'Active' or 'Orphan'.
Display a warning message and return '-' if status information
is not found.
Returns:
'Active', 'Orphan' or '-'.
"""
if not target in self.database:
print >> sys.stderr, "WARNING: no status info for '%s'" % target
return '-'
tmp = self.database[target][0]
if tmp.startswith('Maintained'):
return 'Active'
elif tmp.startswith('Supported'):
return 'Active'
elif tmp.startswith('Orphan'):
return 'Orphan'
else:
print >> sys.stderr, ("WARNING: %s: unknown status for '%s'" %
(tmp, target))
return '-'
def get_maintainers(self, target):
"""Return the maintainers of the given board.
Returns:
Maintainers of the board. If the board has two or more maintainers,
they are separated with colons.
"""
if not target in self.database:
print >> sys.stderr, "WARNING: no maintainers for '%s'" % target
return ''
return ':'.join(self.database[target][1])
def parse_file(self, file):
"""Parse a MAINTAINERS file.
        Parse a MAINTAINERS file and accumulate board status and
maintainers information.
Arguments:
file: MAINTAINERS file to be parsed
"""
targets = []
maintainers = []
status = '-'
for line in open(file):
# Check also commented maintainers
if line[:3] == '#M:':
line = line[1:]
tag, rest = line[:2], line[2:].strip()
if tag == 'M:':
maintainers.append(rest)
elif tag == 'F:':
# expand wildcard and filter by 'configs/*_defconfig'
for f in glob.glob(rest):
front, match, rear = f.partition('configs/')
if not front and match:
front, match, rear = rear.rpartition('_defconfig')
if match and not rear:
targets.append(front)
elif tag == 'S:':
status = rest
elif line == '\n':
for target in targets:
self.database[target] = (status, maintainers)
targets = []
maintainers = []
status = '-'
if targets:
for target in targets:
self.database[target] = (status, maintainers)
def insert_maintainers_info(params_list):
"""Add Status and Maintainers information to the board parameters list.
Arguments:
params_list: A list of the board parameters
"""
database = MaintainersDatabase()
for (dirpath, dirnames, filenames) in os.walk('.'):
if 'MAINTAINERS' in filenames:
database.parse_file(os.path.join(dirpath, 'MAINTAINERS'))
for i, params in enumerate(params_list):
target = params['target']
params['status'] = database.get_status(target)
params['maintainers'] = database.get_maintainers(target)
params_list[i] = params
def format_and_output(params_list, output):
"""Write board parameters into a file.
Columnate the board parameters, sort lines alphabetically,
and then write them to a file.
Arguments:
params_list: The list of board parameters
output: The path to the output file
"""
FIELDS = ('status', 'arch', 'cpu', 'soc', 'vendor', 'board', 'target',
'options', 'maintainers')
# First, decide the width of each column
max_length = dict([ (f, 0) for f in FIELDS])
for params in params_list:
for f in FIELDS:
max_length[f] = max(max_length[f], len(params[f]))
output_lines = []
for params in params_list:
line = ''
for f in FIELDS:
# insert two spaces between fields like column -t would
line += ' ' + params[f].ljust(max_length[f])
output_lines.append(line.strip())
# ignore case when sorting
output_lines.sort(key=str.lower)
with open(output, 'w') as f:
f.write(COMMENT_BLOCK + '\n'.join(output_lines) + '\n')
def gen_boards_cfg(output, jobs=1, force=False):
"""Generate a board database file.
Arguments:
output: The name of the output file
jobs: The number of jobs to run simultaneously
force: Force to generate the output even if it is new
"""
check_top_directory()
if not force and output_is_new(output):
print "%s is up to date. Nothing to do." % output
sys.exit(0)
params_list = scan_defconfigs(jobs)
insert_maintainers_info(params_list)
format_and_output(params_list, output)
def main():
try:
cpu_count = multiprocessing.cpu_count()
except NotImplementedError:
cpu_count = 1
parser = optparse.OptionParser()
# Add options here
parser.add_option('-f', '--force', action="store_true", default=False,
help='regenerate the output even if it is new')
parser.add_option('-j', '--jobs', type='int', default=cpu_count,
help='the number of jobs to run simultaneously')
parser.add_option('-o', '--output', default=OUTPUT_FILE,
help='output file [default=%s]' % OUTPUT_FILE)
(options, args) = parser.parse_args()
gen_boards_cfg(options.output, jobs=options.jobs, force=options.force)
if __name__ == '__main__':
main()
| gpl-2.0 | 3,503,022,929,816,490,500 | 31.87472 | 79 | 0.576659 | false |
fabioz/Pydev | plugins/org.python.pydev.core/pysrc/tests_python/test_suspended_frames_manager.py | 2 | 6407 | import sys
from _pydevd_bundle.pydevd_constants import int_types, GENERATED_LEN_ATTR_NAME
from _pydevd_bundle.pydevd_resolver import MAX_ITEMS_TO_HANDLE, TOO_LARGE_ATTR
from _pydevd_bundle import pydevd_frame_utils
def get_frame():
var1 = 1
var2 = [var1]
var3 = {33: [var1]}
return sys._getframe()
def check_vars_dict_expected(as_dict, expected):
assert as_dict == expected
class _DummyPyDB(object):
def __init__(self):
from _pydevd_bundle.pydevd_api import PyDevdAPI
self.variable_presentation = PyDevdAPI.VariablePresentation()
def test_suspended_frames_manager():
from _pydevd_bundle.pydevd_suspended_frames import SuspendedFramesManager
from _pydevd_bundle.pydevd_utils import DAPGrouper
suspended_frames_manager = SuspendedFramesManager()
py_db = _DummyPyDB()
with suspended_frames_manager.track_frames(py_db) as tracker:
# : :type tracker: _FramesTracker
thread_id = 'thread1'
frame = get_frame()
tracker.track(thread_id, pydevd_frame_utils.create_frames_list_from_frame(frame))
assert suspended_frames_manager.get_thread_id_for_variable_reference(id(frame)) == thread_id
variable = suspended_frames_manager.get_variable(id(frame))
# Should be properly sorted.
assert ['var1', 'var2', 'var3'] == [x.get_name()for x in variable.get_children_variables()]
as_dict = dict((x.get_name(), x.get_var_data()) for x in variable.get_children_variables())
var_reference = as_dict['var2'].pop('variablesReference')
assert isinstance(var_reference, int_types) # The variable reference is always a new int.
assert isinstance(as_dict['var3'].pop('variablesReference'), int_types) # The variable reference is always a new int.
check_vars_dict_expected(as_dict, {
'var1': {'name': 'var1', 'value': '1', 'type': 'int', 'evaluateName': 'var1', 'variablesReference': 0},
'var2': {'name': 'var2', 'value': '[1]', 'type': 'list', 'evaluateName': 'var2'},
'var3': {'name': 'var3', 'value': '{33: [1]}', 'type': 'dict', 'evaluateName': 'var3'}
})
# Now, same thing with a different format.
as_dict = dict((x.get_name(), x.get_var_data(fmt={'hex': True})) for x in variable.get_children_variables())
var_reference = as_dict['var2'].pop('variablesReference')
assert isinstance(var_reference, int_types) # The variable reference is always a new int.
assert isinstance(as_dict['var3'].pop('variablesReference'), int_types) # The variable reference is always a new int.
check_vars_dict_expected(as_dict, {
'var1': {'name': 'var1', 'value': '0x1', 'type': 'int', 'evaluateName': 'var1', 'variablesReference': 0},
'var2': {'name': 'var2', 'value': '[0x1]', 'type': 'list', 'evaluateName': 'var2'},
'var3': {'name': 'var3', 'value': '{0x21: [0x1]}', 'type': 'dict', 'evaluateName': 'var3'}
})
var2 = dict((x.get_name(), x) for x in variable.get_children_variables())['var2']
children_vars = var2.get_children_variables()
as_dict = (dict([x.get_name(), x.get_var_data()] for x in children_vars if x.get_name() not in DAPGrouper.SCOPES_SORTED))
assert as_dict == {
'0': {'name': '0', 'value': '1', 'type': 'int', 'evaluateName': 'var2[0]', 'variablesReference': 0 },
GENERATED_LEN_ATTR_NAME: {'name': GENERATED_LEN_ATTR_NAME, 'value': '1', 'type': 'int', 'evaluateName': 'len(var2)', 'variablesReference': 0, 'presentationHint': {'attributes': ['readOnly']}, },
}
var3 = dict((x.get_name(), x) for x in variable.get_children_variables())['var3']
children_vars = var3.get_children_variables()
as_dict = (dict([x.get_name(), x.get_var_data()] for x in children_vars if x.get_name() not in DAPGrouper.SCOPES_SORTED))
assert isinstance(as_dict['33'].pop('variablesReference'), int_types) # The variable reference is always a new int.
check_vars_dict_expected(as_dict, {
'33': {'name': '33', 'value': "[1]", 'type': 'list', 'evaluateName': 'var3[33]'},
GENERATED_LEN_ATTR_NAME: {'name': GENERATED_LEN_ATTR_NAME, 'value': '1', 'type': 'int', 'evaluateName': 'len(var3)', 'variablesReference': 0, 'presentationHint': {'attributes': ['readOnly']}, }
})
_NUMBER_OF_ITEMS_TO_CREATE = MAX_ITEMS_TO_HANDLE + 300
def get_dict_large_frame():
obj = {}
for idx in range(_NUMBER_OF_ITEMS_TO_CREATE):
obj[idx] = (1)
return sys._getframe()
def get_set_large_frame():
obj = set()
for idx in range(_NUMBER_OF_ITEMS_TO_CREATE):
obj.add(idx)
return sys._getframe()
def get_tuple_large_frame():
obj = tuple(range(_NUMBER_OF_ITEMS_TO_CREATE))
return sys._getframe()
def test_get_child_variables():
from _pydevd_bundle.pydevd_suspended_frames import SuspendedFramesManager
suspended_frames_manager = SuspendedFramesManager()
py_db = _DummyPyDB()
for frame in (
get_dict_large_frame(),
get_set_large_frame(),
get_tuple_large_frame(),
):
with suspended_frames_manager.track_frames(py_db) as tracker:
# : :type tracker: _FramesTracker
thread_id = 'thread1'
tracker.track(thread_id, pydevd_frame_utils.create_frames_list_from_frame(frame))
assert suspended_frames_manager.get_thread_id_for_variable_reference(id(frame)) == thread_id
variable = suspended_frames_manager.get_variable(id(frame))
children_variables = variable.get_child_variable_named('obj').get_children_variables()
assert len(children_variables) < _NUMBER_OF_ITEMS_TO_CREATE
found_too_large = False
found_len = False
for x in children_variables:
if x.name == TOO_LARGE_ATTR:
var_data = x.get_var_data()
assert 'readOnly' in var_data['presentationHint']['attributes']
found_too_large = True
elif x.name == GENERATED_LEN_ATTR_NAME:
found_len = True
if not found_too_large:
raise AssertionError('Expected to find variable named: %s' % (TOO_LARGE_ATTR,))
if not found_len:
raise AssertionError('Expected to find variable named: len()')
| epl-1.0 | -1,478,246,996,531,170,300 | 44.119718 | 206 | 0.616513 | false |
CuriouslyCurious/shatterednippon | scripts/province_generator.py | 1 | 2478 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from PIL import Image
import os
import codecs
if ("scripts" in os.getcwd()):
cwd = os.getcwd().replace("\\scripts", "")
else:
cwd = os.getcwd()
print(cwd)
prov_dir = cwd+"\\shatterednippon\\history\\provinces\\"
local_dir = cwd+"\\shatterednippon\\localisation\\"
map_dir = cwd+"\\shatterednippon\\map\\"
local_file = open(local_dir + "prov_names_l_english.yml", "w")
local_file.write("l_english:\n")
definitions = open(map_dir + "definition.csv", "w")
definitions.write("province;red;green;blue;x;x\n")
im = Image.open(map_dir+"provinces.bmp")
colors = []
image_size = im.size
pixel = im.load()
land_tiles = 0
sea_tiles = 0
"""
for y in range (image_size[1]):
for x in range (image_size[0]):
pixel_color = pixel[x, y]
if pixel_color not in colors and not (pixel_color == (255,255,255) or pixel_color == (0,0,0)): # excluding pure white and black
colors.append(pixel)
if pixel_color[2] > 0:
sea_tiles += 1
else:
land_tiles += 1
"""
colors = im.getcolors(maxcolors=10000) # Arbitrary maxcolors number
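# Image.getcolors() returns (count, (r, g, b)) tuples, hence the color[1] unpacking below.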
for color in colors:
color = color[1]
if color[2] > 0:
sea_tiles += 1
else:
land_tiles += 1
print("land: ", land_tiles)
print("sea: ", sea_tiles)
provinces = len(colors)
x = 0
for color in colors:
color = color[1]
if color[2] == 0:
out = "%d - PROV%d" % (x+1, x+1)
"""
letter = (x%26) + 65
out = "%d - " % (x+1)
if x > 25:
out += chr((x//26) + 64)
out += chr(letter)
"""
if (x < land_tiles):
f = open(prov_dir + out + ".txt", "w")
f.write (
"""# {0}
owner = JAP
add_core = JAP
controller = JAP
is_city = yes
hre = no
religion = shinto
culture = japanese
base_tax = 2
base_production = 2
base_manpower = 2
trade_goods = silk
capital = "{1}"
discovered_by = chinese""".format(out, out.split(" - ")[1]))
local_file.write(' PROV{0}:0 "{1}"\n'.format(x+1, out.split(" - ")[1]))
definitions.write("{0};{1};{2};{3};;{4}\n".format(x+1, color[0], color[1], color[2], out.split(" - ")[1]))
#definitions.write("{0};{1};{2};{3};;{4}\n".format(x+1, color[1][0], color[1][1], color[1][2], out.split(" - ")[1]))
f.close()
x += 1
local_file.close()
definitions.close()
print (str(x) + " provinces defined.")
| mit | -6,176,161,133,252,635,000 | 22.6 | 135 | 0.541566 | false |
youfoh/webkit-efl | Tools/Scripts/webkitpy/tool/commands/rebaseline.py | 1 | 20517 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import optparse
import os.path
import re
import shutil
import sys
import urllib
import webkitpy.common.config.urls as config_urls
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.net.buildbot import BuildBot
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.user import User
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.models.test_expectations import TestExpectations, suffixes_for_expectations, BASELINE_SUFFIX_LIST
from webkitpy.layout_tests.port import builders
from webkitpy.tool.grammar import pluralize
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
_log = logging.getLogger(__name__)
# FIXME: Should TestResultWriter know how to compute this string?
def _baseline_name(fs, test_name, suffix):
return fs.splitext(test_name)[0] + TestResultWriter.FILENAME_SUFFIX_EXPECTED + "." + suffix
class AbstractRebaseliningCommand(AbstractDeclarativeCommand):
def __init__(self, options=None):
options = options or []
options.extend([
optparse.make_option('--suffixes', default=','.join(BASELINE_SUFFIX_LIST), action='store',
help='file types to rebaseline')])
AbstractDeclarativeCommand.__init__(self, options=options)
self._baseline_suffix_list = BASELINE_SUFFIX_LIST
class RebaselineTest(AbstractRebaseliningCommand):
name = "rebaseline-test-internal"
help_text = "Rebaseline a single test from a buildbot. Only intended for use by other webkit-patch commands."
def __init__(self):
options = [
optparse.make_option("--builder", help="Builder to pull new baselines from"),
optparse.make_option("--platform-to-move-to", help="Platform to move existing baselines to before rebaselining. This is for dealing with bringing up new ports that interact with non-tree portions of the fallback graph."),
optparse.make_option("--test", help="Test to rebaseline"),
]
AbstractRebaseliningCommand.__init__(self, options=options)
self._scm_changes = {'add': []}
def _results_url(self, builder_name):
return self._tool.buildbot_for_builder_name(builder_name).builder_with_name(builder_name).latest_layout_test_results_url()
def _baseline_directory(self, builder_name):
port = self._tool.port_factory.get_from_builder_name(builder_name)
override_dir = builders.rebaseline_override_dir(builder_name)
if override_dir:
return self._tool.filesystem.join(port.layout_tests_dir(), 'platform', override_dir)
return port.baseline_version_dir()
def _copy_existing_baseline(self, platforms_to_move_existing_baselines_to, test_name, suffix):
old_baselines = []
new_baselines = []
# Need to gather all the baseline paths before modifying the filesystem since
# the modifications can affect the results of port.expected_filename.
for platform in platforms_to_move_existing_baselines_to:
port = self._tool.port_factory.get(platform)
old_baseline = port.expected_filename(test_name, "." + suffix)
if not self._tool.filesystem.exists(old_baseline):
_log.info("No existing baseline for %s." % test_name)
continue
new_baseline = self._tool.filesystem.join(port.baseline_path(), self._file_name_for_expected_result(test_name, suffix))
if self._tool.filesystem.exists(new_baseline):
_log.info("Existing baseline at %s, not copying over it." % new_baseline)
continue
old_baselines.append(old_baseline)
new_baselines.append(new_baseline)
for i in range(len(old_baselines)):
old_baseline = old_baselines[i]
new_baseline = new_baselines[i]
_log.info("Copying baseline from %s to %s." % (old_baseline, new_baseline))
self._tool.filesystem.maybe_make_directory(self._tool.filesystem.dirname(new_baseline))
self._tool.filesystem.copyfile(old_baseline, new_baseline)
if not self._tool.scm().exists(new_baseline):
self._add_to_scm(new_baseline)
def _save_baseline(self, data, target_baseline):
if not data:
return
filesystem = self._tool.filesystem
filesystem.maybe_make_directory(filesystem.dirname(target_baseline))
filesystem.write_binary_file(target_baseline, data)
if not self._tool.scm().exists(target_baseline):
self._add_to_scm(target_baseline)
def _add_to_scm(self, path):
self._scm_changes['add'].append(path)
def _update_expectations_file(self, builder_name, test_name):
port = self._tool.port_factory.get_from_builder_name(builder_name)
expectations = TestExpectations(port, include_overrides=False)
for test_configuration in port.all_test_configurations():
if test_configuration.version == port.test_configuration().version:
expectationsString = expectations.remove_configuration_from_test(test_name, test_configuration)
self._tool.filesystem.write_text_file(port.path_to_test_expectations_file(), expectationsString)
def _test_root(self, test_name):
return os.path.splitext(test_name)[0]
def _file_name_for_actual_result(self, test_name, suffix):
return "%s-actual.%s" % (self._test_root(test_name), suffix)
def _file_name_for_expected_result(self, test_name, suffix):
return "%s-expected.%s" % (self._test_root(test_name), suffix)
def _rebaseline_test(self, builder_name, test_name, platforms_to_move_existing_baselines_to, suffix):
results_url = self._results_url(builder_name)
baseline_directory = self._baseline_directory(builder_name)
source_baseline = "%s/%s" % (results_url, self._file_name_for_actual_result(test_name, suffix))
target_baseline = self._tool.filesystem.join(baseline_directory, self._file_name_for_expected_result(test_name, suffix))
if platforms_to_move_existing_baselines_to:
self._copy_existing_baseline(platforms_to_move_existing_baselines_to, test_name, suffix)
_log.info("Retrieving %s." % source_baseline)
self._save_baseline(self._tool.web.get_binary(source_baseline, convert_404_to_None=True), target_baseline)
def _rebaseline_test_and_update_expectations(self, builder_name, test_name, platforms_to_move_existing_baselines_to):
for suffix in self._baseline_suffix_list:
self._rebaseline_test(builder_name, test_name, platforms_to_move_existing_baselines_to, suffix)
self._update_expectations_file(builder_name, test_name)
def execute(self, options, args, tool):
self._baseline_suffix_list = options.suffixes.split(',')
self._rebaseline_test_and_update_expectations(options.builder, options.test, options.platform_to_move_to)
print json.dumps(self._scm_changes)
class OptimizeBaselines(AbstractRebaseliningCommand):
name = "optimize-baselines"
help_text = "Reshuffles the baselines for the given tests to use as litte space on disk as possible."
argument_names = "TEST_NAMES"
def _optimize_baseline(self, test_name):
for suffix in self._baseline_suffix_list:
baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
if not self._baseline_optimizer.optimize(baseline_name):
print "Hueristics failed to optimize %s" % baseline_name
def execute(self, options, args, tool):
self._baseline_suffix_list = options.suffixes.split(',')
self._baseline_optimizer = BaselineOptimizer(tool)
self._port = tool.port_factory.get("chromium-win-win7") # FIXME: This should be selectable.
for test_name in self._port.tests(args):
print "Optimizing %s." % test_name
self._optimize_baseline(test_name)
class AnalyzeBaselines(AbstractRebaseliningCommand):
name = "analyze-baselines"
help_text = "Analyzes the baselines for the given tests and prints results that are identical."
argument_names = "TEST_NAMES"
def _print(self, baseline_name, directories_by_result):
for result, directories in directories_by_result.items():
if len(directories) <= 1:
continue
results_names = [self._tool.filesystem.join(directory, baseline_name) for directory in directories]
print ' '.join(results_names)
def _analyze_baseline(self, test_name):
for suffix in self._baseline_suffix_list:
baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
directories_by_result = self._baseline_optimizer.directories_by_result(baseline_name)
self._print(baseline_name, directories_by_result)
def execute(self, options, args, tool):
self._baseline_suffix_list = options.suffixes.split(',')
self._baseline_optimizer = BaselineOptimizer(tool)
self._port = tool.port_factory.get("chromium-win-win7") # FIXME: This should be selectable.
for test_name in self._port.tests(args):
self._analyze_baseline(test_name)
class AbstractParallelRebaselineCommand(AbstractDeclarativeCommand):
def __init__(self, options=None):
options = options or []
options.extend([
optparse.make_option('--no-optimize', dest='optimize', action='store_false', default=True,
help=('Do not optimize/de-dup the expectations after rebaselining '
'(default is to de-dup automatically). '
'You can use "webkit-patch optimize-baselines" to optimize separately.'))])
AbstractDeclarativeCommand.__init__(self, options=options)
def _run_webkit_patch(self, args):
try:
self._tool.executive.run_command([self._tool.path()] + args, cwd=self._tool.scm().checkout_root)
except ScriptError, e:
_log.error(e)
def _builders_to_fetch_from(self, builders):
# This routine returns the subset of builders that will cover all of the baseline search paths
# used in the input list. In particular, if the input list contains both Release and Debug
# versions of a configuration, we *only* return the Release version (since we don't save
# debug versions of baselines).
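        # Illustrative example (hypothetical builder names): if the input is
        # ["Mac Release", "Mac Debug"] and both share one fallback path, only
        # "Mac Release" is kept, since debug baselines are never saved.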
release_builders = set()
debug_builders = set()
builders_to_fallback_paths = {}
for builder in builders:
port = self._tool.port_factory.get_from_builder_name(builder)
if port.test_configuration().build_type == 'Release':
release_builders.add(builder)
else:
debug_builders.add(builder)
for builder in list(release_builders) + list(debug_builders):
port = self._tool.port_factory.get_from_builder_name(builder)
fallback_path = port.baseline_search_path()
if fallback_path not in builders_to_fallback_paths.values():
builders_to_fallback_paths[builder] = fallback_path
return builders_to_fallback_paths.keys()
def _rebaseline_commands(self, test_list):
path_to_webkit_patch = self._tool.path()
cwd = self._tool.scm().checkout_root
commands = []
for test in test_list:
for builder in self._builders_to_fetch_from(test_list[test]):
suffixes = ','.join(test_list[test][builder])
cmd_line = [path_to_webkit_patch, 'rebaseline-test-internal', '--suffixes', suffixes, '--builder', builder, '--test', test]
commands.append(tuple([cmd_line, cwd]))
return commands
def _files_to_add(self, command_results):
files_to_add = set()
for output in [result[1].split('\n') for result in command_results]:
file_added = False
for line in output:
try:
files_to_add.update(json.loads(line)['add'])
file_added = True
except ValueError, e:
_log.debug('"%s" is not a JSON object, ignoring' % line)
if not file_added:
_log.debug('Could not add file based off output "%s"' % output)
return list(files_to_add)
def _optimize_baselines(self, test_list):
# We don't run this in parallel because modifying the SCM in parallel is unreliable.
for test in test_list:
all_suffixes = set()
for builder in self._builders_to_fetch_from(test_list[test]):
all_suffixes.update(test_list[test][builder])
self._run_webkit_patch(['optimize-baselines', '--suffixes', ','.join(all_suffixes), test])
def _rebaseline(self, options, test_list):
commands = self._rebaseline_commands(test_list)
command_results = self._tool.executive.run_in_parallel(commands)
files_to_add = self._files_to_add(command_results)
self._tool.scm().add_list(list(files_to_add))
if options.optimize:
self._optimize_baselines(test_list)
class RebaselineJson(AbstractParallelRebaselineCommand):
name = "rebaseline-json"
help_text = "Rebaseline based off JSON passed to stdin. Intended to only be called from other scripts."
def execute(self, options, args, tool):
self._rebaseline(options, json.loads(sys.stdin.read()))
class RebaselineExpectations(AbstractParallelRebaselineCommand):
name = "rebaseline-expectations"
help_text = "Rebaselines the tests indicated in TestExpectations."
def _update_expectations_file(self, port_name):
port = self._tool.port_factory.get(port_name)
# FIXME: This will intentionally skip over any REBASELINE expectations that were in an overrides file.
# This is not good, but avoids having the overrides getting written into the main file.
# See https://bugs.webkit.org/show_bug.cgi?id=88456 for context. This will no longer be needed
# once we properly support cascading expectations files.
expectations = TestExpectations(port, include_overrides=False)
path = port.path_to_test_expectations_file()
self._tool.filesystem.write_text_file(path, expectations.remove_rebaselined_tests(expectations.get_rebaselining_failures()))
def _tests_to_rebaseline(self, port):
tests_to_rebaseline = {}
expectations = TestExpectations(port, include_overrides=True)
for test in expectations.get_rebaselining_failures():
tests_to_rebaseline[test] = suffixes_for_expectations(expectations.get_expectations(test))
return tests_to_rebaseline
def _add_tests_to_rebaseline_for_port(self, port_name):
builder_name = builders.builder_name_for_port_name(port_name)
if not builder_name:
return
tests = self._tests_to_rebaseline(self._tool.port_factory.get(port_name)).items()
if tests:
_log.info("Retrieving results for %s from %s." % (port_name, builder_name))
for test_name, suffixes in tests:
_log.info(" %s (%s)" % (test_name, ','.join(suffixes)))
if test_name not in self._test_list:
self._test_list[test_name] = {}
self._test_list[test_name][builder_name] = suffixes
def execute(self, options, args, tool):
self._test_list = {}
for port_name in tool.port_factory.all_port_names():
self._add_tests_to_rebaseline_for_port(port_name)
self._rebaseline(options, self._test_list)
for port_name in tool.port_factory.all_port_names():
self._update_expectations_file(port_name)
class Rebaseline(AbstractParallelRebaselineCommand):
name = "rebaseline"
help_text = "Rebaseline tests with results from the build bots. Shows the list of failing tests on the builders if no test names are provided."
argument_names = "[TEST_NAMES]"
def __init__(self):
options = [
optparse.make_option("--builders", default=None, action="append", help="Comma-separated-list of builders to pull new baselines from (can also be provided multiple times)"),
optparse.make_option("--suffixes", default=BASELINE_SUFFIX_LIST, action="append", help="Comma-separated-list of file types to rebaseline (can also be provided multiple times)"),
]
AbstractParallelRebaselineCommand.__init__(self, options=options)
def _builders_to_pull_from(self):
chromium_buildbot_builder_names = []
webkit_buildbot_builder_names = []
for name in builders.all_builder_names():
if self._tool.port_factory.get_from_builder_name(name).is_chromium():
chromium_buildbot_builder_names.append(name)
else:
webkit_buildbot_builder_names.append(name)
titles = ["build.webkit.org bots", "build.chromium.org bots"]
lists = [webkit_buildbot_builder_names, chromium_buildbot_builder_names]
chosen_names = self._tool.user.prompt_with_multiple_lists("Which builder to pull results from:", titles, lists, can_choose_multiple=True)
return [self._builder_with_name(name) for name in chosen_names]
def _builder_with_name(self, name):
return self._tool.buildbot_for_builder_name(name).builder_with_name(name)
def _tests_to_update(self, builder):
failing_tests = builder.latest_layout_test_results().tests_matching_failure_types([test_failures.FailureTextMismatch])
return self._tool.user.prompt_with_list("Which test(s) to rebaseline for %s:" % builder.name(), failing_tests, can_choose_multiple=True)
def _suffixes_to_update(self, options):
suffixes = set()
for suffix_list in options.suffixes:
suffixes |= set(suffix_list.split(","))
return list(suffixes)
def execute(self, options, args, tool):
if options.builders:
builders = []
for builder_names in options.builders:
builders += [self._builder_with_name(name) for name in builder_names.split(",")]
else:
builders = self._builders_to_pull_from()
test_list = {}
for builder in builders:
tests = args or self._tests_to_update(builder)
for test in tests:
if test not in test_list:
test_list[test] = {}
test_list[test][builder.name()] = self._suffixes_to_update(options)
if options.verbose:
print "rebaseline-json: " + str(test_list)
self._rebaseline(options, test_list)
| lgpl-2.1 | -7,490,709,868,916,791,000 | 47.161972 | 233 | 0.670371 | false |
Netflix/repokid | repokid/exceptions.py | 1 | 1253 | # Copyright 2020 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RoleError(Exception):
pass
class UnexpectedDynamoUpdateValue(Exception):
pass
class BlocklistError(Exception):
pass
class AardvarkError(Exception):
pass
class RoleNotFoundError(RoleError):
pass
class MissingRepoableServices(RoleError):
pass
class RoleStoreError(RoleError):
pass
class IAMError(Exception):
pass
class ModelError(AttributeError):
pass
class DynamoDBError(Exception):
pass
class IntegrityError(Exception):
pass
class NotFoundError(Exception):
pass
class DynamoDBMaxItemSizeError(Exception):
pass
class IAMActionError(Exception):
pass
| apache-2.0 | -2,718,314,803,365,845,000 | 17.701493 | 78 | 0.725459 | false |
lizardsystem/lizard-waterbalance | lizard_waterbalance/management/commands/compute_export_tests.py | 1 | 3294 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Implements tests for the command to compute and export a waterbalance."""
# This package implements the management commands for lizard-waterbalance Django
# app.
#
# Copyright (C) 2011 Nelen & Schuurmans
#
# This package is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this package. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
import unittest
from mock import Mock
from lizard_waterbalance.models import WaterbalanceArea
from lizard_waterbalance.models import WaterbalanceConf
from lizard_waterbalance.models import WaterbalanceScenario
from lizard_waterbalance.management.commands.compute_export import Command
from timeseries.timeseriesstub import TimeseriesStub
class CommandTests(unittest.TestCase):
def notest_a(self):
"""Test handle creates a waterbalance computer for the right config."""
area = WaterbalanceArea()
area.name = "Aetsveldse polder Oost"
area.save()
scenario = WaterbalanceScenario()
scenario.name = "import"
scenario.save()
configuration = WaterbalanceConf()
configuration.waterbalance_area = area
configuration.waterbalance_scenario = scenario
configuration.calculation_start_date = datetime(2011, 10, 26)
configuration.calculation_end_date = datetime(2011, 10, 26)
configuration.save()
command = Command()
command.create_computer = Mock()
command.handle("aetsveldse-polder-oost", "import")
configuration = command.create_computer.call_args[0][0]
self.assertEqual(configuration.waterbalance_area.slug, "aetsveldse-polder-oost")
self.assertEqual(configuration.waterbalance_scenario.slug, "import")
def notest_b(self):
"""Test handle passes the correct dates to the waterbalance computer."""
area = WaterbalanceArea()
area.name = "Aetsveldse polder Oost"
area.save()
scenario = WaterbalanceScenario()
scenario.name = "import"
scenario.save()
configuration = WaterbalanceConf()
configuration.waterbalance_area = area
configuration.waterbalance_scenario = scenario
configuration.calculation_start_date = datetime(2011, 10, 26)
configuration.calculation_end_date = datetime(2011, 10, 26)
configuration.save()
computer = Mock()
computer.compute = Mock()
command = Command()
command.create_computer = Mock(return_value=computer)
command.handle("aetsveldse-polder-oost", "import")
        self.assertEqual(computer.compute.call_count, 1)
        self.assertEqual(configuration.waterbalance_area.slug, "aetsveldse-polder-oost")
self.assertEqual(configuration.waterbalance_scenario.slug, "import")
| gpl-3.0 | 8,607,615,110,271,120,000 | 37.752941 | 113 | 0.713418 | false |
google-research/google-research | smurf/data/generic_flow_dataset.py | 1 | 4197 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader for generic flow datasets."""
import os
import numpy as np
import tensorflow as tf
from smurf.data import data_utils
# pylint:disable=unused-import
from smurf.data.data_utils import evaluate
from smurf.data.data_utils import list_eval_keys
def make_dataset(path,
mode,
seq_len=2,
shuffle_buffer_size=0,
height=None,
width=None,
resize_gt_flow=True,
gt_flow_shape=None,
seed=41):
"""Make a dataset for training or evaluating SMURF.
Args:
path: string, in the format of 'some/path/dir1,dir2,dir3' to load all files
in some/path/dir1, some/path/dir2, and some/path/dir3.
mode: string, one of ['train', 'eval', 'test'] to switch between loading
training data, evaluation data, and test data, which right now all return
the same data.
seq_len: int length of sequence to return. Currently only 2 is supported.
shuffle_buffer_size: int, size of the shuffle buffer; no shuffling if 0.
height: int, height for reshaping the images (only if mode==train)
width: int, width for reshaping the images (only if mode==train)
resize_gt_flow: bool, indicates if ground truth flow should be resized
      during training or not (only relevant for supervised training)
gt_flow_shape: list, if not None sets a fixed size for ground truth flow
tensor, e.g. [384,512,2]
seed: int, controls the shuffling of the data shards.
Returns:
A tf.dataset of image sequences and ground truth flow for training
(see parse functions above). The dataset still requires batching
and prefetching before using it to make an iterator.
"""
if ',' in path:
paths = []
l = path.split(',')
paths.append(l[0])
for subpath in l[1:]:
subpath_length = len(subpath.split('/'))
basedir = '/'.join(l[0].split('/')[:-subpath_length])
paths.append(os.path.join(basedir, subpath))
else:
paths = [path]
if seq_len != 2:
raise ValueError('for_eval only compatible with seq_len == 2.')
# Generate list of filenames.
# pylint:disable=g-complex-comprehension
files = [
os.path.join(d, f)
for d in paths
for f in tf.io.gfile.listdir(d)
]
if 'train' in mode:
rgen = np.random.RandomState(seed=seed)
rgen.shuffle(files)
num_files = len(files)
ds = tf.data.Dataset.from_tensor_slices(files)
if shuffle_buffer_size:
ds = ds.shuffle(num_files)
# Create a nested dataset.
ds = ds.map(tf.data.TFRecordDataset)
# Parse each element of the subsequences and unbatch the result
# Do interleave rather than flat_map because it is much faster.
include_flow = 'eval' in mode or 'sup' in mode
# pylint:disable=g-long-lambda
ds = ds.interleave(
lambda x: x.map(
lambda y: data_utils.parse_data(
y, include_flow=include_flow, height=height, width=width,
resize_gt_flow=resize_gt_flow, gt_flow_shape=gt_flow_shape),
num_parallel_calls=tf.data.experimental.AUTOTUNE),
cycle_length=min(1 if 'movie' in mode else 10, num_files),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if shuffle_buffer_size:
# Shuffle image pairs.
ds = ds.shuffle(buffer_size=shuffle_buffer_size)
# Put repeat after shuffle for better mixing.
if 'train' in mode:
ds = ds.repeat()
# Prefetch a number of batches because reading new ones can take much longer
# when they are from new files.
ds = ds.prefetch(10)
return ds
| apache-2.0 | -4,638,955,777,177,349,000 | 35.495652 | 79 | 0.674529 | false |
henrikalmer/genre-recognition | plot_mix_responses.py | 1 | 1252 | import sys
import json
import pylab as pl
import numpy as np
from data import load_responses
songs, responses = load_responses()
# group responses on genre
fo = open('data/mix.genres.json', 'r')
genres = json.loads(fo.read())
cmap = {'pop': 0, 'rock': 1, 'reggae': 2, 'jazz': 3, 'classical': 4}
genre_responses = dict((genres[k], responses[k]) for k in songs)
fo.close()
# Print table of mean values
mean_vectors = dict([(s, np.mean(r, axis=0)) for s, r in genre_responses.items()])
std_dev = dict([(s, np.std(r, axis=0)) for s, r in genre_responses.items()])
print "Mean values:"
for s, v in mean_vectors.items():
print "%s\t%s" % (s, '\t'.join(str(val) for val in v))
print "Standard deviation:"
for s, v in std_dev.items():
print "%s\t%s" % (s, '\t'.join(str(val) for val in v))
means = [v[cmap[g]] for g, v in mean_vectors.items()]
std = [v[cmap[g]] for g, v in std_dev.items()]
# Plot mean values with error bars
fig1 = pl.figure(1)
plot1 = fig1.add_subplot(111)
plot1.bar(range(0,5), means, 0.2, color='r', yerr=std)
plot1.ylabel = ('Procent %')
labels = ('Pop', 'Rock', 'Reggae', 'Jazz', 'Klassiskt')
pl.xticks(range(0,5), labels, rotation=20)
pl.ylim([0,100])
pl.xlim([-.5,5])
pl.title('Genreskattningar')
# Show plots
pl.show()
| gpl-2.0 | -2,135,098,435,710,583,600 | 28.809524 | 82 | 0.648562 | false |
MasterAlish/kyrgyz_tili | kg/tests/test_kyimyl_atooch2.py | 1 | 1609 | # coding=utf-8
from kg.lang.etish.kyimyl_atooch import KyimylAtooch2
from kg.lang.lang import KyrgyzWord
from kg.tests import KGTestCase
class KyimylAtooch2Test(KGTestCase):
def test_1(self):
data = self.get_data()
for word, expected_form in data.items():
kyimyl_atooch2 = KyimylAtooch2(KyrgyzWord(word))
self.assertEqual(kyimyl_atooch2.make(), expected_form)
def get_data(self):
return {
u'чий': u'чийиш',
u'теп': u'тебиш',
u'чой': u'чоюш',
u'ал': u'алыш',
u'буруй': u'буруюш',
u'токто': u'токтош',
u'каз': u'казыш',
u'кий': u'кийиш',
u'куй': u'куюш',
u'бойтой': u'бойтоюш',
u'кеңей': u'кеңейиш',
u'сай': u'сайыш',
u'бар': u'барыш',
u'акшый': u'акшыйыш',
u'тап': u'табыш',
u'алай': u'алайыш',
u'кара': u'караш',
u'былчый': u'былчыйыш',
u'ой': u'оюш',
u'жаса': u'жасаш',
u'күй': u'күйүш',
u'ташы': u'ташыш',
u'кыл': u'кылыш',
u'төмпөй': u'төмпөйүш',
u'ич': u'ичиш',
u'ук': u'угуш',
u'сүйө': u'сүйөш',
u'боё': u'боёш',
u'тая': u'таяш',
}
| gpl-3.0 | 123,703,795,323,995,760 | 28.413043 | 66 | 0.458241 | false |
kumpelblase2/rn | Aufgabe2/server/server.py | 1 | 3352 | import socket
import sys
import argparse
from thread import allocate_lock
import time
import select
import requesthandler
import ConfigParser
class Server(object):
def __init__(self, max_clients, port, accounts):
self.max_clients = max_clients
self.port = port
self.accounts = accounts
self.connection = None
self.clients = []
self.shutdown_granted = False
self.lock = allocate_lock()
self.loggedInUsers = {}
def start(self):
self.connection = socket.socket()
try:
self.connection.bind(('0.0.0.0', self.port))
self.connection.listen(1)
except socket.error as error:
print error
self.connection.close()
return error
return None
def on_disconnect(self, client):
self.clients.remove(client)
if self.shutdown_granted and len(self.clients) == 0:
self.close()
def on_client_accept(self, connection):
handler = requesthandler.RequestHandler(connection, self)
handler.start()
self.clients.append(handler)
def wait_clients(self):
while not self.shutdown_granted:
if len(self.clients) < self.max_clients:
print "Waiting"
readable_list = [self.connection]
readable, writeable, errorlist = select.select(readable_list, [], [])
for s in readable:
if s is self.connection:
try:
client_connection, address = self.connection.accept()
print("Accepted client from ", address)
self.on_client_accept(client_connection)
except socket.error as accept_error:
print("Error while accepting client: ", accept_error)
else:
time.sleep(0.1)
def close(self):
try:
self.connection.shutdown(socket.SHUT_RDWR)
self.connection.close()
except:
pass
def try_login(self, username, password):
self.lock.acquire()
state = True
if self.loggedInUsers.__contains__(username) and self.loggedInUsers[username]:
state = False
else:
account = self.get_account(username)
if account:
state = account['password'] == password
else:
state = False
self.lock.release()
return state
def get_account(self, username):
for accountDef in self.accounts:
account = self.accounts[accountDef]
if account['name'] == username:
return account
return None
def set_logged_in(self, username, state):
self.lock.acquire()
self.loggedInUsers[username] = state
self.lock.release()
def load_config():
config = ConfigParser.ConfigParser()
config.read("../config.ini")
servers = {}
for section in config.sections():
values = {}
values['name'] = section
for option in config.options(section):
try:
values[option] = config.get(section, option)
except:
print("exception when parsing", option)
values[option] = None
servers[section] = values
return servers
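# Example of the expected ../config.ini layout (account values are
# hypothetical):
#   [alice]
#   password = secret
# 'name' defaults to the section header unless a 'name' option overrides it.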
def configure():
parser = argparse.ArgumentParser(description="RN Server")
parser.add_argument("--max-clients", '-n', type=int, help="Number of max clients", default=3)
parser.add_argument("--port", "-p", type=int, help="Port", default=1337)
result = parser.parse_args(sys.argv[1:])
server = Server(max_clients=result.max_clients, port=result.port, accounts=load_config())
result = server.start()
return server, result
if __name__ == "__main__":
server, error = configure()
if error:
print("There was an error setting up the server")
else:
print "Awaiting connection"
server.wait_clients()
server.close()
| mit | -5,503,661,005,984,894,000 | 24.203008 | 94 | 0.695406 | false |
blueskycoco/rtt | bsp/stm32f0x/rtconfig.py | 1 | 3479 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m0'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
CROSS_TOOL = 'gcc'
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'C:/Program Files/CodeSourcery/Sourcery_CodeBench_Lite_for_ARM_EABI/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil'
elif CROSS_TOOL == 'iar':
print '================ERROR============================'
    print 'IAR is not supported yet!'
print '================================================='
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
#BUILD = 'debug'
BUILD = 'release'
STM32_TYPE = 'STM32F0XX'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m0 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-stm32.map,-cref,-u,Reset_Handler -T stm32_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --device DARMSTM'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-stm32.map --scatter stm32_rom.sct'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = ' -D USE_STDPERIPH_DRIVER' + ' -D STM32F10X_HD'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --debug'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M0'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + IAR_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' -Ol'
CFLAGS += ' --use_c++_inline'
AFLAGS = ''
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M0'
AFLAGS += ' --fpu None'
LFLAGS = ' --config stm32f0xx_flash.icf'
LFLAGS += ' --redirect _Printf=_PrintfTiny'
LFLAGS += ' --redirect _Scanf=_ScanfSmall'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = IAR_PATH + '/arm/bin/'
POST_ACTION = ''
| gpl-2.0 | -7,060,390,628,054,017,000 | 26.393701 | 130 | 0.543834 | false |
GregorCH/ipet | test/DiffAndEqualTest.py | 1 | 1841 | '''
Created on 05.02.2018
@author: gregor
'''
import unittest
import pandas as pd
import numpy as np
from ipet.evaluation import IPETFilter
Var = "Var"
Val = "Val"
one_equal = "one_equal"
one_diff = "one_diff"
all_equal = "all_equal"
all_diff = "all_diff"
class DiffAndEqualTest(unittest.TestCase):
"""Test list operators 'diff' and 'equal' for IPETFilter class
"""
def testFilter(self):
#
# construct a test data frame with no index and repeating group index column 'Var'
#
testdf = pd.DataFrame(
[
["A", 1.0, False, True, False, True],
["A", 2.0, False, True, False, True],
["A", 3.0, False, True, False, True],
["B", 1.0, True, True, False, False],
["B", 1.0, True, True, False, False],
["B", 2.0, True, True, False, False],
["C", 1.0, True, False, True, False],
["C", 1.0, True, False, True, False],
["C", 1.0, True, False, True, False],
],
columns = [Var, Val, one_equal, one_diff, all_equal, all_diff]
)
#
# test all combinations of anytestrun and operator if the filtered result matches
# the corresponding column
#
for combi in (one_diff, one_equal, all_diff, all_equal):
any, op = combi.split("_")
f = IPETFilter(operator = op, anytestrun = any, datakey = "Val")
f_result = f.applyListOperator(testdf, ["Var"])
self.assertTrue(np.all(f_result["Val"] == testdf[combi]),
"Wrong list operator result for combination {}:\n{}\n!=\n{}\n".format(combi, f_result, testdf[combi]))
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| mit | -6,031,705,541,871,043,000 | 30.20339 | 130 | 0.531233 | false |
CloverHealth/pycon2017 | etl_nested/app/models.py | 1 | 4280 | import sqlalchemy as sa
import sqlalchemy.dialects.postgresql as sa_pg
from sqlalchemy.ext import declarative
import sqlalchemy.orm as sa_orm
from app import constants
from app.util.timestamps import utc_now
__all__ = [
    'init_database',
'Form'
]
SCHEMAS = {
'app': 'clover_app',
'dwh': 'clover_dwh'
}
METADATA = sa.MetaData()
BaseModel = declarative.declarative_base(metadata=METADATA)
def init_database(db: sa.engine.Connectable):
"""
Initializes the database to support the models
:param db: SQLAlchemy connectable instance
"""
# setup the Postgres extensions and schema
db.execute("""
CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA public;
""")
db.execute(
';\n'.join(
'CREATE SCHEMA IF NOT EXISTS {}'.format(s) for s in SCHEMAS.values()
)
)
# create the schema from the models
METADATA.create_all(bind=db)
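# Usage sketch (the connection URL is hypothetical, not part of this module):
#   engine = sa.create_engine('postgresql://localhost/clover_demo')
#   init_database(engine)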
class PrimaryKeyUUIDMixin:
"""
Includes an 'id' primary key UUID column
This is used to generate primary keys using the Postgres database server rather than the application
"""
__repr_details__ = ['id']
id = sa.Column(sa_pg.UUID(as_uuid=True), primary_key=True, server_default=sa.text('uuid_generate_v4()'))
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
', '.join('{}={}'.format(d, str(getattr(self, d))) for d in self.__repr_details__)
)
class ApplicationModel(BaseModel, PrimaryKeyUUIDMixin):
__abstract__ = True
__table_args__ = (
{'schema': SCHEMAS['app']},
)
class DataWarehouseModel(BaseModel, PrimaryKeyUUIDMixin):
__abstract__ = True
__table_args__ = (
{'schema': SCHEMAS['dwh']},
)
class Form(ApplicationModel):
__tablename__ = 'form_schemas'
__repr_details__ = ['name']
name = sa.Column(sa.Text(), nullable=False, unique=True, index=True)
description = sa.Column(sa.Text(), nullable=False)
schema = sa.Column(sa_pg.JSONB, nullable=False)
class User(ApplicationModel):
"""
A user who can create responses to forms
"""
__tablename__ = 'users'
__repr_details__ = ['full_name']
given_name = sa.Column(sa.Text, nullable=False)
family_name = sa.Column(sa.Text, nullable=False)
@property
def full_name(self):
return "%s %s" % (self.given_name, self.family_name)
class Submission(ApplicationModel):
"""
Answers to a form created by a user
"""
__tablename__ = 'form_responses'
form_id = sa.Column(sa.ForeignKey(Form.id), nullable=False)
user_id = sa.Column(sa.ForeignKey(User.id), nullable=False)
responses = sa.Column(sa_pg.JSON, nullable=False)
date_created = sa.Column(sa.DateTime(timezone=True), default=utc_now, nullable=False)
form = sa_orm.relationship(Form)
user = sa_orm.relationship(User)
class ResponseEvent(DataWarehouseModel):
"""
Represents an ETL transform of individual responses
This is an OLAP table where the following is expected:
- No foreign keys
- Redundant data (for faster analytical queries)
"""
__tablename__ = 'response_events'
# form schema information
form_id = sa.Column(sa_pg.UUID(as_uuid=True), nullable=False) # FormType.id
form_name = sa.Column(sa.Text, nullable=False) # FormType.name
# user information
user_id = sa.Column(sa_pg.UUID(as_uuid=True), nullable=False) # User.id
user_full_name = sa.Column(sa.Text, nullable=False) # User.full_name (property)
# submission information
submission_id = sa.Column(sa_pg.UUID(as_uuid=True), nullable=False)
submission_created = sa.Column(sa.DateTime(timezone=True), nullable=False) # Submission.date_created
# transformed properties
processed_on = sa.Column(sa.DateTime(timezone=True), nullable=False) # when this event was created
schema_path = sa.Column(sa.Text, nullable=False) # dot separated path to node in Submission.responses
value = sa.Column(sa.Text, nullable=False) # value of node in Submission.responses
answer_type = sa.Column(sa.Enum(constants.AnswerType), nullable=False) # answerType from node in Schema
tag = sa.Column(sa.Text, nullable=True, default=None) # tag from node in Schema (if exists)
| bsd-3-clause | -5,249,740,182,038,666,000 | 28.93007 | 108 | 0.659813 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/virtual_machine_scale_set_os_disk_py3.py | 1 | 3487 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetOSDisk(Model):
"""Describes a virtual machine scale set operating system disk.
All required parameters must be populated in order to send to Azure.
:param name: Required. The disk name.
:type name: str
:param caching: Specifies the caching requirements. <br><br> Possible
values are: <br><br> **None** <br><br> **ReadOnly** <br><br> **ReadWrite**
<br><br> Default: **None for Standard storage. ReadOnly for Premium
storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite'
:type caching: str or ~azure.mgmt.compute.v2016_03_30.models.CachingTypes
:param create_option: Required. Specifies how the virtual machines in the
scale set should be created.<br><br> The only allowed value is:
**FromImage** \\u2013 This value is used when you are using an image to
create the virtual machine. If you are using a platform image, you also
use the imageReference element described above. If you are using a
marketplace image, you also use the plan element previously described.
Possible values include: 'FromImage', 'Empty', 'Attach'
:type create_option: str or
~azure.mgmt.compute.v2016_03_30.models.DiskCreateOptionTypes
:param os_type: This property allows you to specify the type of the OS
that is included in the disk if creating a VM from user-image or a
specialized VHD. <br><br> Possible values are: <br><br> **Windows**
<br><br> **Linux**. Possible values include: 'Windows', 'Linux'
:type os_type: str or
~azure.mgmt.compute.v2016_03_30.models.OperatingSystemTypes
:param image: The Source User Image VirtualHardDisk. This VirtualHardDisk
will be copied before using it to attach to the Virtual Machine. If
SourceImage is provided, the destination VirtualHardDisk should not exist.
:type image: ~azure.mgmt.compute.v2016_03_30.models.VirtualHardDisk
:param vhd_containers: The list of virtual hard disk container uris.
:type vhd_containers: list[str]
"""
_validation = {
'name': {'required': True},
'create_option': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'caching': {'key': 'caching', 'type': 'CachingTypes'},
'create_option': {'key': 'createOption', 'type': 'DiskCreateOptionTypes'},
'os_type': {'key': 'osType', 'type': 'OperatingSystemTypes'},
'image': {'key': 'image', 'type': 'VirtualHardDisk'},
'vhd_containers': {'key': 'vhdContainers', 'type': '[str]'},
}
def __init__(self, *, name: str, create_option, caching=None, os_type=None, image=None, vhd_containers=None, **kwargs) -> None:
super(VirtualMachineScaleSetOSDisk, self).__init__(**kwargs)
self.name = name
self.caching = caching
self.create_option = create_option
self.os_type = os_type
self.image = image
self.vhd_containers = vhd_containers
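# Usage sketch (values are illustrative only):
#   os_disk = VirtualMachineScaleSetOSDisk(
#       name='osdisk', create_option='FromImage', caching='ReadWrite')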
| mit | 1,544,975,557,484,405,800 | 48.112676 | 131 | 0.649555 | false |
gem/oq-engine | openquake/hazardlib/gsim/lanzano_2019.py | 1 | 24743 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2021 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`LanzanoEtAl2019`.
"""
import numpy as np
from scipy.constants import g
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA
def _get_stddevs(C, stddev_types, num_sites):
"""
Return standard deviations as defined in table 1.
"""
stddevs = []
for stddev_type in stddev_types:
if stddev_type == const.StdDev.TOTAL:
stddevs.append(np.sqrt(C['tau'] ** 2 + C['phi_S2S'] ** 2 +
C['phi_0'] ** 2) + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTER_EVENT:
stddevs.append(C['tau'] + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTRA_EVENT:
stddevs.append(np.sqrt(C['phi_S2S'] ** 2 + C['phi_0'] ** 2) +
np.zeros(num_sites))
return stddevs
def _compute_distance(rup, dists, C):
"""
    Compute the third term of equation 1:
    FD(Mw,R) = [c1 * (Mw - Mref) + c2] * log10(R) + c3 * R    (eq. 4)
    where R = sqrt(rjb**2 + h**2); Mref and h are taken from the
    coefficient table C.
"""
R = np.sqrt(dists**2 + C['h']**2)
return ((C['c1'] * (rup.mag - C['Mref']) + C['c2']) * np.log10(R) +
C['c3']*R)
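# Worked example for _compute_distance (rounded, PGA-like coefficients, for
# illustration only): with c1=0.287, c2=-1.406, c3=-0.0029, Mref=5.32, h=6.92,
# mag=6.0 and rjb=10.0 km: R = sqrt(10**2 + 6.92**2) ~= 12.16 and
# FD ~= (0.287*(6.0 - 5.32) - 1.406)*log10(12.16) - 0.0029*12.16 ~= -1.35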
def _compute_magnitude(rup, C):
"""
    Compute the second term of equation 1:
    a + b1 * (Mw - Mh) for Mw <= Mh
    a + b2 * (Mw - Mh) otherwise
"""
dmag = rup.mag - C["Mh"]
if rup.mag <= C["Mh"]:
mag_term = C['a'] + C['b1'] * dmag
else:
mag_term = C['a'] + C['b2'] * dmag
return mag_term
def _site_amplification(sites, C):
"""
    Compute the fourth term of equation 1:
    the functional form FS in Eq. (1) represents the site amplification and
    is given by FS = k * log10(V0/800), where V0 = Vs30 when Vs30 <= 1500
    and V0 = 1500 m/s otherwise.
"""
v0 = np.ones_like(sites.vs30) * 1500.
v0[sites.vs30 < 1500] = sites.vs30
return C['k'] * np.log10(v0/800)
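# Example: for vs30 = 400 m/s, FS = k * log10(400/800) ~= -0.301 * k; with the
# negative k values in the coefficient table this gives amplification on soil.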
def _get_mechanism(rup, C):
"""
Compute the part of the second term of the equation 1 (FM(SoF)):
Get fault type dummy variables
"""
SS, TF, NF = _get_fault_type_dummy_variables(rup)
return C['f1'] * SS + C['f2'] * TF
def _get_fault_type_dummy_variables(rup):
"""
Fault type (Strike-slip, Normal, Thrust/reverse) is
derived from rake angle.
    Rake angles within 30 degrees of horizontal are strike-slip,
angles from 30 to 150 are reverse, and angles from
-30 to -150 are normal.
"""
SS, TF, NF = 0, 0, 0
if np.abs(rup.rake) <= 30.0 or (180.0 - np.abs(rup.rake)) <= 30.0:
# strike-slip
SS = 1
elif rup.rake > 30.0 and rup.rake < 150.0:
# reverse
TF = 1
else:
# normal
NF = 1
return SS, TF, NF
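# Examples (rake in degrees): rake=0 or 180 -> strike-slip (SS=1); rake=90 ->
# reverse (TF=1); rake=-90 -> normal (NF=1), which acts as the reference class
# in _get_mechanism (only SS and TF carry coefficients).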
class LanzanoEtAl2019_RJB_OMO(GMPE):
"""
    Implements GMPE developed by G.Lanzano, L.Luzi, F.Pacor,
C.Felicetta, R.Puglia, S. Sgobba, M. D'Amico and published as "A Revised
Ground-Motion Prediction Model for Shallow Crustal Earthquakes in Italy",
Bull Seismol. Soc. Am., DOI 10.1785/0120180210
SA are given up to 10 s.
The prediction is valid for RotD50, which is the median of the
distribution of the intensity measures, obtained from the combination
of the two horizontal components across all nonredundant azimuths
(Boore, 2010).
"""
#: Supported tectonic region type is 'active shallow crust' because the
#: equations have been derived from data from Italian database ITACA, as
#: explained in the 'Introduction'.
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Set of :mod:`intensity measure types <openquake.hazardlib.imt>`
#: this GSIM can calculate. A set should contain classes from module
#: :mod:`openquake.hazardlib.imt`.
DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGA, PGV, SA}
#: Supported intensity measure component is orientation-independent
#: measure :attr:`~openquake.hazardlib.const.IMC.RotD50`
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.RotD50
#: Supported standard deviation types are inter-event, intra-event
#: and total, page 1904
DEFINED_FOR_STANDARD_DEVIATION_TYPES = {
const.StdDev.TOTAL, const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT}
#: Required site parameter is only Vs30
REQUIRES_SITES_PARAMETERS = {'vs30'}
#: Required rupture parameters are magnitude and rake (eq. 1).
REQUIRES_RUPTURE_PARAMETERS = {'rake', 'mag'}
#: Required distance measure is R Joyner-Boore distance (eq. 1).
REQUIRES_DISTANCES = {'rjb'}
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
if self.REQUIRES_DISTANCES == {'rjb'}:
d = dists.rjb
else:
d = dists.rrup
imean = (_compute_magnitude(rup, C) +
_compute_distance(rup, d, C) +
_site_amplification(sites, C) +
_get_mechanism(rup, C))
istddevs = _get_stddevs(C, stddev_types, num_sites=len(sites.vs30))
# Convert units to g, but only for PGA and SA (not PGV):
if imt.string.startswith(("SA", "PGA")):
mean = np.log((10.0 ** (imean - 2.0)) / g)
else:
# PGV:
mean = np.log(10.0 ** imean)
# Return stddevs in terms of natural log scaling
stddevs = np.log(10.0 ** np.array(istddevs))
# mean_LogNaturale = np.log((10 ** mean) * 1e-2 / g)
return mean, stddevs
#: Coefficients from SA PGA and PGV from esupp Table S2
COEFFS = CoeffsTable(sa_damping=5, table="""
IMT a b1 b2 c1 c2 c3 k f1 f2 tau phi_S2S phi_0 Mh Mref h
pga 3.4210464090 0.1939540900 -0.0219827770 0.2871492910 -1.4056354760 -0.0029112640 -0.3945759700 0.0859837430 0.0105002390 0.1559878330 0.2205815790 0.2000991410 5.5000000000 5.3239727140 6.9237429440
pgv 2.0774292740 0.3486332380 0.1359129150 0.2840909830 -1.4565164760 -0.0005727360 -0.5927640710 0.0410782350 -0.0123124280 0.1388529950 0.1641479240 0.1938530300 5.7000000000 5.0155451980 5.9310213910
0.010 3.4245483320 0.1925159840 -0.0226504290 0.2875277900 -1.4065574040 -0.0029092280 -0.3936344950 0.0859882130 0.0104732970 0.1561741750 0.2207225690 0.2001331220 5.5000000000 5.3265568770 6.9261983550
0.025 3.4831620980 0.1745072330 -0.0303191060 0.2917712270 -1.4243608230 -0.0028437300 -0.3808661290 0.0869007960 0.0121168920 0.1585446770 0.2231805710 0.2008657270 5.5000000000 5.3726995880 6.9273792970
0.040 3.6506006610 0.1159102530 -0.0646660020 0.3111117150 -1.4695119300 -0.0027310440 -0.3429284420 0.0870477700 0.0161177530 0.1644920090 0.2324602390 0.2038917110 5.5000000000 5.4968889680 6.9815887950
0.050 3.7315797180 0.0938111470 -0.0847276080 0.3184743710 -1.4684793000 -0.0029101640 -0.3201326870 0.0900141460 0.0129486380 0.1679197090 0.2388540270 0.2068943560 5.5000000000 5.5554373580 7.1218137630
0.070 3.8298265420 0.0775399420 -0.1074506180 0.3219830820 -1.4693089940 -0.0034922110 -0.2775266740 0.1027835460 0.0229271220 0.1761561100 0.2512937930 0.2101610770 5.5000000000 5.5053847230 7.2904858360
0.100 3.8042169810 0.1360109680 -0.0692203330 0.2908601720 -1.4016627520 -0.0043005780 -0.2686743880 0.1147824540 0.0248269350 0.1787478720 0.2637458450 0.2113961580 5.5000000000 5.3797044570 7.2742555760
0.150 3.6500641550 0.2565050720 0.0271400960 0.2339551240 -1.3111751160 -0.0047018230 -0.3207560810 0.1109474740 0.0198659050 0.1666766970 0.2596149390 0.2113112910 5.5000000000 5.0965762810 6.6927744070
0.200 3.5441076850 0.3561477800 0.0934922750 0.1983575680 -1.2809085750 -0.0045122270 -0.3768139640 0.0942130060 0.0116640180 0.1611613280 0.2493593750 0.2085199270 5.5000000000 4.8016422440 6.1273995880
0.250 3.4904108560 0.4258794620 0.1431007860 0.1768779550 -1.2710203890 -0.0038947210 -0.4275803190 0.0803226570 0.0097630190 0.1541437520 0.2349586720 0.2074459640 5.5000000000 4.7851094040 6.0907948160
0.300 3.4415379890 0.4717747480 0.1926037060 0.1614915210 -1.2949801370 -0.0032193060 -0.4770515440 0.0776675640 0.0061077540 0.1465825190 0.2248859230 0.2059316980 5.5000000000 4.7167541960 5.9795025500
0.350 3.3446630670 0.5062658120 0.2151211470 0.1564621150 -1.3178170520 -0.0029990590 -0.5306569440 0.0728397670 0.0026993430 0.1410870920 0.2175844480 0.2080023630 5.5000000000 4.3812120300 5.8130994320
0.400 3.2550575400 0.5331242140 0.2421620470 0.1502822370 -1.2806103220 -0.0027428760 -0.5562808430 0.0661760590 0.0011870680 0.1351999940 0.2142019330 0.2045660900 5.5000000000 4.4598958150 5.8073330520
0.450 3.3642504070 0.5364578580 0.1855438960 0.1489823740 -1.3018257500 -0.0022889740 -0.5950316360 0.0648499220 0.0049044230 0.1282711800 0.2116409250 0.2038392300 5.8000000000 4.4733992810 5.9505143630
0.500 3.3608504670 0.5595158750 0.2002091480 0.1445889550 -1.3577631940 -0.0018214290 -0.6175021300 0.0643336580 0.0049344710 0.1292553150 0.2101486370 0.2021378460 5.8000000000 4.3061718270 6.0827633150
0.600 3.3138586220 0.6159734570 0.2429526950 0.1308776180 -1.3751116050 -0.0011783100 -0.6515274580 0.0517509190 -0.0106807380 0.1388319340 0.2085483340 0.2012532670 5.8000000000 4.2621864430 6.0960486570
0.700 3.2215424560 0.6410331910 0.2631217720 0.1310231460 -1.3777586170 -0.0008288090 -0.6770253130 0.0348343350 -0.0138034390 0.1487445760 0.2078712150 0.1990177160 5.8000000000 4.2242791970 5.8705686780
0.750 3.1945748660 0.6615384790 0.2753805270 0.1279582150 -1.3816587680 -0.0006332620 -0.6770002780 0.0325604250 -0.0106144310 0.1493281120 0.2061474600 0.1985444200 5.8000000000 4.2193032080 5.9399226070
0.800 3.1477172010 0.6744754580 0.2843168320 0.1274454970 -1.3805238730 -0.0005387910 -0.6807607950 0.0301501140 -0.0093150580 0.1488858080 0.2059923330 0.1975251810 5.8000000000 4.1788159560 5.9158308810
0.900 3.0438692320 0.6960808380 0.2908389870 0.1307696640 -1.3712299710 -0.0003650810 -0.6901600210 0.0243867030 -0.0057274610 0.1510220880 0.2088059530 0.1964681960 5.8000000000 4.1280019240 5.6499915110
1.000 2.9329562820 0.7162569260 0.2992085610 0.1330221520 -1.3581003000 -0.0003481280 -0.7010380780 0.0187836090 -0.0026838270 0.1498799880 0.2099740670 0.1952706350 5.8000000000 4.0068764960 5.4265347610
1.200 2.7969754630 0.7522683610 0.3148914470 0.1356882390 -1.3418915980 -0.0001946160 -0.7211447760 0.0156692770 -0.0123682580 0.1475708640 0.2085469600 0.1935369570 5.8000000000 4.0000000000 5.2114400990
1.400 2.6681627290 0.7789439750 0.3310958850 0.1374053210 -1.3265422970 -0.0001071290 -0.7304122120 0.0122846810 -0.0159220670 0.1480430620 0.2089391760 0.1905401100 5.8000000000 4.0000000000 5.0911883420
1.600 2.5723270160 0.7847328080 0.3394239090 0.1454225100 -1.3308582950 0.0000000000 -0.7386216010 0.0034499080 -0.0231247190 0.1468224080 0.2119887010 0.1888323370 5.8000000000 4.0644421250 5.1206266020
1.800 2.4933386330 0.7900020080 0.3305433860 0.1542283440 -1.3289912520 0.0000000000 -0.7538191680 -0.0079587620 -0.0354487870 0.1517555390 0.2125975420 0.1861583190 5.8000000000 4.1264090540 5.2737078390
2.000 2.4060176790 0.7777348120 0.3199509080 0.1684793150 -1.3282655150 0.0000000000 -0.7472001440 -0.0111369970 -0.0375300390 0.1533446260 0.2112262090 0.1855430060 5.8000000000 4.2174770140 5.3910987520
2.500 2.2251396500 0.7789914250 0.3280727550 0.1827792890 -1.3593977940 0.0000000000 -0.7332744950 -0.0298755170 -0.0447073420 0.1581459890 0.2057405400 0.1873131960 5.8000000000 4.0841192840 5.2885431340
3.000 2.0653645110 0.7855377910 0.3585874760 0.1917372820 -1.3622291610 -0.0000725000 -0.6907295050 -0.0523142100 -0.0534721760 0.1730562270 0.2046940180 0.1856376420 5.8000000000 4.0000000000 5.0089807590
3.500 1.9413692760 0.8006822910 0.3924715050 0.2003105480 -1.3459808710 -0.0003295060 -0.6572701800 -0.0831135690 -0.0497671120 0.1694808560 0.2000002880 0.1858647730 5.8000000000 4.0000000000 5.2239249130
4.000 1.8088893770 0.7742293710 0.3863288840 0.2209746660 -1.3605497440 -0.0004514760 -0.6361325920 -0.0850828750 -0.0481922640 0.1729190890 0.1933427470 0.1876984700 5.8000000000 4.0000000000 5.1428287170
4.500 1.7067047740 0.7606577820 0.3932273220 0.2318655310 -1.3607064390 -0.0005424670 -0.6212289540 -0.0851787910 -0.0420861940 0.1750836140 0.1912528510 0.1875258320 5.8000000000 4.0000000000 4.9944908560
5.000 1.5674508510 0.7351540960 0.4075899440 0.2444741770 -1.3443973430 -0.0006142880 -0.5996128590 -0.0740372190 -0.0294935120 0.1724655580 0.1849939070 0.1920775290 5.8000000000 4.0995166500 4.9182635170
6.000 1.8015664050 0.6866068140 0.2400330900 0.2681399590 -1.4273047180 -0.0004079660 -0.5582643820 -0.0530155580 -0.0281879710 0.1608258320 0.1827343650 0.1868738640 6.3000000000 4.0725997780 5.6196373890
7.000 1.6596668010 0.6688108030 0.2910039860 0.2736804460 -1.4575752030 -0.0002092330 -0.5293913010 -0.0164879330 -0.0063757230 0.1639920950 0.1793061350 0.1785781870 6.3000000000 4.0597872070 5.3393074950
8.000 1.5146417080 0.6053146580 0.2927231020 0.3021009530 -1.4528220690 -0.0001882700 -0.5054615800 0.0012388470 -0.0011382590 0.1605307940 0.1737530120 0.1769475170 6.3000000000 4.2884159230 5.4984545260
9.000 1.4186859130 0.5413850170 0.2751627760 0.3283351620 -1.4351308790 0.0000000000 -0.5015172920 0.0083605610 0.0036314410 0.1593645820 0.1666775610 0.1771272580 6.3000000000 4.5884949620 6.0000000000
10.000 1.3142120360 0.4897308100 0.2536297690 0.3484436940 -1.4421713740 0.0000000000 -0.4867303450 0.0170019340 0.0044164240 0.1580884750 0.1616666450 0.1776399420 6.3000000000 4.6826704140 6.2391199410
""")
class LanzanoEtAl2019_RUP_OMO(LanzanoEtAl2019_RJB_OMO):
"""
    Implements GMPE developed by G.Lanzano, L.Luzi, F.Pacor,
C.Felicetta, R.Puglia, S. Sgobba, M. D'Amico and published as "A Revised
Ground-Motion Prediction Model for Shallow Crustal Earthquakes in Italy",
Bull Seismol. Soc. Am., DOI 10.1785/0120180210
SA are given up to 10 s.
The prediction is valid for RotD50, which is the median of the
distribution of the intensity measures, obtained from the combination
of the two horizontal components across all nonredundant azimuths
(Boore, 2010).
"""
#: Supported tectonic region type is 'active shallow crust' because the
#: equations have been derived from data from Italian database ITACA, as
#: explained in the 'Introduction'.
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Set of :mod:`intensity measure types <openquake.hazardlib.imt>`
#: this GSIM can calculate. A set should contain classes from module
#: :mod:`openquake.hazardlib.imt`.
DEFINED_FOR_INTENSITY_MEASURE_TYPES = {PGA, PGV, SA}
#: Supported intensity measure component is orientation-independent
#: measure :attr:`~openquake.hazardlib.const.IMC.RotD50`
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.RotD50
#: Supported standard deviation types are inter-event, intra-event
#: and total, page 1904
DEFINED_FOR_STANDARD_DEVIATION_TYPES = {
const.StdDev.TOTAL, const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT}
#: Required site parameter is only Vs30
REQUIRES_SITES_PARAMETERS = {'vs30'}
#: Required rupture parameters are magnitude and rake (eq. 1).
REQUIRES_RUPTURE_PARAMETERS = {'rake', 'mag'}
#: Required distance measure is Rrup (eq. 1).
REQUIRES_DISTANCES = {'rrup'}
#: Coefficients from SA PGA and PGV from esupp Table S2
COEFFS = CoeffsTable(sa_damping=5, table="""
IMT a b1 b2 c1 c2 c3 k f1 f2 tau phi_S2S phi_0 Mh Mref h
pga 3.8476009130 0.0774422740 -0.1419991420 0.3478652700 -1.5533187520 -0.0018762870 -0.3804756380 0.0981863920 0.0312839980 0.1614849070 0.2214693590 0.2009857770 5.5000000000 5.7161225870 6.6412174580
pgv 2.3828051810 0.2389279260 0.0261097410 0.3406251950 -1.5178700950 0.0000000000 -0.5766806200 0.0496574190 0.0048867220 0.1377338650 0.1657577940 0.1947190870 5.7000000000 5.4986237650 5.2603202210
0.010 3.8506248160 0.0758687500 -0.1428322710 0.3483279440 -1.5537399740 -0.0018758340 -0.3795182780 0.0982026160 0.0312888740 0.1617017580 0.2216042230 0.2010196680 5.5000000000 5.7182953400 6.6339536220
0.025 3.9192103880 0.0557696980 -0.1527646590 0.3537205430 -1.5755362770 -0.0017839260 -0.3665232370 0.0994148360 0.0335230700 0.1648739390 0.2239781630 0.2017348520 5.5000000000 5.7583429090 6.6418028640
0.040 4.1174892770 -0.0090338610 -0.1935502850 0.3762847240 -1.6561808690 -0.0015888760 -0.3280283020 0.1003977920 0.0391489610 0.1728423350 0.2330030290 0.2047022980 5.5000000000 5.7989637460 6.7563889150
0.050 4.2100749050 -0.0339492180 -0.2162534440 0.3850707690 -1.6588279250 -0.0017350640 -0.3051389660 0.1038403080 0.0364310640 0.1769569220 0.2393492360 0.2072790280 5.5000000000 5.8513503470 6.9511244450
0.070 4.3116802080 -0.0505614890 -0.2390930110 0.3886131870 -1.6360072930 -0.0023201630 -0.2625774900 0.1169554080 0.0468045100 0.1850495830 0.2515478690 0.2106976190 5.5000000000 5.8718716570 7.2254468560
0.100 4.2619091410 0.0155135150 -0.1931111810 0.3536108950 -1.5607240070 -0.0031932490 -0.2541922120 0.1287730480 0.0481946680 0.1870755970 0.2641319770 0.2116384010 5.5000000000 5.7816875540 7.1942049600
0.150 4.0281333720 0.1490530750 -0.0842910460 0.2904596360 -1.4558853220 -0.0038111280 -0.3065865390 0.1234294560 0.0409920340 0.1747944550 0.2597923620 0.2119010310 5.5000000000 5.5070413070 6.0448362270
0.200 3.9581561800 0.2581085140 -0.0067616210 0.2493181400 -1.4304030950 -0.0035049250 -0.3631567460 0.1054336950 0.0308675300 0.1663095650 0.2500575700 0.2094881320 5.5000000000 5.4083470680 6.0814859680
0.250 3.8975164920 0.3349956980 0.0504825640 0.2239371150 -1.4165129430 -0.0028978730 -0.4143360740 0.0906817050 0.0279033120 0.1577890900 0.2357231150 0.2084723930 5.5000000000 5.4514190000 6.0143888830
0.300 3.8389631040 0.3840688150 0.1030589990 0.2069719800 -1.4440780970 -0.0022541340 -0.4636965900 0.0874129150 0.0234314860 0.1496049650 0.2259333260 0.2073687490 5.5000000000 5.3968851350 5.8135245350
0.350 3.7427724390 0.4229320630 0.1304612770 0.1993820250 -1.4408251060 -0.0020338970 -0.5174986310 0.0820839430 0.0191395270 0.1437104080 0.2185074890 0.2098701290 5.5000000000 5.2806552370 5.8177492120
0.400 3.6333013750 0.4525776370 0.1603621910 0.1917091960 -1.4190236990 -0.0018301470 -0.5434295700 0.0748159970 0.0169681240 0.1358758520 0.2149494900 0.2064883230 5.5000000000 5.2222009260 5.6501186180
0.450 3.7154781180 0.4556396130 0.1097027610 0.1893950510 -1.4373487560 -0.0013790170 -0.5822090800 0.0724614960 0.0200828160 0.1287203560 0.2124451610 0.2057455270 5.8000000000 5.2478823960 5.7811054530
0.500 3.7225644930 0.4791000150 0.1250270520 0.1847593340 -1.4792888180 -0.0008746350 -0.6048678230 0.0719131380 0.0201771040 0.1286518970 0.2113216850 0.2036659890 5.8000000000 5.2517779510 5.9416879950
0.600 3.6682670680 0.5366335520 0.1687213280 0.1707059500 -1.5049666160 -0.0002411380 -0.6392187290 0.0588865330 0.0046486850 0.1365657160 0.2100293700 0.2024728710 5.8000000000 5.2219439350 5.7575653430
0.700 3.5476098040 0.5605925270 0.1871321630 0.1717388940 -1.4920154380 0.0000000000 -0.6643250560 0.0414345280 0.0015640500 0.1444444740 0.2094865120 0.2002723840 5.8000000000 5.1693165540 5.2232086450
0.750 3.4860153280 0.5835516220 0.2009753460 0.1676162740 -1.4705858310 0.0000000000 -0.6633662960 0.0390747580 0.0045913530 0.1444192940 0.2075499490 0.1999247850 5.8000000000 5.1608011770 5.2390714490
0.800 3.4153176700 0.5963140190 0.2090305350 0.1674214930 -1.4563908280 0.0000000000 -0.6668025460 0.0364159000 0.0057644770 0.1435650390 0.2072863380 0.1990343260 5.8000000000 5.1084013460 5.0192508660
0.900 3.2837755070 0.6203647060 0.2174322510 0.1695628290 -1.4268589930 0.0000000000 -0.6750760810 0.0302972660 0.0092171050 0.1439597930 0.2100453700 0.1980967980 5.8000000000 5.0273018570 4.6888779800
1.000 3.1646298450 0.6394268750 0.2244466880 0.1725257020 -1.4104378560 0.0000000000 -0.6858604010 0.0243862220 0.0120110190 0.1425963490 0.2117095410 0.1964539980 5.8000000000 4.9152918370 4.2786484540
1.200 3.0000699100 0.6752604010 0.2398000250 0.1754340770 -1.3847402050 0.0000000000 -0.7049869270 0.0203886700 0.0020858610 0.1376747050 0.2106367700 0.1947050440 5.8000000000 4.8219418190 3.8902573240
1.400 2.8548239110 0.7016598380 0.2555322610 0.1774234610 -1.3693052940 0.0000000000 -0.7139420780 0.0169071570 -0.0016271820 0.1352870520 0.2110771360 0.1914515130 5.8000000000 4.7438528440 3.6282580540
1.600 2.7452884200 0.7087902600 0.2642955350 0.1848978730 -1.3616920690 0.0000000000 -0.7215074520 0.0083121970 -0.0083006310 0.1331921300 0.2141947700 0.1895359950 5.8000000000 4.7549230280 3.7025291230
1.800 2.6642129620 0.7154152980 0.2560921110 0.1931546230 -1.3507839540 0.0000000000 -0.7371011570 -0.0017321440 -0.0190618180 0.1382147940 0.2147123860 0.1868917710 5.8000000000 4.8130574510 3.9589480220
2.000 2.5756862730 0.7028435510 0.2447144810 0.2076118600 -1.3471065740 0.0000000000 -0.7307501910 -0.0050040160 -0.0212358510 0.1397875260 0.2129299180 0.1864771670 5.8000000000 4.8506617380 4.1479373910
2.500 2.3963959400 0.7033050700 0.2518818530 0.2222780470 -1.3743185070 0.0000000000 -0.7168917870 -0.0246343480 -0.0290936910 0.1437343890 0.2068222070 0.1887206470 5.8000000000 4.7203537770 4.1181389600
3.000 2.2442760420 0.7064408310 0.2805790070 0.2323944870 -1.3938825540 0.0000000000 -0.6755767110 -0.0449346600 -0.0392006000 0.1579916440 0.2051285540 0.1872462500 5.8000000000 4.5967141280 3.6676358910
3.500 2.1509457620 0.7197140890 0.3144950490 0.2408879210 -1.3056875070 0.0000000000 -0.6454403000 -0.0712686810 -0.0377413380 0.1574415450 0.2005062730 0.1876227070 5.8000000000 5.0000000000 3.9746700550
4.000 2.0269129410 0.6873092210 0.3037401830 0.2643086690 -1.3612857890 0.0000000000 -0.6250815320 -0.0736555330 -0.0363730290 0.1610380970 0.1940097180 0.1895939010 5.8000000000 4.8167431120 3.5842582520
4.500 1.9350799290 0.6687716310 0.3061467880 0.2778468310 -1.3144751280 0.0000000000 -0.6110294070 -0.0740593630 -0.0294989060 0.1623295570 0.1927623170 0.1893881480 5.8000000000 5.0000000000 3.2644687160
5.000 1.8090192480 0.6410330200 0.3183807710 0.2915021780 -1.3209096320 0.0000000000 -0.5897990580 -0.0632488020 -0.0172750890 0.1600134890 0.1872621910 0.1941054980 5.8000000000 5.0000000000 3.3548060430
6.000 1.9455190300 0.5901995040 0.1564201050 0.3140022010 -1.3375771520 0.0000000000 -0.5478118740 -0.0424599370 -0.0125854850 0.1509581730 0.1838901820 0.1900203310 6.3000000000 5.0000000000 3.9006202840
7.000 1.7832223090 0.5733237300 0.2019467610 0.3199016260 -1.3374591120 0.0000000000 -0.5197861950 -0.0045757050 0.0111162730 0.1555189840 0.1794302890 0.1819988840 6.3000000000 5.0000000000 3.7233318770
8.000 1.6472982850 0.5073993280 0.2006409390 0.3494261080 -1.4463813400 0.0000000000 -0.4956266160 0.0136222610 0.0172305930 0.1529394340 0.1736945870 0.1803254150 6.3000000000 4.8103931580 4.3526246000
9.000 1.5105710010 0.4450324910 0.1830610430 0.3751346350 -1.4367324130 0.0000000000 -0.4912014160 0.0208945970 0.0217267490 0.1529569490 0.1662440990 0.1804191720 6.3000000000 4.9295888090 4.5509858920
10.000 1.3966806560 0.3900867860 0.1589602600 0.3968394420 -1.4232531770 0.0000000000 -0.4765713040 0.0296164880 0.0222468600 0.1525077910 0.1614679730 0.1808181160 6.3000000000 5.0403227000 4.5998115120
""")
| agpl-3.0 | -4,709,211,860,352,065,000 | 76.799363 | 206 | 0.746029 | false |
vishakh/metamkt | metamkt/metamkt/views.py | 1 | 10905 | from cornice import Service
import datetime
import hashlib
import transaction
import convert
from models import Action, DBSession, Entity, EntityType, Event, Group, Order, PriceChange, Shares, Transaction, User, \
UserData, ValueChange
# ********** Cornice Services ********** #
root = Service(name='index', path='/', description="Metamkt")
action = Service(name='action', path='/actions/{action}', description="Action")
entity_type = Service(name="entity_type", path="/entity_types/{entity_type}", description="Entity Type")
events = Service(name='events', path='/events', description="All Events")
event1 = Service(name='event', path='/events/{event}', description="Event")
group1 = Service(name='group', path='/groups/{group}', description="Group")
orders = Service(name='orders', path='/orders', description="All Orders")
order = Service(name='order', path='/orders/{order}', description="Order")
player = Service(name="player", path="/players/{player}", description="Player")
team = Service(name="team", path="/teams/{team}", description="Team")
user = Service(name='user', path='/users/{user}', description="User")
# ********** View Functions ********** #
@root.get()
def get_info(request):
return {'Hello': 'World'}
@action.delete()
def action_delete(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'action')
action = dbsession.query(Action).filter(Action.name == name).one()
dbsession.delete(action)
transaction.commit()
return {'status': 'success'}
@action.get()
def action_get(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'action')
action = dbsession.query(Action).filter(Action.name == name).one()
action_json = convert.decodeAction(request, dbsession, action)
return {'status': 'success', 'action': action_json}
@action.put()
def action_put(request):
dbsession = DBSession()
action = Action()
action.name = clean_matchdict_value(request, 'action')
action.description = clean_param_value(request, 'description')
action.points = clean_param_value(request, 'points')
action.timestamp = get_timestamp()
dbsession.add(action)
transaction.commit()
return {'status': 'success'}
@entity_type.delete()
def entity_type_delete(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'entity_type')
entity_type = dbsession.query(EntityType).filter(EntityType.name == name).one()
dbsession.delete(entity_type)
transaction.commit()
return {'status': 'success'}
@entity_type.get()
def entity_type_get(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'entity_type')
entity_type = dbsession.query(EntityType).filter(EntityType.name == name).one()
entity_type_json = convert.decodeEntityType(request, dbsession, entity_type)
return {'status': 'success', 'entity_type': entity_type_json}
@entity_type.put()
def entity_type_put(request):
dbsession = DBSession()
entity_type = EntityType()
entity_type.name = clean_matchdict_value(request, 'entity_type')
entity_type.timestamp = get_timestamp()
dbsession.add(entity_type)
transaction.commit()
return {'status': 'success'}
@event1.delete()
def event_delete(request):
dbsession = DBSession()
id = clean_matchdict_value(request, 'event')
event = dbsession.query(Event).filter(Event.id == id).one()
dbsession.delete(event)
transaction.commit()
return {'status': 'success'}
@event1.get()
def event_get(request):
dbsession = DBSession()
id = clean_matchdict_value(request, 'event')
event = dbsession.query(Event).filter(Event.id == id).one()
event_json = convert.decodeEvent(request, dbsession, event)
return {'status': 'success', 'event': event_json}
@events.put()
def event(request):
dbsession = DBSession()
event = Event()
event.entity_id = clean_param_value(request, 'entity_id')
event.action_id = clean_param_value(request, 'action_id')
event.quantity = clean_param_value(request, 'quantity')
event.description = clean_param_value(request, 'description')
event.timestamp = get_timestamp()
hash = hashlib.md5(event.entity_id + event.action_id + event.quantity + str(event.timestamp))\
.hexdigest()
event.hash = hash
dbsession.add(event)
transaction.commit()
event = dbsession.query(Event).filter(Event.hash == hash).one()
event_json = convert.decodeEvent(request, dbsession, event)
return {'status': 'success', 'event': event_json}
@group1.delete()
def group_delete(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'group')
group = dbsession.query(Group).filter(Group.name == name).one()
dbsession.delete(group)
transaction.commit()
return {'status': 'success'}
@group1.get()
def group_get(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'group')
group = dbsession.query(Group).filter(Group.name == name).one()
group_json = convert.decodeGroup(request, dbsession, group)
return {'status': 'success', 'group': group_json}
@group1.put()
def group(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'group')
group = Group()
group.name = name
group.timestamp = get_timestamp()
dbsession.add(group)
transaction.commit()
return {'status': 'success'}
@order.delete()
def order_delete(request):
dbsession = DBSession()
    id = clean_matchdict_value(request, 'order')
order = dbsession.query(Order).filter(Order.id == id).one()
dbsession.delete(order)
transaction.commit()
return {'status': 'success'}
@order.get()
def order_get(request):
dbsession = DBSession()
id = clean_matchdict_value(request, 'order')
order = dbsession.query(Order).filter(Order.id == id).one()
order_json = convert.decodeOrder(request, dbsession, order)
return {'status': 'success', 'order': order_json}
@orders.put()
def orders_put(request):
dbsession = DBSession()
order = Order()
order.entity_id = clean_param_value(request, 'entity_id')
order.user_id = clean_param_value(request, 'user_id')
order.quantity = clean_param_value(request, 'quantity')
order.minPrice = clean_param_value(request, 'minPrice')
order.maxPrice = clean_param_value(request, 'maxPrice')
order.buyOrSell = clean_param_value(request, 'buyOrSell')
order.active = 1
order.timestamp = get_timestamp()
hash = hashlib.md5(order.entity_id + order.user_id + str(order.timestamp)).hexdigest()
order.hash = hash
dbsession.add(order)
transaction.commit()
order = dbsession.query(Order).filter(Order.hash == hash).one()
order_json = convert.decodeOrder(request, dbsession, order)
return {'status': 'success', 'order': order_json}
@player.delete()
def player_delete(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'player')
entity = dbsession.query(Entity).filter(Entity.name == name).filter(Entity.parent_id != None).one()
dbsession.delete(entity)
transaction.commit()
return {'status': 'success'}
@player.get()
def player_get(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'player')
entity = dbsession.query(Entity).filter(Entity.name == name).filter(Entity.parent_id != None).one()
entity_json = convert.decodePlayer(request, dbsession, entity)
return {'status': 'success', 'player': entity_json}
@player.put()
def player_put(request):
dbsession = DBSession()
entity = Entity()
entity.name = clean_matchdict_value(request, 'player')
entity.entityType_id = getPlayerTypeID()
entity.group_id = clean_param_value(request, 'group_id')
entity.parent_id = clean_param_value(request, 'parent_id')
entity.price = 100
entity.touched = False
entity.timestamp = get_timestamp()
dbsession.add(entity)
transaction.commit()
return {'status': 'success'}
@team.delete()
def team_delete(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'team')
entity = dbsession.query(Entity).filter(Entity.name == name).filter(Entity.parent_id == None).one()
dbsession.delete(entity)
transaction.commit()
return {'status': 'success'}
@team.get()
def team_get(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'team')
entity = dbsession.query(Entity).filter(Entity.name == name).filter(Entity.parent_id == None).one()
entity_json = convert.decodeTeam(request, dbsession, entity)
return {'status': 'success', 'team': entity_json}
@team.put()
def team_put(request):
dbsession = DBSession()
entity = Entity()
entity.name = clean_matchdict_value(request, 'team')
entity.entityType_id = getTeamTypeID()
entity.group_id = clean_param_value(request, 'group_id')
entity.parent_id = None
entity.price = 100
entity.touched = False
entity.timestamp = get_timestamp()
dbsession.add(entity)
transaction.commit()
return {'status': 'success'}
@user.delete()
def user_delete(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'user')
user = dbsession.query(User).filter(User.name == name).one()
dbsession.delete(user)
transaction.commit()
return {'status': 'success'}
@user.get()
def user_get(request):
dbsession = DBSession()
name = clean_matchdict_value(request, 'user')
user = dbsession.query(User).filter(User.name == name).one()
user_data = dbsession.query(UserData).filter(UserData.user_id == user.id).one()
user_json = convert.decodeUser(request, dbsession, user, user_data)
return {'status': 'success', 'user': user_json}
@user.put()
def user_put(request):
dbsession = DBSession()
username = clean_matchdict_value(request, 'user')
user = User()
user.name = username
user.email = request.params['email']
user.salt = 'salt'
user.password = 'password'
user.timestamp = get_timestamp()
dbsession.add(user)
user_data = UserData()
user_data.user_id = user.id
user_data.cash = 10000
user_data.value = 10000
user_data.points = 0
user_data.timestamp = get_timestamp()
dbsession.add(user_data)
transaction.commit()
return {'status': 'success'}
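# Example of exercising the user endpoints above (hypothetical host/port and
# values, shown for illustration only):
#   PUT    http://localhost:6543/users/jane_doe?email=jane%40example.com
#          -> creates the User plus a UserData row (10000 cash, 0 points)
#   GET    http://localhost:6543/users/jane_doe
#          -> {'status': 'success', 'user': {...}}
#   DELETE http://localhost:6543/users/jane_doe
#          -> removes the user row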
# ********** Cornice Validators ********** #
# ********** Utility Functions ********** #
def getTeamTypeID():
return DBSession().query(EntityType).filter(EntityType.name == 'team').first().id
def getPlayerTypeID():
return DBSession().query(EntityType).filter(EntityType.name == 'player').first().id
def get_timestamp():
return datetime.datetime.utcnow()
def clean_matchdict_value(request, key):
return request.matchdict[key].replace('_', ' ')
def clean_param_value(request, key):
return request.params[key].replace('_', ' ') | lgpl-3.0 | -3,825,956,341,952,392,700 | 31.362018 | 120 | 0.67602 | false |
CENDARI/editorsnotes | editorsnotes/example-settings_local.py | 1 | 5075 | ######################
# Required variables #
######################
SECRET_KEY = ''
POSTGRES_DB = {
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}
NERD_SERVICE_SERVER = 'http://traces1.saclay.inria.fr/nerd/service/processNERDText'
SEMANTIC_STORE = 'Sleepycat'
#SEMANTIC_PATH = os.path.abspath(os.path.join(STORAGE_PATH, 'rdfstore'))
#SEMANTIC_STORE = 'Virtuoso'
#SEMANTIC_NAMESPACE = 'http://localhost:8000/'
SEMANTIC_NAMESPACE = 'http://resources.cendari.dariah.eu/'
VIRTUOSO = {
'dba_password': 'dba',
'dav_password': 'dav',
'HOST': 'localhost'
}
CENDARI_DATA_API_SERVER = "http://localhost:42042/v1/"
LDAP_GROUP_MAPS = {
'admin_groups': '***semicolon-separated list of group names***',
'editor_groups': '***semicolon-separated list of group names***',
'contributor_groups': '***semicolon-separated list of group names***',
'user_groups': '***semicolon-separated list of group names***'
}
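# Example (purely illustrative group names -- substitute the groups defined in
# your own LDAP directory):
# LDAP_GROUP_MAPS = {
#     'admin_groups': 'cendari-admins;site-operators',
#     'editor_groups': 'cendari-editors',
#     'contributor_groups': 'cendari-contributors',
#     'user_groups': 'cendari-users'
# }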
ELASTICSEARCH_ENABLED = True
# Each ElasticSearch index created will be prefixed with this string.
ELASTICSEARCH_PREFIX = 'editorsnotes'
# As defined in pyelasticsearch, ELASTICSEARCH_URLS should be:
#
# A URL or iterable of URLs of ES nodes. These are full URLs with port numbers,
# like ``http://elasticsearch.example.com:9200``.
#
ELASTICSEARCH_URLS = 'http://127.0.0.1:9200'
# The base URL for your site, with protocol, hostname, and port (if not 80 for
# http or 443 for https). This will be used to construct fully-qualified URLs
# from hyperlinks in the Elasticsearch index.
ELASTICSEARCH_SITE = 'http://127.0.0.1:8000'
SITE_URL = '127.0.0.1'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('My Name', '[email protected]'),
)
MANAGERS = ADMINS
#############
# Overrides #
#############
# TIME_ZONE = ''
# LANGUAGE_CODE = ''
# DATETIME_FORMAT = ''
# USE_L10N = True
# USE_I18N = True
# Edit STORAGE_PATH to change where uploads, static files, and search indexes
# will be stored, or change each of the settings individually.
# STORAGE_PATH = ''
# MEDIA_ROOT = ''
# STATIC_ROOT = ''
# Point this to the Less CSS compiler if it is not on PATH
# LESSC_BINARY = ''
######################
# Optional variables #
######################
# Set the following variables to connect to an instance of Open Refine and
# enable clustering topics and documents.
GOOGLE_REFINE_HOST = '127.0.0.1'
GOOGLE_REFINE_PORT = '3333'
# Set the following to be able to write all Zotero data to a central library.
ZOTERO_API_KEY = ''
ZOTERO_LIBRARY = ''
# Define locally installed apps here
LOCAL_APPS = (
'editorsnotes_app',
'cendari',
'djadmin2',
'djadmin2.themes.djadmin2theme_default', # for the default theme
'rest_framework', # for the browsable API templates
'floppyforms', # For HTML5 form fields
'crispy_forms', # Required for the default theme's layout
# 'djangoChat',
)
ROOT_URLCONF = 'cendari.urls'
## CENDARI ADD
IIPSRV = 'http://localhost/fcgi-bin/iipsrv.fcgi'
RQ_QUEUES = {
'high': {
'USE_REDIS_CACHE': 'redis-cache',
},
'low': {
'USE_REDIS_CACHE': 'redis-cache',
},
## CENDARI ADD
#IIPSRV = 'http://cendari.saclay.inria.fr/fcgi-bin/iipsrv.fcgi'
#RQ_QUEUES = {
'default': {
'HOST': 'localhost',
'PORT': 6379,
'DB': 0,
}
}
# for use with django development server only. DO NOT USE IN PRODUCTION
SENDFILE_BACKEND = 'sendfile.backends.development'
#
# "simple" backend that uses Django file objects to attempt to stream files
# from disk (note middleware may cause files to be loaded fully into memory)
#
#SENDFILE_BACKEND = 'sendfile.backends.simple'
#
# sets X-Sendfile header (as used by mod_xsendfile/apache and lighthttpd)
#SENDFILE_BACKEND = 'sendfile.backends.xsendfile'
#
# sets Location with 200 code to trigger internal redirect (daemon
# mode mod_wsgi only - see below)
#SENDFILE_BACKEND = 'sendfile.backends.mod_wsgi'
#
# sets X-Accel-Redirect header to trigger internal redirect to file
#SENDFILE_BACKEND = 'sendfile.backends.nginx'
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
'level': 'INFO',
},
'editorsnotes': {
'handlers': ['console'],
'level': 'DEBUG',
},
'cendari': {
'handlers': ['console'],
'level': 'DEBUG',
},
'cendari.semantic': {
'handlers': ['console'],
'level': 'DEBUG',
},
'cendari.utils': {
'handlers': ['console'],
'level': 'DEBUG',
}
}
}
| agpl-3.0 | 3,544,613,883,434,611,700 | 24.375 | 95 | 0.606502 | false |
ldkge/visfx | spark/recommendations/points.py | 1 | 4041 | from pyspark import SparkContext, SparkConf, StorageLevel
from pyspark.mllib.linalg import Vectors
import numpy as np
import json
from numpy.linalg import svd
from elasticsearch_interface \
import get_es_rdd, save_es_rdd, get_currency_pair_dict
from utils import parse_range, parse_dates, modify_record
def create_proximity_matrix(rdd, w=[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0]]):
keys = []
feature_matrix = []
features = rdd.map(lambda (key, body): (key, [
w[0][0]*w[0][1]*body['pc_trade_count'],
w[0][0]*w[0][2]*body['pc_trade_duration'],
w[0][0]*w[0][3]*body['pc_net_pnl'],
w[1][0]*w[1][1]*body['p_trade_count'],
w[1][0]*w[1][2]*body['p_trade_duration'],
w[1][0]*w[1][3]*body['p_net_pnl'],
w[2][0]*w[2][1]*body['c_trade_count'],
w[2][0]*w[2][2]*body['c_trade_duration'],
w[2][0]*w[2][3]*body['c_net_pnl'],
w[3][0]*w[3][1]*body['l_trade_count'],
w[3][0]*w[3][2]*body['l_trade_duration'],
w[3][0]*w[3][3]*body['l_net_pnl']])).collect()
for line in features:
keys.append(line[0])
feature_matrix.append(line[1])
return keys, np.cov(np.array(feature_matrix))
def mds(p):
n = len(p)
P = np.square(p)
J = np.eye(n) - (1.0 / n) * np.ones((n, n))
    B = -0.5 * J.dot(P).dot(J)  # note: (-1/2) floors to -1 under Python 2 integer division
U, s, _ = svd(B)
return np.dot(U.T[0:2].T, np.diag(s[0:2]))
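# Illustrative check of the classical-MDS routine above (hypothetical data,
# not executed by the Spark job): three collinear points should be recovered,
# up to rotation/reflection, from their pairwise-distance matrix.
#   p = np.array([[0.0, 1.0, 2.0],
#                 [1.0, 0.0, 1.0],
#                 [2.0, 1.0, 0.0]])
#   coords = mds(p)   # shape (3, 2); inter-row distances approximate p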
def zip_keys(sc, mds, keys):
mds = sc.parallelize(mds.tolist()).zipWithIndex().map(lambda x: (x[1], x[0]))
keys = sc.parallelize(keys).zipWithIndex().map(lambda x: (x[1], x[0]))
return keys.join(mds) \
.map(lambda x: x[1])
start_date = '2015-05-01'
end_date = '2015-05-07'
if __name__ == '__main__':
conf = SparkConf().setAppName('Compute MDS')
sc = SparkContext(conf=conf)
query = json.dumps({
'query': {
'bool': {
'must': [
{ 'term': { 'start_date': start_date }},
{ 'term': { 'end_date': end_date }}
]
}
}
})
rdd, _ = get_es_rdd(sc, query=query, index='forex/feature') \
.randomSplit([1.0, 9.0], seed=0L)
ratings = get_es_rdd(sc, query=query, index='forex/rating')
weights = ratings.map(lambda (_, body): [
[body['weights']['trade_ratings'],
body['weights']['pc_trade_count'],
body['weights']['pc_trade_duration'],
body['weights']['pc_net_pnl']],
[body['weights']['provider_ratings'],
body['weights']['p_trade_count'],
body['weights']['p_trade_duration'],
body['weights']['p_net_pnl']],
[body['weights']['currency_ratings'],
body['weights']['c_trade_count'],
body['weights']['c_trade_duration'],
body['weights']['c_net_pnl']],
[body['weights']['country_ratings'],
body['weights']['l_trade_count'],
body['weights']['l_trade_duration'],
body['weights']['l_net_pnl']]]).top(1)[0]
k, p = create_proximity_matrix(rdd)
m = mds(p)
points = zip_keys(sc, m, k)
k, p = create_proximity_matrix(rdd, weights)
wm = mds(p)
weighted_points = zip_keys(sc, wm, k)
pp = sc.parallelize(points.join(weighted_points).collect())
pr = pp.join(ratings) \
.map(lambda (key, ((p, wp), body)):
(key, {
'point_id': key,
'provider_id': body['provider_id'],
'currency_pair': body['currency_pair'],
'transaction_type': body['transaction_type'],
'country': body['country'],
'start_date': body['start_date'],
'end_date': body['end_date'],
'rating': body['rating'],
'x': p[0],
'y': p[1],
'wx': wp[0],
'wy': wp[1]
}))
save_es_rdd(pr, 'forex/point', key='point_id')
| mit | 1,229,958,409,515,253,800 | 33.836207 | 81 | 0.497402 | false |
LS80/script.module.html2text | lib/html2text/__init__.py | 1 | 28791 | #!/usr/bin/env python
# coding: utf-8
"""html2text: Turn HTML into equivalent Markdown-structured text."""
from __future__ import division
import re
import cgi
try:
from textwrap import wrap
except ImportError: # pragma: no cover
pass
from html2text.compat import urlparse, HTMLParser
from html2text import config
from html2text.utils import (
name2cp,
unifiable_n,
google_text_emphasis,
google_fixed_width_font,
element_style,
hn,
google_has_height,
escape_md,
google_list_style,
list_numbering_start,
dumb_css_parser,
escape_md_section,
skipwrap
)
__version__ = (2015, 6, 21)
# TODO:
# Support decoded entities with UNIFIABLE.
class HTML2Text(HTMLParser.HTMLParser):
def __init__(self, out=None, baseurl='', bodywidth=config.BODY_WIDTH):
"""
Input parameters:
out: possible custom replacement for self.outtextf (which
appends lines of text).
baseurl: base URL of the document we process
"""
HTMLParser.HTMLParser.__init__(self)
# Config options
self.split_next_td = False
self.td_count = 0
self.table_start = False
self.unicode_snob = config.UNICODE_SNOB # covered in cli
self.escape_snob = config.ESCAPE_SNOB # covered in cli
self.links_each_paragraph = config.LINKS_EACH_PARAGRAPH
self.body_width = bodywidth # covered in cli
self.skip_internal_links = config.SKIP_INTERNAL_LINKS # covered in cli
self.inline_links = config.INLINE_LINKS # covered in cli
self.protect_links = config.PROTECT_LINKS # covered in cli
self.google_list_indent = config.GOOGLE_LIST_INDENT # covered in cli
self.ignore_links = config.IGNORE_ANCHORS # covered in cli
self.ignore_images = config.IGNORE_IMAGES # covered in cli
self.images_to_alt = config.IMAGES_TO_ALT # covered in cli
self.images_with_size = config.IMAGES_WITH_SIZE # covered in cli
self.ignore_emphasis = config.IGNORE_EMPHASIS # covered in cli
self.bypass_tables = config.BYPASS_TABLES # covered in cli
self.google_doc = False # covered in cli
self.ul_item_mark = '*' # covered in cli
self.emphasis_mark = '_' # covered in cli
self.strong_mark = '**'
self.single_line_break = config.SINGLE_LINE_BREAK # covered in cli
self.use_automatic_links = config.USE_AUTOMATIC_LINKS # covered in cli
self.hide_strikethrough = False # covered in cli
self.mark_code = config.MARK_CODE
if out is None: # pragma: no cover
self.out = self.outtextf
else: # pragma: no cover
self.out = out
# empty list to store output characters before they are "joined"
self.outtextlist = []
self.quiet = 0
self.p_p = 0 # number of newline character to print before next output
self.outcount = 0
self.start = 1
self.space = 0
self.a = []
self.astack = []
self.maybe_automatic_link = None
self.empty_link = False
self.absolute_url_matcher = re.compile(r'^[a-zA-Z+]+://')
self.acount = 0
self.list = []
self.blockquote = 0
self.pre = 0
self.startpre = 0
self.code = False
self.br_toggle = ''
self.lastWasNL = 0
self.lastWasList = False
self.style = 0
self.style_def = {}
self.tag_stack = []
self.emphasis = 0
self.drop_white_space = 0
self.inheader = False
self.abbr_title = None # current abbreviation definition
self.abbr_data = None # last inner HTML (for abbr being defined)
self.abbr_list = {} # stack of abbreviations to write later
self.baseurl = baseurl
try:
del unifiable_n[name2cp('nbsp')]
except KeyError:
pass
config.UNIFIABLE['nbsp'] = ' _place_holder;'
def feed(self, data):
data = data.replace("</' + 'script>", "</ignore>")
HTMLParser.HTMLParser.feed(self, data)
def handle(self, data):
self.feed(data)
self.feed("")
return self.optwrap(self.close())
def outtextf(self, s):
self.outtextlist.append(s)
if s:
self.lastWasNL = s[-1] == '\n'
def close(self):
HTMLParser.HTMLParser.close(self)
try:
nochr = unicode('')
except NameError:
nochr = str('')
self.pbr()
self.o('', 0, 'end')
outtext = nochr.join(self.outtextlist)
if self.unicode_snob:
try:
nbsp = unichr(name2cp('nbsp'))
except NameError:
nbsp = chr(name2cp('nbsp'))
else:
try:
nbsp = unichr(32)
except NameError:
nbsp = chr(32)
try:
outtext = outtext.replace(unicode(' _place_holder;'), nbsp)
except NameError:
outtext = outtext.replace(' _place_holder;', nbsp)
# Clear self.outtextlist to avoid memory leak of its content to
# the next handling.
self.outtextlist = []
return outtext
def handle_charref(self, c):
charref = self.charref(c)
if not self.code and not self.pre:
charref = cgi.escape(charref)
self.handle_data(charref, True)
def handle_entityref(self, c):
entityref = self.entityref(c)
if (not self.code and not self.pre
and entityref != ' _place_holder;'):
entityref = cgi.escape(entityref)
self.handle_data(entityref, True)
def handle_starttag(self, tag, attrs):
self.handle_tag(tag, attrs, 1)
def handle_endtag(self, tag):
self.handle_tag(tag, None, 0)
def previousIndex(self, attrs):
"""
:type attrs: dict
:returns: The index of certain set of attributes (of a link) in the
self.a list. If the set of attributes is not found, returns None
:rtype: int
"""
if 'href' not in attrs: # pragma: no cover
return None
i = -1
for a in self.a:
i += 1
match = 0
if ('href' in a) and a['href'] == attrs['href']:
if ('title' in a) or ('title' in attrs):
if (('title' in a) and ('title' in attrs) and
a['title'] == attrs['title']):
match = True
else:
match = True
if match:
return i
def handle_emphasis(self, start, tag_style, parent_style):
"""
Handles various text emphases
"""
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
# handle Google's text emphasis
strikethrough = 'line-through' in \
tag_emphasis and self.hide_strikethrough
bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
fixed = google_fixed_width_font(tag_style) and not \
google_fixed_width_font(parent_style) and not self.pre
if start:
# crossed-out text must be handled before other attributes
# in order not to output qualifiers unnecessarily
if bold or italic or fixed:
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o(self.emphasis_mark)
self.drop_white_space += 1
if bold:
self.o(self.strong_mark)
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if bold or italic or fixed:
# there must not be whitespace before closing emphasis mark
self.emphasis -= 1
self.space = 0
if fixed:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_white_space -= 1
else:
self.o(self.strong_mark)
if italic:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_white_space -= 1
else:
self.o(self.emphasis_mark)
# space is only allowed after *all* emphasis marks
if (bold or italic) and not self.emphasis:
self.o(" ")
if strikethrough:
self.quiet -= 1
def handle_tag(self, tag, attrs, start):
# attrs is None for endtags
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
# first thing inside the anchor tag is another tag that produces some output
if (start and not self.maybe_automatic_link is None
and tag not in ['p', 'div', 'style', 'dl', 'dt']
and (tag != "img" or self.ignore_images)):
self.o("[")
self.maybe_automatic_link = None
self.empty_link = False
if self.google_doc:
# the attrs parameter is empty for a closing tag. in addition, we
# need the attributes of the parent nodes in order to get a
# complete style description for the current element. we assume
# that google docs export well formed html.
parent_style = {}
if start:
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
tag_style = element_style(attrs, self.style_def, parent_style)
self.tag_stack.append((tag, attrs, tag_style))
else:
dummy, attrs, tag_style = self.tag_stack.pop()
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
if hn(tag):
self.p()
if start:
self.inheader = True
self.o(hn(tag) * "#" + ' ')
else:
self.inheader = False
return # prevent redundant emphasis marks on headers
if tag in ['p', 'div']:
if self.google_doc:
if start and google_has_height(tag_style):
self.p()
else:
self.soft_br()
else:
self.p()
if tag == "br" and start:
self.o(" \n")
if tag == "hr" and start:
self.p()
self.o("* * *")
self.p()
if tag in ["head", "style", 'script']:
if start:
self.quiet += 1
else:
self.quiet -= 1
if tag == "style":
if start:
self.style += 1
else:
self.style -= 1
if tag in ["body"]:
self.quiet = 0 # sites like 9rules.com never close <head>
if tag == "blockquote":
if start:
self.p()
self.o('> ', 0, 1)
self.start = 1
self.blockquote += 1
else:
self.blockquote -= 1
self.p()
if tag in ['em', 'i', 'u'] and not self.ignore_emphasis:
self.o(self.emphasis_mark)
if tag in ['strong', 'b'] and not self.ignore_emphasis:
self.o(self.strong_mark)
if tag in ['del', 'strike', 's']:
if start:
self.o("<" + tag + ">")
else:
self.o("</" + tag + ">")
if self.google_doc:
if not self.inheader:
# handle some font attributes, but leave headers clean
self.handle_emphasis(start, tag_style, parent_style)
if tag in ["code", "tt"] and not self.pre:
self.o('`') # TODO: `` `this` ``
self.code = not self.code
if tag == "abbr":
if start:
self.abbr_title = None
self.abbr_data = ''
if ('title' in attrs):
self.abbr_title = attrs['title']
else:
if self.abbr_title is not None:
self.abbr_list[self.abbr_data] = self.abbr_title
self.abbr_title = None
self.abbr_data = ''
if tag == "a" and not self.ignore_links:
if start:
if ('href' in attrs) and \
(attrs['href'] is not None) and \
not (self.skip_internal_links and
attrs['href'].startswith('#')):
self.astack.append(attrs)
self.maybe_automatic_link = attrs['href']
self.empty_link = True
if self.protect_links:
attrs['href'] = '<'+attrs['href']+'>'
else:
self.astack.append(None)
else:
if self.astack:
a = self.astack.pop()
if self.maybe_automatic_link and not self.empty_link:
self.maybe_automatic_link = None
elif a:
if self.empty_link:
self.o("[")
self.empty_link = False
self.maybe_automatic_link = None
if self.inline_links:
try:
title = escape_md(a['title'])
except KeyError:
self.o("](" + escape_md(a['href']) + ")")
else:
self.o("](" + escape_md(a['href'])
+ ' "' + title + '" )')
else:
i = self.previousIndex(a)
if i is not None:
a = self.a[i]
else:
self.acount += 1
a['count'] = self.acount
a['outcount'] = self.outcount
self.a.append(a)
self.o("][" + str(a['count']) + "]")
if tag == "img" and start and not self.ignore_images:
if 'src' in attrs:
if not self.images_to_alt:
attrs['href'] = attrs['src']
alt = attrs.get('alt') or ''
# If we have images_with_size, write raw html including width,
# height, and alt attributes
if self.images_with_size and \
("width" in attrs or "height" in attrs):
self.o("<img src='" + attrs["src"] + "' ")
if "width" in attrs:
self.o("width='" + attrs["width"] + "' ")
if "height" in attrs:
self.o("height='" + attrs["height"] + "' ")
if alt:
self.o("alt='" + alt + "' ")
self.o("/>")
return
# If we have a link to create, output the start
if not self.maybe_automatic_link is None:
href = self.maybe_automatic_link
if self.images_to_alt and escape_md(alt) == href and \
self.absolute_url_matcher.match(href):
self.o("<" + escape_md(alt) + ">")
self.empty_link = False
return
else:
self.o("[")
self.maybe_automatic_link = None
self.empty_link = False
# If we have images_to_alt, we discard the image itself,
# considering only the alt text.
if self.images_to_alt:
self.o(escape_md(alt))
else:
self.o("![" + escape_md(alt) + "]")
if self.inline_links:
href = attrs.get('href') or ''
self.o("(" + escape_md(href) + ")")
else:
i = self.previousIndex(attrs)
if i is not None:
attrs = self.a[i]
else:
self.acount += 1
attrs['count'] = self.acount
attrs['outcount'] = self.outcount
self.a.append(attrs)
self.o("[" + str(attrs['count']) + "]")
if tag == 'dl' and start:
self.p()
if tag == 'dt' and not start:
self.pbr()
if tag == 'dd' and start:
self.o(' ')
if tag == 'dd' and not start:
self.pbr()
if tag in ["ol", "ul"]:
# Google Docs create sub lists as top level lists
if (not self.list) and (not self.lastWasList):
self.p()
if start:
if self.google_doc:
list_style = google_list_style(tag_style)
else:
list_style = tag
numbering_start = list_numbering_start(attrs)
self.list.append({
'name': list_style,
'num': numbering_start
})
else:
if self.list:
self.list.pop()
self.lastWasList = True
else:
self.lastWasList = False
if tag == 'li':
self.pbr()
if start:
if self.list:
li = self.list[-1]
else:
li = {'name': 'ul', 'num': 0}
if self.google_doc:
nest_count = self.google_nest_count(tag_style)
else:
nest_count = len(self.list)
# TODO: line up <ol><li>s > 9 correctly.
self.o(" " * nest_count)
if li['name'] == "ul":
self.o(self.ul_item_mark + " ")
elif li['name'] == "ol":
li['num'] += 1
self.o(str(li['num']) + ". ")
self.start = 1
if tag in ["table", "tr", "td", "th"]:
if self.bypass_tables:
if start:
self.soft_br()
if tag in ["td", "th"]:
if start:
self.o('<{0}>\n\n'.format(tag))
else:
self.o('\n</{0}>'.format(tag))
else:
if start:
self.o('<{0}>'.format(tag))
else:
self.o('</{0}>'.format(tag))
else:
if tag == "table" and start:
self.table_start = True
if tag in ["td", "th"] and start:
if self.split_next_td:
self.o("| ")
self.split_next_td = True
if tag == "tr" and start:
self.td_count = 0
if tag == "tr" and not start:
self.split_next_td = False
self.soft_br()
if tag == "tr" and not start and self.table_start:
# Underline table header
self.o("|".join(["---"] * self.td_count))
self.soft_br()
self.table_start = False
if tag in ["td", "th"] and start:
self.td_count += 1
if tag == "pre":
if start:
self.startpre = 1
self.pre = 1
else:
self.pre = 0
if self.mark_code:
self.out("\n[/code]")
self.p()
# TODO: Add docstring for these one letter functions
def pbr(self):
"Pretty print has a line break"
if self.p_p == 0:
self.p_p = 1
def p(self):
"Set pretty print to 1 or 2 lines"
self.p_p = 1 if self.single_line_break else 2
def soft_br(self):
"Soft breaks"
self.pbr()
self.br_toggle = ' '
def o(self, data, puredata=0, force=0):
"""
Deal with indentation and whitespace
"""
if self.abbr_data is not None:
self.abbr_data += data
if not self.quiet:
if self.google_doc:
# prevent white space immediately after 'begin emphasis'
# marks ('**' and '_')
lstripped_data = data.lstrip()
if self.drop_white_space and not (self.pre or self.code):
data = lstripped_data
if lstripped_data != '':
self.drop_white_space = 0
if puredata and not self.pre:
# This is a very dangerous call ... it could mess up
                # all handling of &nbsp; when not handled properly
# (see entityref)
data = re.sub(r'\s+', r' ', data)
if data and data[0] == ' ':
self.space = 1
data = data[1:]
if not data and not force:
return
if self.startpre:
#self.out(" :") #TODO: not output when already one there
if not data.startswith("\n"): # <pre>stuff...
data = "\n" + data
if self.mark_code:
self.out("\n[code]")
self.p_p = 0
bq = (">" * self.blockquote)
if not (force and data and data[0] == ">") and self.blockquote:
bq += " "
if self.pre:
if not self.list:
bq += " "
#else: list content is already partially indented
for i in range(len(self.list)):
bq += " "
data = data.replace("\n", "\n" + bq)
if self.startpre:
self.startpre = 0
if self.list:
# use existing initial indentation
data = data.lstrip("\n")
if self.start:
self.space = 0
self.p_p = 0
self.start = 0
if force == 'end':
# It's the end.
self.p_p = 0
self.out("\n")
self.space = 0
if self.p_p:
self.out((self.br_toggle + '\n' + bq) * self.p_p)
self.space = 0
self.br_toggle = ''
if self.space:
if not self.lastWasNL:
self.out(' ')
self.space = 0
if self.a and ((self.p_p == 2 and self.links_each_paragraph)
or force == "end"):
if force == "end":
self.out("\n")
newa = []
for link in self.a:
if self.outcount > link['outcount']:
self.out(" [" + str(link['count']) + "]: " +
urlparse.urljoin(self.baseurl, link['href']))
if 'title' in link:
self.out(" (" + link['title'] + ")")
self.out("\n")
else:
newa.append(link)
# Don't need an extra line when nothing was done.
if self.a != newa:
self.out("\n")
self.a = newa
if self.abbr_list and force == "end":
for abbr, definition in self.abbr_list.items():
self.out(" *[" + abbr + "]: " + definition + "\n")
self.p_p = 0
self.out(data)
self.outcount += 1
def handle_data(self, data, entity_char=False):
if r'\/script>' in data:
self.quiet -= 1
if self.style:
self.style_def.update(dumb_css_parser(data))
if not self.maybe_automatic_link is None:
href = self.maybe_automatic_link
if (href == data and self.absolute_url_matcher.match(href)
and self.use_automatic_links):
self.o("<" + data + ">")
self.empty_link = False
return
else:
self.o("[")
self.maybe_automatic_link = None
self.empty_link = False
if not self.code and not self.pre and not entity_char:
data = escape_md_section(data, snob=self.escape_snob)
self.o(data, 1)
def unknown_decl(self, data): # pragma: no cover
# TODO: what is this doing here?
pass
def charref(self, name):
if name[0] in ['x', 'X']:
c = int(name[1:], 16)
else:
c = int(name)
if not self.unicode_snob and c in unifiable_n.keys():
return unifiable_n[c]
else:
try:
try:
return unichr(c)
except NameError: # Python3
return chr(c)
except ValueError: # invalid unicode
return ''
def entityref(self, c):
if not self.unicode_snob and c in config.UNIFIABLE.keys():
return config.UNIFIABLE[c]
else:
try:
name2cp(c)
except KeyError:
return "&" + c + ';'
else:
if c == 'nbsp':
return config.UNIFIABLE[c]
else:
try:
return unichr(name2cp(c))
except NameError: # Python3
return chr(name2cp(c))
def replaceEntities(self, s):
s = s.group(1)
if s[0] == "#":
return self.charref(s[1:])
else:
return self.entityref(s)
def unescape(self, s):
return config.RE_UNESCAPE.sub(self.replaceEntities, s)
def google_nest_count(self, style):
"""
Calculate the nesting count of google doc lists
:type style: dict
:rtype: int
"""
nest_count = 0
if 'margin-left' in style:
nest_count = int(style['margin-left'][:-2]) \
// self.google_list_indent
return nest_count
def optwrap(self, text):
"""
Wrap all paragraphs in the provided text.
:type text: str
:rtype: str
"""
if not self.body_width:
return text
assert wrap, "Requires Python 2.3."
result = ''
newlines = 0
for para in text.split("\n"):
if len(para) > 0:
if not skipwrap(para):
result += "\n".join(wrap(para, self.body_width))
if para.endswith(' '):
result += " \n"
newlines = 1
else:
result += "\n\n"
newlines = 2
else:
# Warning for the tempted!!!
# Be aware that obvious replacement of this with
# line.isspace()
# DOES NOT work! Explanations are welcome.
if not config.RE_SPACE.match(para):
result += para + "\n"
newlines = 1
else:
if newlines < 2:
result += "\n"
newlines += 1
return result
def html2text(html, baseurl='', bodywidth=config.BODY_WIDTH):
h = HTML2Text(baseurl=baseurl, bodywidth=bodywidth)
return h.handle(html)
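# Minimal usage sketch (illustration only):
#   h = HTML2Text()
#   h.ignore_links = True
#   markdown = h.handle("<h1>Title</h1><p>Hello, <b>world</b>!</p>")
# or, for one-off conversions, the convenience wrapper above:
#   markdown = html2text("<p>Hello, <em>world</em>!</p>")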
def unescape(s, unicode_snob=False):
h = HTML2Text()
h.unicode_snob = unicode_snob
return h.unescape(s)
if __name__ == "__main__":
from html2text.cli import main
main()
| gpl-3.0 | -8,062,942,466,037,539,000 | 33.356802 | 84 | 0.448751 | false |
spencerkclark/aospy-obj-lib | aospy_user/calcs/interpolation.py | 1 | 3800 | """Interpolation and zero finding routines. These are all
dimension agnostic.
"""
import xray
import numpy as np
from scipy.interpolate import interp1d
from collections import OrderedDict
def sorted_coords(da):
"""Returns a list of coordinate names based on axis order."""
order = {coord:
da.reset_coords(drop=True).get_axis_num(coord)
for coord in da.reset_coords(drop=True).coords}
return zip(*sorted(order.items(), key=lambda (k, v): v))[0]
def replace_dim(da, dim, values):
"""Replaces a dimension in a coords OrderedDict"""
# Sort the coordinates by axis number. Then fill
# OrderedDict.
sorted_coords_ = sorted_coords(da)
coords = da.reset_coords(drop=True).coords
new_coords = OrderedDict()
for coord in sorted_coords_:
if dim != coord:
new_coords[coord] = coords[coord]
else:
new_coords[dim] = values
return new_coords
def drop_dim(da, dim):
"""Drops the specified dimension from the coord. dict"""
sorted_coords_ = sorted_coords(da)
coords = da.coords
new_coords = OrderedDict()
for coord in sorted_coords_:
if dim != coord:
new_coords[coord] = coords[coord]
return new_coords
def interp1d_pt_xray(da, dim, value):
"""Interpolates the values of a DataArray at a point
along the specified dimension.
Parameters
----------
da : DataArray
data to interpolate
dim : str
dimension name to interpolate along
value : float
point to interpolate to
Returns
-------
slice : DataArray
interpolated data
"""
function = interp1d(da[dim].values, da.values,
axis=da.get_axis_num(dim))
values_interp = function(value)
da_interp = xray.DataArray(values_interp,
coords=replace_dim(da,
dim, value))
return da_interp
def interp1d_xray(da, dim, npoints=18000, kind='linear'):
"""Interpolates the DataArray to finer resolution
Parameters
----------
da : DataArray
data to interpolate
dim : str
dimension name to interpolate along
npoints : int
number of points to expand dimension to
kind : str
type of interpolation to perform (see scipy documentation)
defaults to linear
Returns
-------
interp : DataArray
interpolated DataArray
"""
function = interp1d(da[dim].values, da.values,
axis=da.get_axis_num(dim), kind=kind)
coord_interp = np.linspace(da[dim][0], da[dim][-1],
npoints, endpoint=True)
values_interp = function(coord_interp)
da_interp = xray.DataArray(values_interp,
coords=replace_dim(da,
dim, coord_interp))
return da_interp
def zeros_xray(da, dim, npoints=18000):
"""Finds zeros of a DataArray along an axis at the specified
resolution. A higher value for npoints means higher precision.
    The default npoints gives linearly-interpolated
zeros to within 0.01 degrees latitude.
Parameters
----------
da : DataArray
data to find zeros of
dim : str
name of dimension in xray
npoints : int
(optional) number of points for interpolation
Returns
-------
zeros : DataArray
zeros of the DataArray along the dimension specified
"""
if npoints:
da_interp = interp1d_xray(da, dim, npoints)
else:
da_interp = da
signs = np.sign(da_interp)
mask = signs.diff(dim, label='lower') != 0
zeros = mask * mask[dim]
zeros = zeros.where(zeros != 0)
return zeros
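# Usage sketch (hypothetical data; relies on the legacy ``xray`` package that
# this module imports):
#   lat = np.linspace(-90., 90., 181)
#   da = xray.DataArray(np.cos(np.deg2rad(2. * lat)), coords=[('lat', lat)])
#   zeros_xray(da, 'lat')              # non-NaN only near the crossings at +/-45 deg
#   interp1d_pt_xray(da, 'lat', 10.5)  # value interpolated at lat = 10.5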
| gpl-3.0 | -477,074,915,362,125,060 | 27.787879 | 69 | 0.596579 | false |
ooovector/qtlab_replacement | qubit_calibrations/calibrated_readout.py | 1 | 14122 | from .readout_pulse import *
from .. import readout_classifier
from . import excitation_pulse
from .. import single_shot_readout
import numpy as np
import traceback
def get_confusion_matrix(device, qubit_ids, pause_length=0, recalibrate=True, force_recalibration=False):
qubit_readout_pulse, readout_device = get_calibrated_measurer(device, qubit_ids)
excitation_pulses = {qubit_id: excitation_pulse.get_excitation_pulse(device, qubit_id, rotation_angle=np.pi) for
qubit_id in qubit_ids}
references = {('excitation_pulse', qubit_id): pulse.id for qubit_id, pulse in excitation_pulses.items()}
references['readout_pulse'] = qubit_readout_pulse.id
metadata = {'qubit_ids': qubit_readout_pulse.metadata['qubit_ids'], 'pause_length': str(pause_length)}
try:
assert not force_recalibration
confusion_matrix = device.exdir_db.select_measurement(measurement_type='confusion_matrix',
references_that=references, metadata=metadata)
except:
if not recalibrate:
raise
confusion_matrix = calibrate_preparation_and_readout_confusion(device, qubit_readout_pulse, readout_device,
pause_length)
return qubit_readout_pulse, readout_device, confusion_matrix
def calibrate_preparation_and_readout_confusion(device, qubit_readout_pulse, readout_device, pause_length=0):
qubit_ids = qubit_readout_pulse.metadata['qubit_ids'].split(',')
target_qubit_states = [0] * len(qubit_ids)
excitation_pulses = {qubit_id: excitation_pulse.get_excitation_pulse(device, qubit_id, rotation_angle=np.pi) for
qubit_id in qubit_ids}
references = {('excitation_pulse', qubit_id): pulse.id for qubit_id, pulse in excitation_pulses.items()}
references['readout_pulse'] = qubit_readout_pulse.id
def set_target_state(state):
excitation_sequence = []
for _id, qubit_id in enumerate(qubit_ids):
qubit_state = (1 << _id) & state
if qubit_state:
excitation_sequence.extend(excitation_pulses[qubit_id].get_pulse_sequence(0))
device.pg.set_seq(excitation_sequence + [
device.pg.pmulti(pause_length)] + device.trigger_readout_seq + qubit_readout_pulse.get_pulse_sequence())
return device.sweeper.sweep(readout_device,
(np.arange(2 ** len(qubit_ids)), set_target_state, 'Target state', ''),
measurement_type='confusion_matrix',
references=references,
metadata={'qubit_ids': qubit_readout_pulse.metadata['qubit_ids'],
'pause_length': str(pause_length)})
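# Note on the target-state convention used above (illustration only): the sweep
# runs over all 2**n basis states encoded as bit masks, where bit _id of
# ``state`` decides whether qubit_ids[_id] receives a pi pulse, e.g. for
# qubit_ids = ['1', '2']:
#   state 0 -> no pulses, state 1 -> qubit '1' excited,
#   state 2 -> qubit '2' excited, state 3 -> both excited.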
def get_calibrated_measurer(device, qubit_ids, qubit_readout_pulse=None, recalibrate=True, force_recalibration=False):
from .readout_pulse import get_multi_qubit_readout_pulse
if qubit_readout_pulse is None:
qubit_readout_pulse = get_multi_qubit_readout_pulse(device, qubit_ids)
features = []
thresholds = []
references = {'readout_pulse': qubit_readout_pulse.id,
'delay_calibration': device.modem.delay_measurement.id}
for qubit_id in qubit_ids:
metadata = {'qubit_id': qubit_id}
try:
if force_recalibration:
raise ValueError('Forcing recalibration')
measurement = device.exdir_db.select_measurement(measurement_type='readout_calibration', metadata=metadata,
references_that=references)
except Exception as e:
print(traceback.print_exc())
if not recalibrate:
raise
measurement = calibrate_readout(device, qubit_id, qubit_readout_pulse)
features.append(measurement.datasets['feature'].data)
thresholds.append(measurement.datasets['threshold'].data.ravel()[0])
readout_device = device.set_adc_features_and_thresholds(features, thresholds, disable_rest=True)
nums = int(device.get_sample_global(name='calibrated_readout_nums'))
readout_device.set_nums(nums)
readout_device.set_nop(int(device.get_sample_global('readout_adc_points')))
return qubit_readout_pulse, readout_device # , features, thresholds
def calibrate_readout(device, qubit_id, qubit_readout_pulse, transition='01', ignore_other_qubits=None):
adc, mnames = device.setup_adc_reducer_iq(qubit_id, raw=True)
nums = int(device.get_qubit_constant(qubit_id=qubit_id, name='readout_background_nums'))
old_nums = adc.get_nums()
adc.set_nop(int(device.get_sample_global('readout_adc_points')))
if ignore_other_qubits is None:
ignore_other_qubits = bool(device.get_qubit_constant(qubit_id=qubit_id, name='readout_calibration_ignore_other_qubits'))
other_qubit_pulse_sequence = []
references = {}
if not ignore_other_qubits:
for other_qubit_id in device.get_qubit_list():
if other_qubit_id != qubit_id:
half_excited_pulse = excitation_pulse.get_excitation_pulse(device, other_qubit_id,
rotation_angle=np.pi / 2.)
references[('other_qubit_pulse', other_qubit_id)] = half_excited_pulse.id
other_qubit_pulse_sequence.extend(half_excited_pulse.get_pulse_sequence(0))
qubit_excitation_pulse = excitation_pulse.get_excitation_pulse(device, qubit_id, rotation_angle=np.pi)
metadata = {'qubit_id': qubit_id,
'averages': nums,
'ignore_other_qubits': ignore_other_qubits}
references.update({'readout_pulse': qubit_readout_pulse.id,
'excitation_pulse': qubit_excitation_pulse.id,
'delay_calibration': device.modem.delay_measurement.id})
classifier = single_shot_readout.single_shot_readout(adc=adc,
prepare_seqs=[other_qubit_pulse_sequence,
other_qubit_pulse_sequence + qubit_excitation_pulse.get_pulse_sequence(
0)],
ro_seq=device.trigger_readout_seq + qubit_readout_pulse.get_pulse_sequence(),
pulse_generator=device.pg,
ro_delay_seq=None,
_readout_classifier=readout_classifier.binary_linear_classifier(),
adc_measurement_name='Voltage')
classifier.readout_classifier.cov_mode = 'equal'
try:
adc.set_nums(nums)
measurement = device.sweeper.sweep(classifier,
measurement_type='readout_calibration',
metadata=metadata,
references=references)
except:
raise
finally:
adc.set_nums(old_nums)
return measurement
# classifier.repeat_samples = 2
def get_qubit_readout_pulse_from_fidelity_scan(device, fidelity_scan):
from .readout_pulse import qubit_readout_pulse
references = {'fidelity_scan': fidelity_scan.id}
if 'channel_calibration' in fidelity_scan.metadata:
references['channel_calibration'] = fidelity_scan.references['readout_channel_calibration']
fidelity_dataset = fidelity_scan.datasets['fidelity']
max_fidelity = np.unravel_index(np.argmax(fidelity_dataset.data.ravel()), fidelity_dataset.data.shape)
pulse_parameters = {}
for p, v_id in zip(fidelity_dataset.parameters, max_fidelity):
pulse_parameters[p.name] = p.values[v_id]
# compression_1db = float(passthrough_measurement.metadata['compression_1db'])
# additional_noise_appears = float(passthrough_measurement.metadata['additional_noise_appears'])
# if np.isfinite(compression_1db):
# calibration_type = 'compression_1db'
# amplitude = compression_1db
# elif np.isfinite(additional_noise_appears):
# calibration_type = 'additional_noise_appears'
# amplitude = additional_noise_appears
# else:
# raise Exception('Compession_1db and additional_noise_appears not found on passthourgh scan!')
readout_channel = fidelity_scan.metadata['channel']
# length = float(fidelity_scan.metadata['length'])
metadata = {'pulse_type': 'rect',
'channel': readout_channel,
'qubit_id': fidelity_scan.metadata['qubit_id'],
# 'amplitude':amplitude,
'calibration_type': 'fidelity_scan',
# 'length': passthrough_measurement.metadata['length']
}
metadata.update(pulse_parameters)
length = float(metadata['length'])
amplitude = float(metadata['amplitude'])
try:
readout_pulse = qubit_readout_pulse(
device.exdir_db.select_measurement(measurement_type='qubit_readout_pulse', references_that=references,
metadata=metadata))
except Exception as e:
print(type(e), str(e))
readout_pulse = qubit_readout_pulse(references=references, metadata=metadata,
sample_name=device.exdir_db.sample_name)
device.exdir_db.save_measurement(readout_pulse)
readout_pulse.pulse_sequence = [device.pg.p(readout_channel, length, device.pg.rect, amplitude)]
return readout_pulse
def readout_fidelity_scan(device, qubit_id, readout_pulse_lengths, readout_pulse_amplitudes,
recalibrate_excitation=True, ignore_other_qubits=False):
adc, mnames = device.setup_adc_reducer_iq(qubit_id, raw=True)
nums = int(device.get_qubit_constant(qubit_id=qubit_id, name='readout_background_nums'))
adc.set_nop(int(device.get_sample_global('readout_adc_points')))
old_nums = adc.get_nums()
readout_channel = [i for i in device.get_qubit_readout_channel_list(qubit_id).keys()][0]
other_qubit_pulse_sequence = []
references = {'frequency_controls': device.get_frequency_control_measurement_id(qubit_id=qubit_id)}
if hasattr(device.awg_channels[readout_channel], 'get_calibration_measurement'):
references['channel_calibration'] = device.awg_channels[readout_channel].get_calibration_measurement()
if not ignore_other_qubits:
for other_qubit_id in device.get_qubit_list():
if other_qubit_id != qubit_id:
half_excited_pulse = excitation_pulse.get_excitation_pulse(device, other_qubit_id,
rotation_angle=np.pi / 2.,
recalibrate=recalibrate_excitation)
references[('other_qubit_pulse', other_qubit_id)] = half_excited_pulse.id
other_qubit_pulse_sequence.extend(half_excited_pulse.get_pulse_sequence(0))
qubit_excitation_pulse = excitation_pulse.get_excitation_pulse(device, qubit_id, rotation_angle=np.pi,
recalibrate=recalibrate_excitation)
metadata = {'qubit_id': qubit_id,
'averages': nums,
'channel': readout_channel,
'ignore_other_qubits': ignore_other_qubits}
# print ('len(readout_pulse_lengths): ', len(readout_pulse_lengths))
if len(readout_pulse_lengths) == 1:
metadata['length'] = str(readout_pulse_lengths[0])
references.update({'excitation_pulse': qubit_excitation_pulse.id,
'delay_calibration': device.modem.delay_measurement.id})
classifier = single_shot_readout.single_shot_readout(adc=adc,
prepare_seqs=[other_qubit_pulse_sequence,
other_qubit_pulse_sequence +
qubit_excitation_pulse.get_pulse_sequence(0)],
ro_seq=device.trigger_readout_seq,
pulse_generator=device.pg,
ro_delay_seq=None,
_readout_classifier=readout_classifier.binary_linear_classifier(),
adc_measurement_name='Voltage')
classifier.readout_classifier.cov_mode = 'equal'
# setters for sweep
readout_amplitude = 0
readout_length = 0
def set_readout_amplitude(x):
nonlocal readout_amplitude
readout_amplitude = x
classifier.ro_seq = device.trigger_readout_seq + [
device.pg.p(readout_channel, readout_length, device.pg.rect, readout_amplitude)]
def set_readout_length(x):
nonlocal readout_length
readout_length = x
classifier.ro_seq = device.trigger_readout_seq + [
device.pg.p(readout_channel, readout_length, device.pg.rect, readout_amplitude)]
try:
adc.set_nums(nums)
measurement = device.sweeper.sweep(classifier,
(readout_pulse_lengths, set_readout_length, 'length', 's'),
(readout_pulse_amplitudes, set_readout_amplitude, 'amplitude', ''),
measurement_type='readout_fidelity_scan',
metadata=metadata,
references=references)
except:
raise
finally:
adc.set_nums(old_nums)
return measurement
# classifier.repeat_samples = 2
| gpl-3.0 | -5,617,561,086,539,546,000 | 52.090226 | 142 | 0.585682 | false |
ktsamis/repose | repose/target/__init__.py | 1 | 4025 |
from logging import getLogger
from qamlib.utils import timestamp
from ..connection import Connection, CommandTimeout
from .parsers.product import parse_system
from .parsers.repository import parse_repositories
from ..messages import ConnectingTargetFailedMessage
from ..types.repositories import Repositories
logger = getLogger("repose.target")
class Target(object):
def __init__(self, hostname, port, username, connector=Connection):
# TODO: timeout handling ?
self.port = port
self.hostname = hostname
self.username = username
self.products = None
self.raw_repos = None
self.repos = None
self.connector = connector
self.is_connected = False
self.connection = self.connector(self.hostname, self.username, self.port)
self.out = []
def __repr__(self):
return "<{} object {}@{}:{} - connected: {}>".format(
self.__class__.__name__,
self.username,
self.hostname,
self.port,
self.is_connected,
)
def connect(self):
if not self.is_connected:
logger.info("Connecting to {}:{}".format(self.hostname, self.port))
try:
self.connection.connect()
except BaseException as e:
logger.critical(
ConnectingTargetFailedMessage(self.hostname, self.port, e)
)
else:
self.is_connected = True
return self
def read_products(self):
if not self.is_connected:
self.connect()
self.products = parse_system(self.connection)
def close(self):
self.connection.close()
self.is_connected = False
def __bool__(self):
return self.is_connected
def run(self, command, lock=None):
logger.debug("run {} on {}:{}".format(command, self.hostname, self.port))
time_before = timestamp()
stdout, stderr, exitcode = "", "", -1  # defaults so the bookkeeping below still works if run() raises
try:
stdout, stderr, exitcode = self.connection.run(command, lock)
except CommandTimeout:
logger.critical('{}: command "{}" timed out'.format(self.hostname, command))
exitcode = -1
except AssertionError:
logger.debug("zombie command terminated", exc_info=True)
return
except Exception as e:
# failed to run command
logger.error(
'{}: failed to run command "{}"'.format(self.hostname, command)
)
logger.debug("exception {}".format(e), exc_info=True)
exitcode = -1
runtime = int(timestamp()) - int(time_before)
self.out.append([command, stdout, stderr, exitcode, runtime])
return (stdout, stderr, exitcode)
def parse_repos(self):
if not self.products:
self.read_products()
if not self.raw_repos:
self.read_repos()
self.repos = Repositories(self.raw_repos, self.products.arch())
def read_repos(self):
if self.is_connected:
stdout, stderr, exitcode = self.run("zypper -x lr")
if exitcode in (0, 106, 6):
self.raw_repos = parse_repositories(stdout)
else:
logger.error(
"Can't parse repositories on {}, zypper returned {} exitcode".format(
self.hostname, exitcode
)
)
logger.debug("output:\n {}".format(stderr))
raise ValueError(
"Can't read repositories on {}:{}".format(self.hostname, self.port)
)
else:
logger.debug("Host {}:{} not connected".format(self.hostname, self.port))
def report_products(self, sink):
return sink(self.hostname, self.port, self.products)
def report_products_yaml(self, sink):
return sink(self.hostname, self.products)
def report_repos(self, sink):
return sink(self.hostname, self.port, self.raw_repos)
| gpl-3.0 | 8,814,799,234,254,061,000 | 31.991803 | 89 | 0.570435 | false |
kwTheLinuxGuy/frostbyte | DecFunction.py | 1 | 4634 | #
# === F R O S T B Y T E === ====Decryption Alpha Module====
# By Keith Meifert (2017)
# Licensed under the Creative Commons Attribution 4.0
# If you make a fair amount of money using my code commercially, please consider donating.
#
import random
def frostbyteDec(key,arrayinput):
arrayinput = arrayinput.strip("[]")  # drop the surrounding list brackets
arrayinput = arrayinput.replace(" ","")
blockGroup = 0
ltable = []
fctable = []
charProd = 1
blocksize = 4
block = [[[0 for k in range(blocksize)] for j in range(blocksize)] for i in range(blocksize)]
ctable = arrayinput.split(",")
lookCtr = 0
while lookCtr < len(key):
charProd = charProd * ord(key[lookCtr])
lookCtr = lookCtr + 1
random.seed(charProd)
ltable = random.sample(range(0,256), 256)
while blockGroup < len(ctable) / 64:
x = 0
y = 0
z = 0
g = 0
for g in range(64):
block[x][y][z] = ctable[(blockGroup * 64 + g)]
x = x + 1
if x > 3:
x = 0
y = y + 1
if y > 3:
y = 0
z = z + 1
shufflevals = []
random.seed(charProd)
for h in range(4):
nextRnd = random.randint(0, 32)
charProd = charProd * nextRnd
shufflevals.append(nextRnd)
#========================================================== Z ==========
shx = 0
shy = 0
bltk = [0,0,0,0]
ycntr = 0
zcntr = 0
for zcntr in range(4):
for ycntr in range(4):
g = 0
for g in range(shufflevals[shx]):
bltk = (block[shx][shy][0],block[shx][shy][1],block[shx][shy][2],block[shx][shy][3])
block[shx][shy][0] = bltk[3]
block[shx][shy][1] = bltk[0]
block[shx][shy][2] = bltk[1]
block[shx][shy][3] = bltk[2]
shx = shx + 1
shy = shy + 1
shx = 0
#========================================================== Y ==========
shx = 0
shz = 0
bltk = [0,0,0,0]
ycntr = 0
zcntr = 0
for zcntr in range(4):
for ycntr in range(4):
g = 0
for g in range(shufflevals[shz]):
bltk = (block[shx][0][shz],block[shx][1][shz],block[shx][2][shz],block[shx][3][shz])
block[shx][0][shz] = bltk[3]
block[shx][1][shz] = bltk[0]
block[shx][2][shz] = bltk[1]
block[shx][3][shz] = bltk[2]
shx = shx + 1
shz = shz + 1
shx = 0
#========================================================== X ==========
shy = 0
shz = 0
bltk = [0,0,0,0]
ycntr = 0
zcntr = 0
for zcntr in range(4):
for ycntr in range(4):
g = 0
for g in range(shufflevals[shy]):
bltk = (block[0][shy][shz],block[1][shy][shz],block[2][shy][shz],block[3][shy][shz])
block[0][shy][shz] = bltk[3]
block[1][shy][shz] = bltk[0]
block[2][shy][shz] = bltk[1]
block[3][shy][shz] = bltk[2]
shy = shy + 1
shz = shz + 1
shy = 0
x = 0
y = 0
z = 0
g = 0
for g in range(64):
fctable.append(block[x][y][z])
x = x + 1
if x > 3:
x = 0
y = y + 1
if y > 3:
y = 0
z = z + 1
blockGroup = blockGroup + 1
lookCtr = 0
finaldec = []
while lookCtr < len(fctable):
swapval = ltable.index(int(fctable[lookCtr]))
finaldec.append(swapval)
lookCtr = lookCtr + 1
lookCtr = 0
while lookCtr < len(finaldec):
swapval = chr(finaldec[lookCtr])
finaldec[lookCtr] = swapval
lookCtr = lookCtr + 1
print("".join(finaldec))
frostbyteDec("secure","[5, 164, 164, 164, 152, 164, 164, 164, 160, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 152, 164, 164, 164, 160, 164, 164, 164, 164, 164, 5, 164, 164, 164, 164, 164, 164, 164, 160, 164, 164, 164, 164, 164, 164, 164, 164, 5, 164, 164, 164, 152, 164, 164, 164, 164, 164, 164, 164, 164, 164]")
| mit | -8,311,332,502,046,905,000 | 34.203125 | 339 | 0.408934 | false |
mcs07/ChemDataExtractor | chemdataextractor/scrape/csstranslator.py | 1 | 4911 | # -*- coding: utf-8 -*-
"""
chemdataextractor.scrape.csstranslator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Extend cssselect to improve handling of pseudo-elements.
This is derived from csstranslator.py in the Scrapy project. The original file is available at:
https://github.com/scrapy/scrapy/blob/master/scrapy/selector/csstranslator.py
The original file was released under the BSD license:
Copyright (c) Scrapy developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Scrapy nor the names of its contributors may be used
to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from cssselect import GenericTranslator, HTMLTranslator
from cssselect.xpath import _unicode_safe_getattr, XPathExpr, ExpressionError
from cssselect.parser import FunctionalPseudoElement
class CdeXPathExpr(XPathExpr):
textnode = False
attribute = None
@classmethod
def from_xpath(cls, xpath, textnode=False, attribute=None):
x = cls(path=xpath.path, element=xpath.element, condition=xpath.condition)
x.textnode = textnode
x.attribute = attribute
return x
def __str__(self):
path = super(CdeXPathExpr, self).__str__()
if self.textnode:
if path == '*':
path = 'text()'
elif path.endswith('::*/*'):
path = path[:-3] + 'text()'
else:
path += '/text()'
if self.attribute is not None:
if path.endswith('::*/*'):
path = path[:-2]
path += '/@%s' % self.attribute
return path
def join(self, combiner, other):
super(CdeXPathExpr, self).join(combiner, other)
self.textnode = other.textnode
self.attribute = other.attribute
return self
class TranslatorMixin(object):
def xpath_element(self, selector):
xpath = super(TranslatorMixin, self).xpath_element(selector)
return CdeXPathExpr.from_xpath(xpath)
def xpath_pseudo_element(self, xpath, pseudo_element):
if isinstance(pseudo_element, FunctionalPseudoElement):
method = 'xpath_%s_functional_pseudo_element' % (pseudo_element.name.replace('-', '_'))
method = _unicode_safe_getattr(self, method, None)
if not method:
raise ExpressionError("The functional pseudo-element ::%s() is unknown" % pseudo_element.name)
xpath = method(xpath, pseudo_element)
else:
method = 'xpath_%s_simple_pseudo_element' % (pseudo_element.replace('-', '_'))
method = _unicode_safe_getattr(self, method, None)
if not method:
raise ExpressionError("The pseudo-element ::%s is unknown" % pseudo_element)
xpath = method(xpath)
return xpath
def xpath_attr_functional_pseudo_element(self, xpath, function):
if function.argument_types() not in (['STRING'], ['IDENT']):
raise ExpressionError("Expected a single string or ident for ::attr(), got %r" % function.arguments)
return CdeXPathExpr.from_xpath(xpath, attribute=function.arguments[0].value)
def xpath_text_simple_pseudo_element(self, xpath):
"""Support selecting text nodes using ::text pseudo-element"""
return CdeXPathExpr.from_xpath(xpath, textnode=True)
class CssXmlTranslator(TranslatorMixin, GenericTranslator):
pass
class CssHTMLTranslator(TranslatorMixin, HTMLTranslator):
pass
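# Illustrative sketch (not part of the original module): converting CSS selectors that
# use the ::text / ::attr() pseudo-elements into XPath. The expected outputs shown in
# the comments are approximate.
#
#   translator = CssHTMLTranslator()
#   translator.css_to_xpath('a::attr(href)')   # roughly: descendant-or-self::a/@href
#   translator.css_to_xpath('p::text')         # roughly: descendant-or-self::p/text()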
| mit | -7,836,038,709,914,762,000 | 38.926829 | 112 | 0.687233 | false |
CommonsDev/dataserver | projects/migrations/0001_initial.py | 1 | 2013 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Project'
db.create_table(u'projects_project', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('slug', self.gf('autoslug.fields.AutoSlugField')(unique=True, max_length=50, populate_from='title', unique_with=())),
('baseline', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('begin_date', self.gf('django.db.models.fields.DateField')()),
('end_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
))
db.send_create_signal(u'projects', ['Project'])
def backwards(self, orm):
# Deleting model 'Project'
db.delete_table(u'projects_project')
models = {
u'projects.project': {
'Meta': {'object_name': 'Project'},
'baseline': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'begin_date': ('django.db.models.fields.DateField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'title'", 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['projects'] | agpl-3.0 | 6,827,235,984,690,537,000 | 46.952381 | 147 | 0.58768 | false |
att-comdev/drydock | drydock_provisioner/ingester/plugins/yaml.py | 1 | 24311 | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This data ingester will consume YAML site topology documents."""
import yaml
import logging
import base64
import jsonschema
import os
import pkg_resources
import drydock_provisioner.objects.fields as hd_fields
from drydock_provisioner import error as errors
from drydock_provisioner import objects
from drydock_provisioner.ingester.plugins import IngesterPlugin
class YamlIngester(IngesterPlugin):
def __init__(self):
super().__init__()
self.logger = logging.getLogger('drydock.ingester.yaml')
self.load_schemas()
def get_name(self):
return "yaml"
def ingest_data(self, **kwargs):
"""Parse and save design data.
:param filenames: Array of absolute path to the YAML files to ingest
:param content: String of valid YAML
returns a tuple of a status response and a list of parsed objects from drydock_provisioner.objects
"""
if 'content' in kwargs:
parse_status, models = self.parse_docs(kwargs.get('content'))
else:
raise ValueError('Missing parameter "content"')
return parse_status, models
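# Illustrative usage sketch (hypothetical file name, not part of the original module):
#
#   ingester = YamlIngester()
#   with open('site_design.yaml', 'rb') as f:
#       status, models = ingester.ingest_data(content=f.read())
#   # 'status' summarizes per-document processing; 'models' holds the parsed objects.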
def parse_docs(self, doc_blob):
"""Translate a YAML string into the internal Drydock model.
Returns a tuple of an objects.TaskStatus instance that summarizes all
document processing and a list of models yielded by successful processing.
:param doc_blob: bytes representing a utf-8 encoded YAML string
"""
models = []
yaml_string = doc_blob.decode()
self.logger.debug("yamlingester:parse_docs - Parsing YAML string.")
try:
parsed_data = yaml.safe_load_all(yaml_string)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
raise errors.IngesterError(
"Error parsing YAML at (l:%s, c:%s): %s" %
(mark.line + 1, mark.column + 1, err))
else:
raise errors.IngesterError("Error parsing YAML: %s" % (err))
# tracking processing status to provide a complete summary of issues
ps = objects.TaskStatus()
ps.set_status(hd_fields.ValidationResult.Success)
for d in parsed_data:
api = d.get('apiVersion', '')
if api.startswith('drydock/'):
try:
model = self.process_drydock_document(d)
ps.add_status_msg(
msg="Successfully processed Drydock document type %s."
% d.get('kind'),
error=False,
ctx_type='document',
ctx=model.get_id())
models.append(model)
except errors.IngesterError as ie:
msg = "Error processing document: %s" % str(ie)
self.logger.warning(msg)
if d.get('metadata', {}).get('name', None) is not None:
ctx = d.get('metadata').get('name')
else:
ctx = 'Unknown'
ps.add_status_msg(
msg=msg, error=True, ctx_type='document', ctx=ctx)
ps.set_status(hd_fields.ValidationResult.Failure)
except Exception as ex:
msg = "Unexpected error processing document: %s" % str(ex)
self.logger.error(msg, exc_info=True)
if d.get('metadata', {}).get('name', None) is not None:
ctx = d.get('metadata').get('name')
else:
ctx = 'Unknown'
ps.add_status_msg(
msg=msg, error=True, ctx_type='document', ctx=ctx)
ps.set_status(hd_fields.ValidationResult.Failure)
elif api.startswith('promenade/'):
(foo, api_version) = api.split('/')
if api_version == 'v1':
kind = d.get('kind')
metadata = d.get('metadata', {})
target = metadata.get('target', 'all')
name = metadata.get('name', None)
model = objects.PromenadeConfig(
target=target,
name=name,
kind=kind,
document=base64.b64encode(
bytearray(yaml.dump(d),
encoding='utf-8')).decode('ascii'))
ps.add_status_msg(
msg="Successfully processed Promenade document.",
error=False,
ctx_type='document',
ctx=name)
models.append(model)
return (ps, models)
def process_drydock_document(self, doc):
"""Process a parsed YAML document.
:param doc: The dictionary from parsing the YAML document
"""
kind = doc.get('kind', '')
doc_processor = YamlIngester.v1_doc_handlers.get(kind, None)
if doc_processor is None:
raise errors.IngesterError("Invalid document Kind %s" % kind)
metadata = doc.get('metadata', {})
doc_name = metadata.get('name')
return doc_processor(self, doc_name, doc.get('spec', {}))
def validate_drydock_document(self, doc):
"""Validate a parsed document via jsonschema.
If a schema for a document Kind is not available, the document is
considered valid. Schema is chosen by the doc['kind'] field.
Returns an empty list for valid documents; otherwise returns a list
of all errors found.
:param doc: dictionary of the parsed document.
"""
doc_kind = doc.get('kind')
if doc_kind in self.v1_doc_schemas:
validator = jsonschema.Draft4Validator(
self.v1_doc_schemas.get(doc_kind))
errors_found = []
for error in validator.iter_errors(doc):
errors_found.append(error.message)
return errors_found
else:
return []
def process_drydock_region(self, name, data):
"""Process the data/spec section of a Region document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Site()
# Need to add validation logic, we'll assume the input is
# valid for now
model.name = name
model.status = hd_fields.SiteStatus.Unknown
model.source = hd_fields.ModelSource.Designed
model.tag_definitions = objects.NodeTagDefinitionList()
tag_defs = data.get('tag_definitions', [])
for t in tag_defs:
tag_model = objects.NodeTagDefinition()
tag_model.tag = t.get('tag', '')
tag_model.type = t.get('definition_type', '')
tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']:
raise errors.IngesterError('Unknown definition_type in '
'tag_definition instance: %s' %
(tag_model.type))
model.tag_definitions.append(tag_model)
auth_keys = data.get('authorized_keys', [])
model.authorized_keys = [k for k in auth_keys]
return model
def process_drydock_rack(self, name, data):
"""Process the data/spec section of a Rack document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Rack()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.tor_switches = objects.TorSwitchList()
tors = data.get('tor_switches', {})
for k, v in tors.items():
tor = objects.TorSwitch()
tor.switch_name = k
tor.mgmt_ip = v.get('mgmt_ip', None)
tor.sdn_api_uri = v.get('sdn_api_url', None)
model.tor_switches.append(tor)
location = data.get('location', {})
model.location = dict()
for k, v in location.items():
model.location[k] = v
model.local_networks = [n for n in data.get('local_networks', [])]
return model
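# Illustrative example of the data/spec section this handler expects (field names are
# taken from the parsing code above; the values are invented):
#
#   tor_switches:
#     leaf01:
#       mgmt_ip: 10.0.0.2
#       sdn_api_url: http://sdn.example.com
#   location:
#     grid: EG12
#   local_networks:
#     - pxe-rack1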
def process_drydock_networklink(self, name, data):
"""Process the data/spec section of a NetworkLink document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.NetworkLink()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.metalabels = data.get('labels', {})
bonding = data.get('bonding', {})
model.bonding_mode = bonding.get(
'mode', hd_fields.NetworkLinkBondingMode.Disabled)
if model.bonding_mode in \
(hd_fields.NetworkLinkBondingMode.LACP,
hd_fields.NetworkLinkBondingMode.RoundRobin,
hd_fields.NetworkLinkBondingMode.Standby):
model.bonding_mon_rate = bonding.get('mon_rate', '100')
model.bonding_up_delay = bonding.get('up_delay', '200')
model.bonding_down_delay = bonding.get('down_delay', '200')
if model.bonding_mode == hd_fields.NetworkLinkBondingMode.LACP:
model.bonding_xmit_hash = bonding.get('hash', 'layer3+4')
model.bonding_peer_rate = bonding.get('peer_rate', 'fast')
model.mtu = data.get('mtu', None)
model.linkspeed = data.get('linkspeed', None)
trunking = data.get('trunking', {})
model.trunk_mode = trunking.get(
'mode', hd_fields.NetworkLinkTrunkingMode.Disabled)
model.native_network = trunking.get('default_network', None)
model.allowed_networks = data.get('allowed_networks', None)
return model
def process_drydock_network(self, name, data):
"""Process the data/spec section of a Network document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.Network()
model.source = hd_fields.ModelSource.Designed
model.name = name
model.metalabels = data.get('labels', {})
model.cidr = data.get('cidr', None)
model.vlan_id = data.get('vlan', None)
model.mtu = data.get('mtu', None)
model.routedomain = data.get('routedomain', None)
dns = data.get('dns', {})
model.dns_domain = dns.get('domain', 'local')
model.dns_servers = dns.get('servers', None)
ranges = data.get('ranges', [])
model.ranges = []
for r in ranges:
model.ranges.append({
'type': r.get('type', None),
'start': r.get('start', None),
'end': r.get('end', None),
})
routes = data.get('routes', [])
model.routes = []
for r in routes:
model.routes.append({
'subnet': r.get('subnet', None),
'gateway': r.get('gateway', None),
'metric': r.get('metric', None),
'routedomain': r.get('routedomain', None),
})
dhcp_relay = data.get('dhcp_relay', None)
if dhcp_relay is not None:
model.dhcp_relay_self_ip = dhcp_relay.get('self_ip', None)
model.dhcp_relay_upstream_target = dhcp_relay.get(
'upstream_target', None)
return model
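# Illustrative example of a Network data/spec section (keys mirror what the code above
# reads; the values are invented):
#
#   cidr: 172.16.1.0/24
#   vlan: '100'
#   mtu: 1500
#   dns:
#     domain: example.local
#     servers: 8.8.8.8
#   ranges:
#     - type: static
#       start: 172.16.1.15
#       end: 172.16.1.254
#   routes:
#     - subnet: 0.0.0.0/0
#       gateway: 172.16.1.1
#       metric: 10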
def process_drydock_hwprofile(self, name, data):
"""Process the data/spec section of a HardwareProfile document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.HardwareProfile()
model.name = name
model.source = hd_fields.ModelSource.Designed
model.vendor = data.get('vendor', None)
model.generation = data.get('generation', None)
model.hw_version = data.get('hw_version', None)
model.bios_version = data.get('bios_version', None)
model.boot_mode = data.get('boot_mode', None)
model.bootstrap_protocol = data.get('bootstrap_protocol', None)
model.pxe_interface = data.get('pxe_interface', None)
model.devices = objects.HardwareDeviceAliasList()
device_aliases = data.get('device_aliases', {})
for d, v in device_aliases.items():
dev_model = objects.HardwareDeviceAlias()
dev_model.source = hd_fields.ModelSource.Designed
dev_model.alias = d
dev_model.bus_type = v.get('bus_type', None)
dev_model.dev_type = v.get('dev_type', None)
dev_model.address = v.get('address', None)
model.devices.append(dev_model)
return model
def process_drydock_hostprofile(self, name, data):
"""Process the data/spec section of a HostProfile document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.HostProfile()
model.name = name
model.source = hd_fields.ModelSource.Designed
self.process_host_common_fields(data, model)
return model
def process_drydock_bootaction(self, name, data):
"""Process the data/spec section of a BootAction document.
:param name: the document name attribute
:param data: the dictionary of the parsed data/spec section
"""
model = objects.BootAction()
model.name = name
model.source = hd_fields.ModelSource.Designed
assets = data.get('assets')
model.asset_list = objects.BootActionAssetList()
for a in assets:
ba = self.process_bootaction_asset(a)
model.asset_list.append(ba)
node_filter = data.get('node_filter', None)
if node_filter is not None:
nfs = self.process_bootaction_nodefilter(node_filter)
model.node_filter = nfs
model.signaling = data.get('signaling', None)
return model
def process_bootaction_asset(self, asset_dict):
"""Process a dictionary representing a BootAction Data Asset.
:param asset_dict: dictionary representing the bootaction asset
"""
model = objects.BootActionAsset(**asset_dict)
return model
def process_bootaction_nodefilter(self, nf):
"""Process a dictionary representing a BootAction NodeFilter Set.
:param nf: dictionary representing the bootaction nodefilter set.
"""
model = objects.NodeFilterSet()
model.filter_set_type = nf.get('filter_set_type', None)
model.filter_set = []
for nf in nf.get('filter_set', []):
nf_model = objects.NodeFilter(**nf)
model.filter_set.append(nf_model)
return model
def process_drydock_node(self, name, data):
"""Process the data/spec section of a BaremetalNode document.
:param name: the document name attribute
:param data: the dictionary of the data/spec section
"""
model = objects.BaremetalNode()
model.name = name
model.source = hd_fields.ModelSource.Designed
self.process_host_common_fields(data, model)
node_metadata = data.get('metadata', {})
model.boot_mac = node_metadata.get('boot_mac', None)
addresses = data.get('addressing', [])
if len(addresses) == 0:
raise errors.IngesterError('BaremetalNode needs at least'
' 1 assigned address')
model.addressing = objects.IpAddressAssignmentList()
for a in addresses:
assignment = objects.IpAddressAssignment()
address = a.get('address', '')
if address == 'dhcp':
assignment.type = 'dhcp'
assignment.address = None
assignment.network = a.get('network')
model.addressing.append(assignment)
elif address != '':
assignment.type = 'static'
assignment.address = a.get('address')
assignment.network = a.get('network')
model.addressing.append(assignment)
else:
self.logger.error("Invalid address assignment %s on Node %s" %
(address, name))
return model
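# Illustrative addressing fragment for a BaremetalNode spec (structure matches the loop
# above; the addresses and network names are examples only):
#
#   addressing:
#     - network: oob
#       address: dhcp
#     - network: mgmt
#       address: 172.16.1.20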
def process_host_common_fields(self, data, model):
"""Process fields common to the host-based documents.
Update the provided model with the values of fields common
to BaremetalNode and HostProfile documents.
:param data: dictionary from YAML parsing of the document data/spec section
:param model: instance of objects.HostProfile or objects.BaremetalNode to update
"""
model.parent_profile = data.get('host_profile', None)
model.hardware_profile = data.get('hardware_profile', None)
oob = data.get('oob', {})
model.oob_parameters = {}
for k, v in oob.items():
if k == 'type':
model.oob_type = oob.get('type', None)
else:
model.oob_parameters[k] = v
(model.storage_devices,
model.volume_groups) = self.process_node_storage(
data.get('storage', {}))
interfaces = data.get('interfaces', {})
model.interfaces = objects.HostInterfaceList()
for k, v in interfaces.items():
int_model = objects.HostInterface()
# A null value indicates this interface should be removed
# from any parent profiles
if v is None:
int_model.device_name = '!' + k
continue
int_model.device_name = k
int_model.network_link = v.get('device_link', None)
int_model.hardware_slaves = []
slaves = v.get('slaves', [])
for s in slaves:
int_model.hardware_slaves.append(s)
int_model.networks = []
networks = v.get('networks', [])
for n in networks:
int_model.networks.append(n)
model.interfaces.append(int_model)
platform = data.get('platform', {})
model.image = platform.get('image', None)
model.kernel = platform.get('kernel', None)
model.kernel_params = {}
for k, v in platform.get('kernel_params', {}).items():
model.kernel_params[k] = v
model.primary_network = data.get('primary_network', None)
node_metadata = data.get('metadata', {})
metadata_tags = node_metadata.get('tags', [])
model.tags = metadata_tags
owner_data = node_metadata.get('owner_data', {})
model.owner_data = {}
for k, v in owner_data.items():
model.owner_data[k] = v
model.rack = node_metadata.get('rack', None)
return model
def process_node_storage(self, storage):
"""Process the storage data for a node-based document.
Return a tuple of two lists: the first is a StorageDeviceList, the
second is a VolumeGroupList.
:param storage: dictionary of the storage section of a document
"""
phys_devs = storage.get('physical_devices', {})
storage_devices = objects.HostStorageDeviceList()
for k, v in phys_devs.items():
sd = objects.HostStorageDevice(name=k)
sd.source = hd_fields.ModelSource.Designed
if 'labels' in v:
sd.labels = v.get('labels').copy()
if 'volume_group' in v:
vg = v.get('volume_group')
sd.volume_group = vg
elif 'partitions' in v:
sd.partitions = objects.HostPartitionList()
for vv in v.get('partitions', []):
part_model = objects.HostPartition()
part_model.name = vv.get('name')
part_model.source = hd_fields.ModelSource.Designed
part_model.part_uuid = vv.get('part_uuid', None)
part_model.size = vv.get('size', None)
if 'labels' in vv:
part_model.labels = vv.get('labels').copy()
if 'volume_group' in vv:
part_model.volume_group = vv.get('volume_group')
elif 'filesystem' in vv:
fs_info = vv.get('filesystem', {})
part_model.mountpoint = fs_info.get('mountpoint', None)
part_model.fstype = fs_info.get('fstype', 'ext4')
part_model.mount_options = fs_info.get(
'mount_options', 'defaults')
part_model.fs_uuid = fs_info.get('fs_uuid', None)
part_model.fs_label = fs_info.get('fs_label', None)
sd.partitions.append(part_model)
storage_devices.append(sd)
volume_groups = objects.HostVolumeGroupList()
vol_groups = storage.get('volume_groups', {})
for k, v in vol_groups.items():
vg = objects.HostVolumeGroup(name=k)
vg.vg_uuid = v.get('vg_uuid', None)
vg.logical_volumes = objects.HostVolumeList()
volume_groups.append(vg)
for vv in v.get('logical_volumes', []):
lv = objects.HostVolume(name=vv.get('name'))
lv.size = vv.get('size', None)
lv.lv_uuid = vv.get('lv_uuid', None)
if 'filesystem' in vv:
fs_info = vv.get('filesystem', {})
lv.mountpoint = fs_info.get('mountpoint', None)
lv.fstype = fs_info.get('fstype', 'ext4')
lv.mount_options = fs_info.get('mount_options', 'defaults')
lv.fs_uuid = fs_info.get('fs_uuid', None)
lv.fs_label = fs_info.get('fs_label', None)
vg.logical_volumes.append(lv)
return (storage_devices, volume_groups)
def load_schemas(self):
self.v1_doc_schemas = dict()
schema_dir = self._get_schema_dir()
for schema_file in os.listdir(schema_dir):
f = open(os.path.join(schema_dir, schema_file), 'r')
for schema in yaml.safe_load_all(f):
schema_for = schema['metadata']['name']
if schema_for in self.v1_doc_schemas:
self.logger.warning(
"Duplicate document schemas found for document kind %s."
% schema_for)
self.logger.debug(
"Loaded schema for document kind %s." % schema_for)
self.v1_doc_schemas[schema_for] = schema
f.close()
def _get_schema_dir(self):
return pkg_resources.resource_filename('drydock_provisioner',
'schemas')
# Mapping of handlers for different document kinds
v1_doc_handlers = {
'Region': process_drydock_region,
'Rack': process_drydock_rack,
'NetworkLink': process_drydock_networklink,
'Network': process_drydock_network,
'HardwareProfile': process_drydock_hwprofile,
'HostProfile': process_drydock_hostprofile,
'BaremetalNode': process_drydock_node,
'BootAction': process_drydock_bootaction,
}
| apache-2.0 | 6,543,234,833,431,814,000 | 35.890744 | 106 | 0.561556 | false |
mathgl67/pymmr | mmr/gtk/main_window.py | 1 | 10049 | #!/usr/bin/env python
# vi:ai:et:ts=4 sw=4
#
# -*- coding: utf8 -*-
#
# PyMmr My Music Renamer
# Copyright (C) 2007-2010 [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import gtk
import gobject
import threading
import copy
from mmr.folder import Folder
from mmr.album import Album
from mmr.plugin import PluginManager
from mmr.callback import Callback
from mmr.investigate_album import InvestigateAlbum
from mmr.gtk.error_message import ErrorMessage
from mmr.gtk.folder_view import FolderView
from mmr.gtk.album_view import AlbumView
from mmr.gtk.tracks_investigation_view import TracksInvestigationView
from mmr.gtk.tracks_view import TracksView
from mmr.gtk.plugin_manager import PluginManagerDialog
from fractions import Fraction
class MainWindow(object):
def __init__(self, config):
self.__init_builder__()
self.__init_window__()
self.__init_widgets__()
self.__init_views__()
self.config = config
self.plugin_manager = PluginManager(copy.deepcopy(self.config["pluginmanager"]))
self.plugin_manager.ensure_path_list_in_sys_path()
self.plugin_manager.load_all()
self._cur_folder_iter_ = None
self._statusbar_ctx_ = self._widgets_['statusbar'].get_context_id("StatusBar")
def __init_builder__(self):
# init builder
self._interface_file_ = "mmr/gtk/main_window.glade"
try:
self._builder_ = gtk.Builder()
self._builder_.add_from_file(self._interface_file_)
except:
err = ErrorMessage("Cannot load interface file: %s" % (
self._interface_file_
))
err.display_and_exit()
def __init_window__(self):
self._window_ = self._builder_.get_object("main_window")
self._builder_.connect_signals(self)
def __init_widgets__(self):
self._widgets_ = {
"statusbar": self._builder_.get_object("statusbar"),
"folder_view": self._builder_.get_object("folder_view"),
"album": {
"artist": self._builder_.get_object("entry_artist"),
"album": self._builder_.get_object("entry_album"),
"genre": self._builder_.get_object("entry_genre"),
"year": self._builder_.get_object("entry_year"),
},
"album_view": self._builder_.get_object("album_view"),
"tracks_investigation_view": self._builder_.get_object("view_tracks_investigation"),
"tracks_view": self._builder_.get_object("view_tracks_result"),
"progressbar1": self._builder_.get_object("progressbar1"),
}
def __init_views__(self):
self._views_ = {
"folder": FolderView(self._widgets_["folder_view"]),
"album": AlbumView(self._widgets_["album_view"]),
"tracks_investigation": TracksInvestigationView(self._widgets_["tracks_investigation_view"]),
"tracks_view": TracksView(self._widgets_["tracks_view"]),
}
# util
def show(self):
self._window_.show()
def set_statusbar_text(self, text):
self._widgets_['statusbar'].push(self._statusbar_ctx_, text)
# update function
def _update_album_(self):
if self._cur_folder_iter_:
# update album entry
album = self._views_['folder'].get_album(self._cur_folder_iter_)
if album:
if album.artist:
self._widgets_["album"]["artist"].set_text(album.artist)
else:
self._widgets_["album"]["artist"].set_text("")
if album.album:
self._widgets_["album"]["album"].set_text(album.album)
else:
self._widgets_["album"]["album"].set_text("")
if album.genre:
self._widgets_["album"]["genre"].set_text(album.genre)
else:
self._widgets_["album"]["genre"].set_text("")
if album.year:
self._widgets_["album"]["year"].set_text(str(album.year))
else:
self._widgets_["album"]["year"].set_text("")
else:
# blank it
for key in ['artist', 'album', 'genre', 'year']:
self._widgets_['album'][key].set_text("")
# update album_view
self._views_['album'].clear()
investigate_album = self._views_['folder'].get_investigate_album(self._cur_folder_iter_)
if investigate_album:
for result in investigate_album.result_list:
self._views_['album'].append(result)
# signals
def on_main_window_destroy(self, widget, data=None):
gtk.main_quit()
def on_menuitem_quit_activate(self, widget, data=None):
gtk.main_quit()
def on_button_investigate_clicked(self, widget, data=None):
def on_module_start(self, module_name):
self._widgets_['progressbar1'].set_text(module_name)
def on_module_end(self, module_name):
self._widgets_['progressbar1'].set_fraction(
self._widgets_['progressbar1'].get_fraction() + self.step
)
self._widgets_['progressbar1'].set_text("Done")
def thread(self):
gobject.idle_add(widget.set_sensitive, False)
self._widgets_['progressbar1'].set_fraction(0)
self.step = Fraction(1, len(self.plugin_manager.find(u"research")))
folder = self._views_['folder'].get_folder(self._cur_folder_iter_)
investigate_album = InvestigateAlbum(
config=self.config,
folder=folder,
plugin_manager=self.plugin_manager
)
investigate_album.cb_module_start = Callback(on_module_start, self)
investigate_album.cb_module_end = Callback(on_module_end, self)
investigate_album.investigate()
investigate_album.result_list.sort()
self._views_['folder'].set_investigate_album(
self._cur_folder_iter_,
investigate_album
)
gobject.idle_add(self._update_album_)
gobject.idle_add(widget.set_sensitive, True)
print "investigate"
if self._cur_folder_iter_:
thread = threading.Thread(target=thread, args = [self])
thread.start()
def on_button_validate_clicked(self, widget, data=None):
print "validate!"
if self._cur_folder_iter_:
album = Album('validate')
album.artist = self._widgets_['album']['artist'].get_text()
album.album = self._widgets_['album']['album'].get_text()
album.genre = self._widgets_['album']['genre'].get_text()
try:
album.year = int(self._widgets_['album']['year'].get_text())
except:
err = ErrorMessage("Cannot convert year to integer!")
err.display()
self._views_['folder'].set_album(self._cur_folder_iter_, album)
def on_button_set_clicked(self, widget, data=None):
print "set!"
it = self._views_['album'].get_selected()
if it and self._cur_folder_iter_:
self._views_['folder'].set_album(self._cur_folder_iter_,
self._views_['album'].get_album(it)
)
self._update_album_()
def on_folder_view_row_activated(self, treeview, path, view_column):
self._cur_folder_iter_ = self._views_['folder'].get_selected()
# should update...
self._update_album_()
def on_toolbutton_list_add_clicked(self, widget, data=None):
dialog = gtk.FileChooserDialog(
title="Directory selection",
parent=self._window_,
action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)
)
response = dialog.run()
if response == gtk.RESPONSE_ACCEPT:
folder_path = dialog.get_filename()
try:
folder = Folder.factory(folder_path)
if folder:
self._views_['folder'].append(folder)
except:
err = ErrorMessage("Cannot add floder: %s" % (folder_path))
err.display()
dialog.destroy()
def on_toolbutton_list_rem_clicked(self, widget, data=None):
iter = self._views_['folder'].get_selected()
if iter:
self._views_['folder'].remove(iter)
def on_toolbutton_list_investigate_clicked(self, widget, data=None):
for it in self._views_["folder"].get_folder_iter_list():
folder = self._views_['folder'].get_folder(it)
investigate_album = InvestigateAlbum(
config=self.config,
folder=folder,
plugin_manager=self.plugin_manager
)
investigate_album.investigate()
investigate_album.sort()
self._views_['folder'].set_investigate_album(it, investigate_album)
self._update_album_()
def on_menuitem_plugins_activate(self, widget, data=None):
plugin_manager_dialog = PluginManagerDialog(self.config, self.plugin_manager)
plugin_manager_dialog.show()
| gpl-2.0 | -3,168,804,858,337,180,000 | 37.65 | 103 | 0.582546 | false |
morucci/repoxplorer | repoxplorer/controllers/users.py | 1 | 6635 | # Copyright 2016-2017, Fabien Boucher
# Copyright 2016-2017, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pecan import conf
from pecan import abort
from pecan import expose
from pecan import request
from pecan import response
from pecan.rest import RestController
from repoxplorer import index
from repoxplorer.exceptions import UnauthorizedException
from repoxplorer.index import users
from repoxplorer.controllers import utils
if conf.get('users_endpoint', False) and conf.get('oidc', False):
from repoxplorer.auth import OpenIDConnectEngine as AuthEngine
else:
from repoxplorer.auth import CAuthEngine as AuthEngine
AUTH_ENGINE = AuthEngine()
xorkey = conf.get('xorkey') or 'default'
class UsersController(RestController):
auth = AUTH_ENGINE
def abort_if_not_active(self):
if not self.auth.is_configured():
abort(403)
def _authorize(self, uid=None):
self.abort_if_not_active()
# Shortcircuit the authorization for testing purpose
# return
try:
self.auth.authorize(request, uid)
except UnauthorizedException as e:
abort(401, str(e))
except Exception as e:
abort(500, "Unexpected error: %s" % e)
self.auth.provision_user(request)
def _validate(self, data):
mandatory_keys = (
'uid', 'name', 'default-email', 'emails')
email_keys = (
('email', True),
('groups', False))
group_keys = (
('group', True),
('begin-date', False),
('end-date', False))
# All keys must be provided
if set(data.keys()) != set(mandatory_keys):
# Mandatory keys are missing
return False
if not isinstance(data['emails'], list):
# Wrong data type for email
return False
if len(data['name']) >= 100:
return False
mekeys = set([mk[0] for mk in email_keys if mk[1]])
mgkeys = set([mk[0] for mk in group_keys if mk[1]])
if data['emails']:
for email in data['emails']:
if not mekeys.issubset(set(email.keys())):
# Mandatory keys are missing
return False
if not set(email.keys()).issubset(
set([k[0] for k in email_keys])):
# Found extra keys
return False
if 'groups' in email.keys():
for group in email['groups']:
if not mgkeys.issubset(set(group.keys())):
# Mandatory keys are missing
return False
if not set(group.keys()).issubset(
set([k[0] for k in group_keys])):
# Found extra keys
return False
return True
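# Illustrative payload accepted by _validate (field names follow the checks above; the
# identities themselves are fictional):
#
#   {"uid": "jdoe", "name": "John Doe", "default-email": "[email protected]",
#    "emails": [{"email": "[email protected]",
#                "groups": [{"group": "dev", "begin-date": "2016-01-01"}]}]}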
def _modify_protected_fields(self, prev, new):
if new['uid'] != prev['uid']:
return True
if new['default-email'] != prev['default-email']:
return True
# Adding or removing emails is forbidden
prev_emails = set([e['email'] for e in prev['emails']])
new_emails = set([e['email'] for e in new['emails']])
if (not new_emails.issubset(prev_emails) or
not prev_emails.issubset(new_emails)):
return True
return False
# curl -H 'Remote-User: admin' -H 'Admin-Token: abc' \
# "http://localhost:51000/api/v1/users/fabien"
@expose('json')
def get(self, uid):
self._authorize(uid)
_users = users.Users(
index.Connector(index_suffix='users'))
u = _users.get(uid)
if not u:
abort(404)
u['cid'] = utils.encrypt(xorkey, u['default-email'])
return u
@expose('json')
def delete(self, uid):
self._authorize(uid)
_users = users.Users(
index.Connector(index_suffix='users'))
u = _users.get(uid)
if not u:
abort(404)
_users.delete(uid)
# curl -X PUT -H 'Remote-User: admin' -H 'Admin-Token: abc' \
# -H "Content-Type: application/json" --data \
# '{"uid":"fabien","name":"Fabien Boucher","default-email": \
# "[email protected]","emails": [{"email": "[email protected]"}]}' \
# "http://localhost:51000/api/v1/users/fabien"
@expose('json')
def put(self, uid):
# We don't pass uid to authorize, then only admin logged with
# admin token will be authorized
self._authorize()
_users = users.Users(
index.Connector(index_suffix='users'))
u = _users.get(uid)
if u:
abort(409)
infos = request.json if request.content_length else {}
if not self._validate(infos):
abort(400)
# Need to check infos content
infos['uid'] = uid
_users.create(infos)
response.status = 201
# curl -X POST -H 'Remote-User: admin' -H 'Admin-Token: abc' \
# -H "Content-Type: application/json" --data \
# '{"uid":"fabien","name":"Fabien Boucher","default-email": \
# "[email protected]","emails": [{"email": "[email protected]"}, \
# {"email": "[email protected]"}]}' \
# "http://localhost:51000/api/v1/users/fabien"
@expose('json')
def post(self, uid):
requester = self._authorize(uid)
_users = users.Users(
index.Connector(index_suffix='users'))
u = _users.get(uid)
if not u:
abort(404)
infos = request.json if request.content_length else {}
infos['uid'] = uid
# Can be provided by mistake, just remove it
if 'cid' in infos:
del infos['cid']
if not self._validate(infos):
abort(400)
if requester != 'admin':
# User is not allowed to modify some raw_fields
# like adding or removing emails ...
if self._modify_protected_fields(u, infos):
abort(403)
_users.update(infos)
| apache-2.0 | 4,234,832,941,563,420,700 | 34.864865 | 76 | 0.567596 | false |
picklecai/OMOOC2py | _src/om2pyItem/scaffold/main.py | 1 | 11733 | # _*_ coding:utf-8 _*_
#qpy:webapp:babyrecord
#qpy:fullscreen
#qpy://localhost:8800
"""
Babyrecordapp
@Author Picklecai
"""
import os
from os.path import exists
from bottle import Bottle, ServerAdapter, request, template
import sqlite3
import time
import datetime
from email.header import Header
ROOT = os.path.dirname(os.path.abspath(__file__))
class MyWSGIRefServer(ServerAdapter):
server = None
def run(self, handler):
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
self.options['handler_class'] = QuietHandler
self.server = make_server(self.host, self.port, handler, **self.options)
self.server.serve_forever()
def stop(self):
# sys.stderr.close()
import threading
threading.Thread(target=self.server.shutdown).start()
# self.server.shutdown()
self.server.server_close()
print "# QWEBAPPEND"
def __exit():
global server
server.stop()
def __ping():
return "OK"
def readbabyname():
filename = ROOT+'/babyinfo.db'
if exists(filename):
conn = sqlite3.connect(ROOT+'/babyinfo.db')
cursor = conn.cursor()
cursor.execute('select name from babyinfo order by settingtime desc limit 0,1')
n = str(cursor.fetchall())
name = n[4:-4].decode('unicode_escape')
tips = u"宝宝:%s" % name
else:
tips = "友情提醒:如果第一次使用,请先点击菜单“宝宝信息”上传您宝宝的基本信息,否则系统无法计算宝宝年龄。"
return tips
def home():
filename = ROOT+'/babyinfo.db'
if exists(filename):
conn = sqlite3.connect(ROOT+'/babyinfo.db')
cursor = conn.cursor()
cursor.execute('select name from babyinfo order by settingtime desc limit 0,1')
n = str(cursor.fetchall())
name = n[4:-4].decode('unicode_escape')
tips = u"宝宝:%s" % name
return template(ROOT+'/index.html', tips=tips)
else:
name = "未设置"
gender = "未设置"
birthtime = "未设置"
momemail = "未设置"
tips = "请上传您宝宝的基本信息,否则系统无法计算宝宝年龄。"
return template(ROOT+'/baby.html', name=name, gender=gender, birthtime=birthtime, momemail=momemail, tips=tips)
def inputnewline(data):
# 'data' already holds the (time, age, record) tuple built by the caller (see save()),
# so the request form does not need to be re-read here.
conn = sqlite3.connect(ROOT+'/noterecord.db')
cursor = conn.cursor()
cursor.execute('create table if not exists record (time text, age text, record text)')
cursor.execute('insert into record (time, age, record) values (?,?,?)', data)
cursor.close()
conn.commit()
conn.close()
def save():
newline = request.forms.get('newline')
nowtime = time.strftime("%d/%m/%Y %H:%M:%S")
babyage = calbabyage()
data = nowtime.decode('utf-8'), babyage, newline.decode('utf-8')
inputnewline(data)
tips = readbabyname()
return template(ROOT+'/index.html', tips = tips)
def history():
conn = sqlite3.connect(ROOT+'/noterecord.db')
cursor = conn.cursor()
cursor.execute('create table if not exists record (time text, age text, record text)')
cursor.execute('select * from record')
notelist = cursor.fetchall()
tips = readbabyname()
return template(ROOT+'/history.html', historylabel=notelist, tips=tips)
def createbaby(data):
conn = sqlite3.connect(ROOT+'/babyinfo.db')
cursor = conn.cursor()
cursor.execute('create table if not exists babyinfo (name text, gender text, birthtime text, momemail text, settingtime text)')
cursor.execute('insert into babyinfo (name, gender, birthtime, momemail, settingtime) values (?,?,?,?,?)', data)
cursor.close()
conn.commit()
conn.close()
def readbaby():
conn = sqlite3.connect(ROOT+'/babyinfo.db')
cursor = conn.cursor()
cursor.execute('create table if not exists babyinfo (name text, gender text, birthtime text, momemail text, settingtime text)')
cursor.execute('select * from babyinfo')
babyinfolist = cursor.fetchall()
return babyinfolist
def calbabyage():
today = datetime.date.today()
filename = ROOT+'/babyinfo.db'
if exists(filename):
conn = sqlite3.connect(ROOT+'/babyinfo.db')
cursor = conn.cursor()
cursor.execute('select birthtime from babyinfo order by settingtime desc limit 0,1')
bn = str(cursor.fetchall())
babybirth = datetime.date(int(bn[4:8]), int(bn[9:11]), int(bn[12:14]))
babyage = str((today - babybirth).days)
return babyage
def baby():
filename = ROOT+'/babyinfo.db'
if exists(filename):
conn = sqlite3.connect(ROOT+'/babyinfo.db')
cursor = conn.cursor()
cursor.execute('select name from babyinfo order by settingtime desc limit 0,1')
n = str(cursor.fetchall())
name = n[4:-4].decode('unicode_escape')
cursor.execute('select gender from babyinfo order by settingtime desc limit 0,1')
g = str(cursor.fetchall())
gender = g[4:-4].decode('unicode_escape')
cursor.execute('select birthtime from babyinfo order by settingtime desc limit 0,1')
bn = str(cursor.fetchall())
birthtime = datetime.date(int(bn[4:8]), int(bn[9:11]), int(bn[12:14]))
cursor.execute('select momemail from babyinfo order by settingtime desc limit 0,1')
em = str(cursor.fetchall())
momemail = em[4:-4].decode('utf-8')
tips = u"宝宝:%s"%name
else:
name = "未设置"
gender = "未设置"
birthtime = "未设置"
momemail = "未设置"
tips = "请上传您宝宝的基本信息,否则系统无法计算宝宝年龄。"
return template(ROOT+'/baby.html', name=name, gender=gender, birthtime=birthtime, momemail=momemail, tips=tips)
def savebaby():
name = request.forms.get('name')
gender = request.forms.get('gender')
birthtime = datetime.date(int(request.forms.get('year')), int(request.forms.get('month')), int(request.forms.get('date')))
momemail = request.forms.get('email')
settingtime = time.strftime("%d/%m/%Y %H:%M:%S")
if name==None or gender==None or birthtime==None or validateEmail(momemail)== 0:
name = "重新设置"
gender = "重新设置"
birthtime = "重新设置"
momemail = "重新设置"
tips = "请重新设置宝宝信息及妈妈邮箱。"
return template(ROOT+'/baby.html', name=name, gender=gender, birthtime=birthtime, momemail=momemail, tips=tips)
else:
data = name.decode('utf-8'), gender.decode('utf-8'), birthtime, momemail, settingtime
tips = "宝宝:%s" % name
createbaby(data)
readbaby()
return template(ROOT+'/baby2.html', name=name, gender=gender, birthtime=birthtime, momemail=momemail, tips=tips)
def _format_addr(s):
from email.utils import parseaddr, formataddr
name, addr = parseaddr(s)
return formataddr(( \
Header(name, 'utf-8').encode(), \
addr.encode('utf-8') if isinstance(addr, unicode) else addr))
def validateEmail(email):
import re
if len(email) > 7:
if re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email) != None:
return 1
return 0
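# Illustrative behaviour (addresses are examples only):
#
#   validateEmail("[email protected]")   # -> 1 (looks like a valid address)
#   validateEmail("not-an-email")      # -> 0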
def sendmail():
# Import the email modules
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
import smtplib
# Set up the mail parameters
from_addr = "[email protected]"
password = "ahcai318"
smtp_server = "smtp.163.com"
filename = ROOT+'/babyinfo.db'
if exists(filename):
conn = sqlite3.connect(ROOT+'/babyinfo.db')
cursor = conn.cursor()
cursor.execute('select momemail from babyinfo order by settingtime desc limit 0,1')
em = str(cursor.fetchall())
momemail = em[4:-4].decode('utf-8')
else:
momemail = "[email protected]"
to_addr = momemail
historyrecord = ROOT+'/noterecord.db'
# Send the mail
if exists(historyrecord):
msg = MIMEMultipart()
msg['From'] = _format_addr(u'我在长大 <%s>' % from_addr)
msg['To'] = _format_addr(u'亲爱的妈妈 <%s>' % to_addr)
msg['Subject'] = Header(u'您的宝宝记录……', 'utf-8').encode()
msg.attach(MIMEText('附件是您宝宝的日常记录,请查收。祝您生活愉快!宝宝健康快乐!', 'plain', 'utf-8'))
with open(historyrecord, 'rb') as f:
mime = MIMEBase('database', 'db', filename='noterecord.db')
mime.add_header('Content-Disposition', 'attachment', filename='noterecord.db')
mime.add_header('Content-ID', '<0>')
mime.add_header('X-Attachment-Id', '0')
mime.set_payload(f.read())
encoders.encode_base64(mime)
msg.attach(mime)
else:
msg = MIMEText('您尚未开始记录宝宝的日常记录,记录后可收到带宝宝记录附件的邮件!', 'plain', 'utf-8')
msg['From'] = _format_addr(u'我在长大 <%s>' % from_addr)
msg['To'] = _format_addr(u'亲爱的妈妈 <%s>' % to_addr)
msg['Subject'] = Header(u'您的宝宝记录……', 'utf-8').encode()
server = smtplib.SMTP(smtp_server, 25) # SMTP协议默认端口是25
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
return template(ROOT+'/email.html', momemail=momemail)
def savephotoname(data):
# Save the photo name list
conn = sqlite3.connect(ROOT+'/photoname.db')
cursor = conn.cursor()
cursor.execute('create table if not exists photoname (time text, name text)')
cursor.execute('insert into photoname (time, name) values (?,?)', data)
cursor.close()
conn.commit()
conn.close()
def readphotoname():
conn = sqlite3.connect(ROOT+'/photoname.db')
cursor = conn.cursor()
cursor.execute('create table if not exists photoname (time text, name text)')
cursor.execute('select * from photoname')
namelist = cursor.fetchall()
return namelist
def camerababy():
import androidhelper
droid = androidhelper.Android()
if not exists(ROOT+'/photo'):
photoid = 1
os.makedirs(ROOT+'/photo')
else:
photoid = sum([len(files) for root,dirs,files in os.walk(ROOT+'/photo')]) + 1
# Build the photo file name
photoname = str('babyrecordphoto%d.jpg' % photoid)
# Take the picture
droid.cameraInteractiveCapturePicture(ROOT+'/photo/%s' % photoname)
# Save the photo name
nowtime = time.strftime("%d/%m/%Y %H:%M:%S")
data = nowtime, photoname
savephotoname(data)
# Read back the list of photo names
namelist = readphotoname()
return template(ROOT+'/camera.html', photoid=photoid, photoname=namelist)
app = Bottle()
app.route('/', method='GET')(home)
app.route('/index.html', method='POST')(save)
app.route('/history.html', method='GET')(history)
app.route('/baby.html', method='GET')(baby)
app.route('/baby2.html', method='POST')(savebaby)
app.route('/email.html', method='GET')(sendmail)
app.route('/camera.html', method='GET')(camerababy)
app.route('/__exit', method=['GET', 'HEAD'])(__exit)
app.route('/__ping', method=['GET', 'HEAD'])(__ping)
try:
server = MyWSGIRefServer(host="localhost", port="8800")
app.run(server=server, reloader=False)
except Exception, ex:
print "Exception: %s" % repr(ex) | mit | -2,399,050,248,471,428,600 | 35.950166 | 131 | 0.633216 | false |
pi-one/amunweb | amunweb/amun/models.py | 1 | 2030 | from django.db import models
class initial_connection(models.Model):
attacker_ip = models.IPAddressField()
attacker_port = models.PositiveIntegerField()
victim_ip = models.IPAddressField()
victim_port = models.PositiveIntegerField()
count = models.PositiveIntegerField(default=1)
first_seen = models.DateTimeField(null=True, auto_now=False, auto_now_add=True)
last_seen = models.DateTimeField(null=True, auto_now=True, auto_now_add=False)
sensor_id = models.PositiveIntegerField()
class Meta:
unique_together = ('attacker_ip', 'victim_ip', 'victim_port', 'sensor_id')
index_together = [
["attacker_ip", "victim_ip", "victim_port", "sensor_id"]
]
get_latest_by = 'last_seen'
class successful_exploit(models.Model):
attacker = models.ForeignKey(initial_connection)
vulnerability_name = models.CharField(max_length=256, blank=True, db_index=True)
download_method = models.CharField(max_length=256, blank=True)
shellcode_name = models.CharField(max_length=256, blank=True)
count = models.PositiveIntegerField(default=1)
first_seen = models.DateTimeField(auto_now=False, auto_now_add=True)
last_seen = models.DateTimeField(auto_now=True, auto_now_add=False)
class Meta:
get_latest_by = 'last_seen'
index_together = [
["vulnerability_name", "download_method", "shellcode_name"]
]
class successful_submission(models.Model):
exploit = models.ForeignKey(successful_exploit)
download_url = models.CharField(max_length=1025)
download_method = models.CharField(max_length=256, blank=True)
md5_hash = models.CharField(max_length=32)
sha256_hash = models.CharField(max_length=64)
count = models.PositiveIntegerField(default=1)
first_seen = models.DateTimeField(auto_now=False, auto_now_add=True)
last_seen = models.DateTimeField(auto_now=True, auto_now_add=False)
class Meta:
unique_together = ('download_method', 'download_url', 'md5_hash', 'sha256_hash')
index_together = [
["download_method", "download_url", "md5_hash", "sha256_hash"]
]
get_latest_by = 'last_seen'
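# Illustrative queries against these models (a sketch; assumes the Django ORM is set up
# and the filter value is invented):
#
#   recent = initial_connection.objects.order_by('-last_seen')[:10]
#   count = successful_exploit.objects.filter(vulnerability_name='SOME_VULN').count()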
| gpl-2.0 | 860,344,446,100,767,600 | 39.6 | 82 | 0.738424 | false |
Atrox/haikunatorpy | setup.py | 1 | 1025 | #! /usr/bin/env python
from setuptools import setup
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
]
setup(name='haikunator',
author='Atrox',
author_email='[email protected]',
description='Heroku-like random name generator for python.',
license='BSD',
version='2.1.0',
url='https://github.com/Atrox/haikunatorpy',
packages=['haikunator'],
test_suite='haikunator.tests',
include_package_data=True,
classifiers=CLASSIFIERS,
platforms=['any'])
| bsd-3-clause | 5,656,050,360,466,256,000 | 32.064516 | 67 | 0.635122 | false |
mstritt/orbit-image-analysis | src/main/python/deeplearn/preprocess_data.py | 1 | 2511 | import glob
from PIL import Image
import numpy as np
import os
import tqdm
import multiprocessing
import threading
# used instead of proprocess_data.ipynb -> adjust dirs and path separator, here \\ for windows, replace by / for linux
# multithreading dies not work -> replaced by for loop
dest_dir = 'C:\git\python\glomeruli3\data_preprocessed'
dest_dir_masks = os.path.join( dest_dir, 'masks')
dest_dir_img = os.path.join( dest_dir, 'img')
palette = {(0, 0, 0) : 0 ,
(0, 0, 255) : 0 ,
(255, 255, 255) : 1
}
def convert_from_color_segmentation(arr_3d):
arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8)
for c, i in palette.items():
m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2)
arr_2d[m] = i
return arr_2d
images = glob.glob('C:\git\python\glomeruli3\data/*.jpg')
masks = glob.glob('C:\git\python\glomeruli3\data/*.png')
masks.sort()
images.sort()
assert( len(images) == len(masks))
def rotate(img, img_name, mask, mask_name, degree, postfix):
img = img.rotate(degree)
mask = mask.rotate(degree)
mask_arr = np.array(mask)
mask_conved = convert_from_color_segmentation(mask_arr)
img.save(os.path.join(dest_dir_img, postfix + img_name))
Image.fromarray(mask_conved).save(os.path.join(dest_dir_masks, postfix + mask_name))
return
def process(args):
image_src, mask_src = args
image_name = '_'.join(image_src.split('\\')[-3:]) # -1 for absolute directories!
mask_name = '_'.join(mask_src.split('\\')[-3:])
img = Image.open(image_src)
mask = Image.open(mask_src)
img = img.resize((512, 512), Image.NEAREST)
mask = mask.resize((512, 512), Image.NEAREST)
rotate(img, image_name, mask, mask_name, 90, "90_")
rotate(img, image_name, mask, mask_name, 180, "180_")
rotate(img, image_name, mask, mask_name, 270, "270_")
mask_arr = np.array(mask)
mask_conved = convert_from_color_segmentation(mask_arr)
img.save(os.path.join(dest_dir_img, image_name))
Image.fromarray(mask_conved).save(os.path.join(dest_dir_masks, mask_name))
# for i in range(len(masks)):
# print(str(i+1)+"/"+str(len(masks)) +": "+ images[i]+" / "+masks[i])
# process((images[i], masks[i]))
if __name__ == '__main__':
pool = multiprocessing.Pool(10)
tasks = []
for i in range(len(masks)):
tasks.append((images[i], masks[i]))
for _ in tqdm.tqdm(pool.imap_unordered(process, tasks), total=len(tasks)):
pass
| gpl-3.0 | 1,342,795,322,420,620,800 | 28.197674 | 118 | 0.630426 | false |
stonescar/multi-user-blog | blogmods/handlers/welcome.py | 1 | 1529 | from google.appengine.ext import db
from .. import utils
from main_handler import Handler
class Welcome(Handler):
"""Handler for welcome page"""
def count(self):
# Collecting statistics for user
# Count blog posts by user
p = db.GqlQuery("""SELECT * FROM Posts
WHERE author = KEY('Users', %s)""" % self.uid())
uid = self.uid()
posts = p.count()
# Count comments by user
c = db.GqlQuery("""SELECT * FROM Comments
WHERE author = KEY('Users', %s)""" % self.uid())
comments = c.count()
# Count number of votes by user
votes = self.user.get_votes(uid)
# Count average score of posts by user
avg_score = self.user.get_post_scores(uid)
# Count score of votes
tot_votes = self.user.get_vote_score(uid)
return [posts, comments, votes, avg_score, tot_votes]
@utils.login_required
def get(self):
p = db.GqlQuery("""SELECT * FROM Posts
WHERE author = KEY('Users', %s)
ORDER BY created DESC
LIMIT 5""" % self.uid())
c = db.GqlQuery("""SELECT * FROM Comments
WHERE author = KEY('Users', %s)
ORDER BY time DESC
LIMIT 5""" % self.uid())
self.render("welcome.html",
posts=p, comms=c,
u=self.user.username,
count=self.count())
| mit | 3,792,237,561,269,462,500 | 32.977778 | 72 | 0.509483 | false |
quantumlib/OpenFermion-Cirq | openfermioncirq/experiments/hfvqe/molecular_data/hydrogen_chains/make_rhf_simulations.py | 1 | 3495 | # coverage: ignore
"""
Implement the H2-experiment with OpenFermion and OpenFermion-Cirq
"""
# Numerical Imports
import numpy
import scipy
import os
from openfermion.ops import general_basis_change
from openfermioncirq.experiments.hfvqe.molecular_data.molecular_data_construction import (h6_linear_molecule,
h8_linear_molecule,
h10_linear_molecule,
h12_linear_molecule,
get_ao_integrals
)
from openfermioncirq.experiments.hfvqe.gradient_hf import rhf_minimization, rhf_func_generator
from openfermioncirq.experiments.hfvqe.objective import \
RestrictedHartreeFockObjective, generate_hamiltonian
def make_rhf_objective(molecule):
# coverage: ignore
S, Hcore, TEI = get_ao_integrals(molecule)
evals, X = scipy.linalg.eigh(Hcore, S)
molecular_hamiltonian = generate_hamiltonian(
general_basis_change(Hcore, X, (1, 0)),
numpy.einsum('psqr', general_basis_change(TEI, X, (1, 0, 1, 0)),
molecule.nuclear_repulsion)
)
rhf_objective = RestrictedHartreeFockObjective(molecular_hamiltonian,
molecule.n_electrons)
return rhf_objective, S, Hcore, TEI
if __name__ == "__main__":
# coverage: ignore
# make simulations
molecule_generator = {6: h6_linear_molecule,
8: h8_linear_molecule,
10: h10_linear_molecule,
12: h12_linear_molecule}
for n in range(6, 13, 2):
name = "h_{}_sto-3g".format(n)
print(name)
# now make a dirctory with the name
os.mkdir(name)
os.chdir(name)
# # now make a separate folder for each of 50 points along a line
bond_distances = numpy.linspace(0.5, 2.5, 6)
for bb in bond_distances:
print(bb)
local_dir = 'bond_distance_{:.1f}'.format(bb)
os.mkdir(local_dir)
os.chdir(local_dir)
molecule = molecule_generator[n](bb)
rhf_objective, S, HCore, TEI = make_rhf_objective(molecule)
numpy.save("overlap.npy", S)
numpy.save("h_core.npy", HCore)
numpy.save("tei.npy", TEI)
ansatz, energy, gradient = rhf_func_generator(rhf_objective)
scipy_result = rhf_minimization(rhf_objective)
print(molecule.hf_energy)
print(scipy_result.fun)
assert numpy.isclose(molecule.hf_energy, scipy_result.fun)
numpy.save("parameters.npy", numpy.asarray(scipy_result.x))
initial_opdm = numpy.diag([1] * rhf_objective.nocc + [0] * rhf_objective.nvirt)
unitary = ansatz(scipy_result.x)
final_opdm = unitary @ initial_opdm @ numpy.conjugate(unitary).T
assert numpy.isclose(rhf_objective.energy_from_opdm(final_opdm), scipy_result.fun)
numpy.save("true_opdm.npy", numpy.asarray(final_opdm))
molecule.filename = os.path.join(os.getcwd(), molecule.name)
molecule.save()
os.chdir('../')
os.chdir('../')
| apache-2.0 | 3,456,418,451,376,939,000 | 38.715909 | 110 | 0.538484 | false |
tdanford/datacleaning | utils.py | 1 | 5342 | import string
import re
import sys
from pprint import pprint
"""
utils.py is a library of methods for dealing with streams (or lists) of tuples
and objects as generated by some of the other libraries for data cleaning, here.
"""
def query( stream ):
"""
Takes a generator and returns a list. The terminal operator in a lot of operations.
>>> query([1, 2, 3])
[1, 2, 3]
"""
return [x for x in stream]
def table( tuple_stream, outf=sys.stdout ):
"""
Pretty-prints out a table of tuples, where each column is adjusted (via padding with
spaces) to the right width.
"""
def find_length( tvalues, i ):
lens = [len(str(t[i])) for t in tvalues]
return max(lens)
table_values = query(tuple_stream)
if len(table_values) > 0:
indices = range(len(table_values[0]))
lengths = [ find_length(table_values, i) for i in indices ]
for tup in table_values:
for i in indices:
if i > 0: outf.write('\t')
outf.write(str(tup[i]).ljust(lengths[i], ' '))
outf.write('\n')
def tuples( keys, object_stream ):
"""
Extracts a stream of objects into a stream of tuples.
The first argument, 'keys', is a list-of-strings which are the keys
of the objects (and the order of those keys) in which to extract the values;
if 'keys' is not specified (i.e. is None or false-y), then the keys of the
first object are used instead.
>>> query(tuples(['foo', 'bar'], [{ 'foo': 1, 'bar': 2 }, { 'foo': 3, 'bar': 4 }] ))
[(1, 2), (3, 4)]
'tuples' is the dual of 'objects'
>>> query(tuples(['foo', 'bar'], objects(['foo', 'bar'], [(1, 2), (3, 4)] )))
[(1, 2), (3, 4)]
"""
kk = keys
for obj in object_stream:
if not kk: kk = obj.keys()
yield tuple( obj[k] for k in kk )
def objects( keys, tuple_stream ):
"""
Extracts a stream of tuples into a stream of objects.
The 'keys' argument, which is required, is used for naming each member of a tuple
as it is inserted into the corresponding object.
>>> query(objects(['foo', 'bar'], [(1, 2), (3, 4)]))
[{'foo': 1, 'bar': 2}, {'foo': 3, 'bar': 4}]
'objects' is the dual of 'tuples'
>>> query(objects(['foo', 'bar'], tuples(['foo', 'bar'], [{'foo': 1, 'bar': 2}, {'foo': 3, 'bar': 4}])))
[{'foo': 1, 'bar': 2}, {'foo': 3, 'bar': 4}]
"""
for tup in tuple_stream:
yield { keys[i]: tup[i] for i in range(len(keys)) }
def tuple_project( indices, tuple_stream ):
"""
Projects a stream of tuples down to a subset of the indices
>>> query(tuple_project([1, 3], [(0, 1, 2, 3), (10, 11, 12, 13)]))
[(1, 3), (11, 13)]
"""
for tup in tuple_stream:
yield tuple( tup[i] for i in indices )
def object_project( keys, object_stream ):
"""
Projects a stream of objects down onto a subset of the keys
>>> query(object_project(['foo', 'bar'], [{'foo': 1, 'bar': 2, 'grok': 3}, {'foo': 10, 'bar': 11, 'grok': 12 }]))
[{'foo': 1, 'bar': 2}, {'foo': 10, 'bar': 11}]
"""
for obj in object_stream:
yield { k: obj[k] for k in keys }
def object_subtract( keys, object_stream ):
"""
The dual of project -- removes the keys specified from the objects.
>>> query(object_subtract(['foo'], [{'foo': 1, 'bar': 1}, {'foo': 2, 'bar': 2}]))
[{'bar': 1}, {'bar': 2}]
"""
for obj in object_stream:
yield { k: obj[k] for k in obj.keys() if not k in keys }
def filter( pred, stream ):
"""
>>> query(filter(lambda x: x > 3, [1, 2, 3, 4, 5]))
[4, 5]
"""
for value in stream:
if pred(value):
yield value
def group_tuples_by( index, stream ):
"""
Groups a set of tuples by the values in the specified index
>>> query(group_tuples_by(0, [(1, 1), (1, 2), (2, 1)]))
[(1, [(1, 1), (1, 2)]), (2, [(2, 1)])]
"""
keyed = {}
for tup in stream:
if not tup[index] in keyed: keyed[tup[index]] = []
keyed[tup[index]].append(tup)
return [ (k, keyed[k]) for k in keyed.keys() ]
def group_objects_by( key, stream ):
"""
Groups a set of objects by the values in the specified key
>>> query(group_objects_by('foo', [{'foo': 1, 'bar': 1}, {'foo': 1, 'bar': 2}, {'foo': 2, 'bar': 1}]))
[(1, [{'foo': 1, 'bar': 1}, {'foo': 1, 'bar': 2}]), (2, [{'foo': 2, 'bar': 1}])]
"""
keyed = {}
for obj in stream:
if not obj[key] in keyed: keyed[obj[key]] = []
keyed[obj[key]].append(obj)
return [ (k, keyed[k]) for k in keyed.keys()]
def flatten_matrix( keys, tuple_stream ):
for tup in tuple_stream:
for i in range(1, len(keys)):
yield ( tup[0], keys[i], tup[i] )
def lookup( dict_value ):
"""
Given a dictionary-like value, returns a function which looks up values within
that dictionary.
>>> lookup({'foo': 3})('foo')
3
>>> print lookup({'foo': 3})('bar')
None
"""
def looker( value ):
if value in dict_value: return dict_value[value]
else: return None
return looker
def flatmap( f, stream ):
"""
>>> query(flatmap(lookup({'foo': 1, 'bar': 2}), ['foo', 'bar']))
[1, 2]
>>> query(flatmap(lookup({'foo': [1, 2], 'bar': [3]}), ['foo', 'bar', 'grok']))
[1, 2, 3]
"""
def flatvals(v):
if not v: return []
elif hasattr(v, '__iter__'): return v
else: return [v]
for val in stream:
for innerv in flatvals(f(val)):
yield innerv
def count( stream ):
"""
Takes a stream of (key, sequence-of-values) tuples, and produces
a stream of (key, integer-count) values
>>> query(count([('foo', [1, 2]), ('bar', [3])]))
[('foo', 2), ('bar', 1)]
"""
for (k, values) in stream:
yield (k, len(values))
| apache-2.0 | 2,779,504,615,477,332,000 | 28.351648 | 114 | 0.59079 | false |
lmjohns3/cube-experiment | plots/stable-postures-foot-speed.py | 1 | 3698 | import climate
import collections
import lmj.cubes
import lmj.plot
import matplotlib.colors
import numpy as np
import pandas as pd
COLORS = {
'marker00-r-head-back': '#9467bd',
'marker01-r-head-front': '#9467bd',
'marker02-l-head-front': '#9467bd',
'marker03-l-head-back': '#9467bd',
'marker07-r-shoulder': '#111111',
'marker13-r-fing-index': '#111111',
'marker14-r-mc-outer': '#111111',
'marker19-l-shoulder': '#111111',
'marker25-l-fing-index': '#111111',
'marker26-l-mc-outer': '#111111',
'marker31-sternum': '#111111',
'marker34-l-ilium': '#2ca02c',
'marker35-r-ilium': '#2ca02c',
'marker36-r-hip': '#2ca02c',
'marker40-r-heel': '#1f77b4',
'marker41-r-mt-outer': '#1f77b4',
'marker42-r-mt-inner': '#1f77b4',
'marker43-l-hip': '#2ca02c',
'marker47-l-heel': '#d62728',
'marker48-l-mt-outer': '#d62728',
'marker49-l-mt-inner': '#d62728',
}
RCM = matplotlib.colors.LinearSegmentedColormap('b', dict(
red= ((0, 0.8, 0.8), (1, 0.8, 0.8)),
green=((0, 0.1, 0.1), (1, 0.1, 0.1)),
blue= ((0, 0.1, 0.1), (1, 0.1, 0.1)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
GCM = matplotlib.colors.LinearSegmentedColormap('b', dict(
red= ((0, 0.1, 0.1), (1, 0.1, 0.1)),
green=((0, 0.6, 0.6), (1, 0.6, 0.6)),
blue= ((0, 0.1, 0.1), (1, 0.1, 0.1)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
BCM = matplotlib.colors.LinearSegmentedColormap('r', dict(
red= ((0, 0.1, 0.1), (1, 0.1, 0.1)),
green=((0, 0.5, 0.5), (1, 0.5, 0.5)),
blue= ((0, 0.7, 0.7), (1, 0.7, 0.7)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
OCM = matplotlib.colors.LinearSegmentedColormap('r', dict(
red= ((0, 1.0, 1.0), (1, 1.0, 1.0)),
green=((0, 0.5, 0.5), (1, 0.5, 0.5)),
blue= ((0, 0.0, 0.0), (1, 0.0, 0.0)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
PCM = matplotlib.colors.LinearSegmentedColormap('r', dict(
red= ((0, 0.6, 0.6), (1, 0.6, 0.6)),
green=((0, 0.4, 0.4), (1, 0.4, 0.4)),
blue= ((0, 0.7, 0.7), (1, 0.7, 0.7)),
alpha=((0, 1.0, 1.0), (1, 0.0, 0.0)),
))
# fewf, http://stackoverflow.com/questions/4494404
def contig(cond):
idx = np.diff(cond).nonzero()[0] + 1
if cond[0]:
idx = np.r_[0, idx]
if cond[-1]:
idx = np.r_[idx, cond.size]
return idx.reshape((-1, 2))
def main(root, pattern='*'):
points = collections.defaultdict(list)
for trial in lmj.cubes.Experiment(root).trials_matching(pattern):
trial.load()
trial.add_velocities(7)
lheel = trial.trajectory(40, velocity=True)
lheel['speed'] = np.sqrt((lheel[['vx', 'vy', 'vz']] ** 2).sum(axis=1))
lslow = contig((lheel.speed < 1).values)
rheel = trial.trajectory(47, velocity=True)
rheel['speed'] = np.sqrt((rheel[['vx', 'vy', 'vz']] ** 2).sum(axis=1))
#rslow = contig((rheel.speed < 2).values)
for m in trial.marker_columns:
t = trial.trajectory(m, velocity=True)
t['speed'] = np.sqrt((t[['vx', 'vy', 'vz']] ** 2).sum(axis=1))
for s, e in lslow:
if e - s > 0:
points[m].append(t.iloc[s:e, :])
for m in sorted(points):
print(m, len(points[m]))
kw = dict(s=30, vmin=0, vmax=1, lw=0, alpha=0.5, cmap=BCM)
with lmj.plot.axes3d() as ax:
ax.scatter([0], [0], [0], color='#111111', alpha=0.5, s=200, marker='s', lw=0)
for m, dfs in points.items():
for df in dfs:
# subsample.
sel = np.random.rand(len(df)) < 0.01
ax.scatter(df[sel].x, df[sel].z, df[sel].y, c=df[sel].speed, **kw)
if __name__ == '__main__':
climate.call(main)
| mit | 6,808,240,918,538,263,000 | 29.816667 | 86 | 0.522445 | false |
moopie/botologist | plugins/redditeu.py | 1 | 6865 | import logging
log = logging.getLogger(__name__)
import random
import re
import botologist.plugin
class Bitcoin:
currencies = (
'Razielcoins', 'bitcoins', 'cmd.exe resizes', 'scotweb extortions',
'warp-in staplers', 'dead Palestinian children', 'typematrix keyboards',
'marble eggs in a shitty condom', 'clean teeth', 'mutalisks on creep',
'mutalisks off creep', 'floating cars', 'floating keys', 'burned rice',
'wordpress conference tickets', 'base64 encoded o\'reilly books',
'rolls of vitamin E toilet paper', 'one-night trips to Rhodos',
('WISPY BEARDED POT SMOKING FAT FAGCUNT BITCOIN WORSHIPPERS WHO OBSESS '
'OVER ME AND MAKE A SPORT OUT OF DRIVING ME INSANE AAAAAAAAAAAAAAAAAAA'
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'),
'nazi salutes', 'syka blyats', 'pizza rolls', 'raziel whiskers',
'hardon colliders', 'apple phones', 'null pointer references',
'gigabytes', 'nodejs libraries', 'windows reinstalls', 'BSODs',
'memes'
)
@classmethod
def get_worth(cls):
num = random.randint(100, 100000) / 100
currency = random.choice(cls.currencies)
return '%.2f %s' % (num, currency)
class Raziel:
nicks = ('radio', 'brazier', 'easel', 'raIel', 'easiek', 'ramen', 'russell',
'fazorø', 'razu', 'rsdirø', 'rasjjmm', 'fszirh', 'eterisk feKfl',
'Raskelf', 'was oro', 'raIeö', 'fackförbund', 'taxiförare', 'Dr Dre',
'e as isj')
@classmethod
def get_random_nick(cls):
return random.choice(cls.nicks)
def get_random_time():
hour = random.choice((
random.randint(-23, -1),
random.randint(13, 32),
))
minute = random.randint(0, 99)
ampm = random.choice(('AM', 'PM'))
return '{}:{} {}'.format(hour, str(minute).zfill(2), ampm)
def get_random_date():
year = random.randint(1000, 9999)
month = random.randint(0, 12)
day = random.randint(0, 31)
args = [str(day).zfill(2), str(month).zfill(2), str(year)]
random.shuffle(args)
separator = random.choice(('-', '/', '.'))
return separator.join(args)
class RedditeuPlugin(botologist.plugin.Plugin):
"""#redditeu plugin."""
def __init__(self, bot, channel):
super().__init__(bot, channel)
self.insults = (
re.compile(r'.*fuck(\s+you)\s*,?\s*'+self.bot.nick+r'.*', re.IGNORECASE),
re.compile(r'.*'+self.bot.nick+r'[,:]?\s+fuck\s+you.*', re.IGNORECASE),
re.compile(r'.*shut\s*(the\s*fuck)?\s*up\s*,?\s*'+self.bot.nick+r'.*', re.IGNORECASE),
re.compile(r'.*'+self.bot.nick+r'[,:]?\s+shut\s*(the\s*fuck)?\s*up.*', re.IGNORECASE),
)
self.monologue_lastuser = None
self.monologue_counter = 0
@botologist.plugin.reply()
def return_insults(self, msg):
for expr in self.insults:
if expr.match(msg.message):
return ('{}: I feel offended by your recent action(s). Please '
'read http://stop-irc-bullying.eu/stop').format(msg.user.nick)
no_work = re.compile(r".*(__)?bot(__)?\s+(no|not|does ?n.?t)\s+work.*", re.IGNORECASE)
@botologist.plugin.reply()
def bot_always_works(self, msg):
if self.no_work.match(msg.message):
return 'I always work'
@botologist.plugin.command('btc')
def get_btc_worth(self, cmd):
return '1 bitcoin is currently worth ' + Bitcoin.get_worth()
@botologist.plugin.command('michael')
def who_is_michael(self, cmd):
'''Find out what nick Michael is hiding under.'''
channel = self.bot.client.channels.get(cmd.message.target)
if not channel:
return
for user in channel.users:
if 'nevzetz' in user.host or 'ip51cc146b.speed.planet.nl' in user.host:
return 'Michael is ' + user.name
if 'steele' in user.name.lower():
return "There's a chance it's " + user.name
return 'Michael not found!'
@botologist.plugin.command('simon')
def who_is_simon(self, cmd):
'''Find out what nick Simon is hiding under.'''
channel = self.bot.client.channels.get(cmd.message.target)
if not channel:
return
for user in channel.users:
if '0x57.co' in user.host:
return 'Simon is ' + user.name
return 'Simon not found!'
@botologist.plugin.command('time')
def the_time(self, cmd):
'''If you need to know what the time really is. For really reals.'''
return 'the time is {} - the date is {}'.format(
get_random_time(), get_random_date())
@botologist.plugin.command('speedtest')
def speedtest(self, cmd):
return 'Pretty fast, thank you.'
@botologist.plugin.join()
def welcome(self, user, channel):
if 'happy0' in user.nick.lower():
return 'ypyotootp hippy 0'
if user.nick.lower().startswith('raziel'):
return 'hello ' + Raziel.get_random_nick()
@botologist.plugin.reply()
def no_more_that_are_stupid(self, msg):
if 'no more irc binds that are stupid' in msg.message.lower():
return 'https://www.youtube.com/watch?v=LGxS-qjViNQ'
@botologist.plugin.reply()
def garner_masturbation_video(self, msg):
if 'garner masturbation video' in msg.message.lower():
return 'https://www.youtube.com/watch?v=akTE1n-U0C0'
@botologist.plugin.reply()
def deridu(self, msg):
msgl = msg.message.lower()
if msgl == 'deridu':
return 'what the fuck is this'
elif 'fuck is this' in msgl or 'what the fuck is' in msgl or 'wtf is this' in msgl:
return 'watch yo profamity'
elif msgl == 'watch your profanity' or msgl == 'watch your profamity' \
or msgl == 'watch yo profamity' or msgl == 'watchoprofamity' \
or msgl == 'watcho profamity':
return 'right I\'m sorry'
@botologist.plugin.reply()
def monologue_detector(self, msg):
if msg.user == self.monologue_lastuser:
self.monologue_counter += 1
else:
self.monologue_lastuser = msg.user
count = self.monologue_counter
self.monologue_counter = 1
if count > 15:
return 'AUTISM C-C-C-C-COMBO BREAKER! ({} line long monologue)'.format(count)
@botologist.plugin.kick()
def kick_handler(self, kicked_user, channel, user):
if kicked_user == self.monologue_lastuser:
self.monologue_lastuser = None
count = self.monologue_counter
self.monologue_counter = 1
if count > 15:
return 'AUTISM C-C-C-C-COMBO BREAKER! ({} line long monologue)'.format(count)
@botologist.plugin.reply()
def nooooo(self, msg):
if 'nooo' in msg.message.lower():
return 'http://files.lutro.me/noooo.webm'
@botologist.plugin.reply()
def guys(self, msg):
msgl = msg.message.lower()
if 'dont be late' in msgl or "don't be late" in msgl:
return 'same to you'
@botologist.plugin.reply()
def dadziel(self, msg):
daddy = msg.message.lower()
if 'dadziel' in daddy:
return 'https://i.imgur.com/YqXHpqn.jpg'
@botologist.plugin.command('grandpa')
def grandpa(self, cmd):
return 'https://i.imgur.com/YqXHpqn.jpg'
@botologist.plugin.reply()
def internet_memes(self, msg):
msgl = msg.message.lower()
if 'no way to ayy' in msgl:
return 'https://www.youtube.com/watch?v=tCOIZDttei4&t=1m15s'
elif 'no more internet memes' in msgl:
return 'https://www.youtube.com/watch?v=tCOIZDttei4&t=1m20s'
| mit | 1,645,002,937,080,925,200 | 31.206573 | 89 | 0.678571 | false |
djstein/messages-grailed | project/api/migrations/0001_initial.py | 1 | 1569 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-10 21:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=100)),
('users', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now_add=True)),
('text', models.TextField()),
('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='api.Channel')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['date'],
},
),
migrations.AlterUniqueTogether(
name='message',
unique_together=set([('channel', 'date')]),
),
]
| mit | -5,269,600,860,185,608,000 | 34.659091 | 135 | 0.574888 | false |
AbsoluteMSTR/isort | test_isort.py | 1 | 78866 | # coding: utf-8
"""test_isort.py.
Tests all major functionality of the isort library
Should be ran using py.test by simply running py.test in the isort project directory
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import codecs
import os
import shutil
import tempfile
from isort.isort import SortImports
from isort.pie_slice import *
from isort.settings import WrapModes
SHORT_IMPORT = "from third_party import lib1, lib2, lib3, lib4"
REALLY_LONG_IMPORT = ("from third_party import lib1, lib2, lib3, lib4, lib5, lib6, lib7, lib8, lib9, lib10, lib11,"
"lib12, lib13, lib14, lib15, lib16, lib17, lib18, lib20, lib21, lib22")
REALLY_LONG_IMPORT_WITH_COMMENT = ("from third_party import lib1, lib2, lib3, lib4, lib5, lib6, lib7, lib8, lib9, "
"lib10, lib11, lib12, lib13, lib14, lib15, lib16, lib17, lib18, lib20, lib21, lib22"
" # comment")
def test_happy_path():
"""Test the most basic use case, straight imports no code, simply not organized by category."""
test_input = ("import sys\n"
"import os\n"
"import myproject.test\n"
"import django.settings")
test_output = SortImports(file_contents=test_input, known_third_party=['django']).output
assert test_output == ("import os\n"
"import sys\n"
"\n"
"import django.settings\n"
"\n"
"import myproject.test\n")
def test_code_intermixed():
"""Defines what should happen when isort encounters imports intermixed with
code.
(it should pull them all to the top)
"""
test_input = ("import sys\n"
"print('yo')\n"
"print('I like to put code between imports cause I want stuff to break')\n"
"import myproject.test\n")
test_output = SortImports(file_contents=test_input).output
assert test_output == ("import sys\n"
"\n"
"import myproject.test\n"
"\n"
"print('yo')\n"
"print('I like to put code between imports cause I want stuff to break')\n")
def test_correct_space_between_imports():
"""Ensure after imports a correct amount of space (in newlines) is
enforced.
(2 for method, class, or decorator definitions 1 for anything else)
"""
test_input_method = ("import sys\n"
"def my_method():\n"
" print('hello world')\n")
test_output_method = SortImports(file_contents=test_input_method).output
assert test_output_method == ("import sys\n"
"\n"
"\n"
"def my_method():\n"
" print('hello world')\n")
test_input_decorator = ("import sys\n"
"@my_decorator\n"
"def my_method():\n"
" print('hello world')\n")
test_output_decorator = SortImports(file_contents=test_input_decorator).output
assert test_output_decorator == ("import sys\n"
"\n"
"\n"
"@my_decorator\n"
"def my_method():\n"
" print('hello world')\n")
test_input_class = ("import sys\n"
"class MyClass(object):\n"
" pass\n")
test_output_class = SortImports(file_contents=test_input_class).output
assert test_output_class == ("import sys\n"
"\n"
"\n"
"class MyClass(object):\n"
" pass\n")
test_input_other = ("import sys\n"
"print('yo')\n")
test_output_other = SortImports(file_contents=test_input_other).output
assert test_output_other == ("import sys\n"
"\n"
"print('yo')\n")
def test_sort_on_number():
"""Ensure numbers get sorted logically (10 > 9 not the other way around)"""
test_input = ("import lib10\n"
"import lib9\n")
test_output = SortImports(file_contents=test_input).output
assert test_output == ("import lib9\n"
"import lib10\n")
def test_line_length():
"""Ensure isort enforces the set line_length."""
assert len(SortImports(file_contents=REALLY_LONG_IMPORT, line_length=80).output.split("\n")[0]) <= 80
assert len(SortImports(file_contents=REALLY_LONG_IMPORT, line_length=120).output.split("\n")[0]) <= 120
test_output = SortImports(file_contents=REALLY_LONG_IMPORT, line_length=42).output
assert test_output == ("from third_party import (lib1, lib2, lib3,\n"
" lib4, lib5, lib6,\n"
" lib7, lib8, lib9,\n"
" lib10, lib11,\n"
" lib12, lib13,\n"
" lib14, lib15,\n"
" lib16, lib17,\n"
" lib18, lib20,\n"
" lib21, lib22)\n")
test_output = SortImports(file_contents=REALLY_LONG_IMPORT, line_length=42, wrap_length=32).output
assert test_output == ("from third_party import (lib1,\n"
" lib2,\n"
" lib3,\n"
" lib4,\n"
" lib5,\n"
" lib6,\n"
" lib7,\n"
" lib8,\n"
" lib9,\n"
" lib10,\n"
" lib11,\n"
" lib12,\n"
" lib13,\n"
" lib14,\n"
" lib15,\n"
" lib16,\n"
" lib17,\n"
" lib18,\n"
" lib20,\n"
" lib21,\n"
" lib22)\n")
def test_output_modes():
"""Test setting isort to use various output modes works as expected"""
test_output_grid = SortImports(file_contents=REALLY_LONG_IMPORT,
multi_line_output=WrapModes.GRID, line_length=40).output
assert test_output_grid == ("from third_party import (lib1, lib2,\n"
" lib3, lib4,\n"
" lib5, lib6,\n"
" lib7, lib8,\n"
" lib9, lib10,\n"
" lib11, lib12,\n"
" lib13, lib14,\n"
" lib15, lib16,\n"
" lib17, lib18,\n"
" lib20, lib21,\n"
" lib22)\n")
test_output_vertical = SortImports(file_contents=REALLY_LONG_IMPORT,
multi_line_output=WrapModes.VERTICAL, line_length=40).output
assert test_output_vertical == ("from third_party import (lib1,\n"
" lib2,\n"
" lib3,\n"
" lib4,\n"
" lib5,\n"
" lib6,\n"
" lib7,\n"
" lib8,\n"
" lib9,\n"
" lib10,\n"
" lib11,\n"
" lib12,\n"
" lib13,\n"
" lib14,\n"
" lib15,\n"
" lib16,\n"
" lib17,\n"
" lib18,\n"
" lib20,\n"
" lib21,\n"
" lib22)\n")
comment_output_vertical = SortImports(file_contents=REALLY_LONG_IMPORT_WITH_COMMENT,
multi_line_output=WrapModes.VERTICAL, line_length=40).output
assert comment_output_vertical == ("from third_party import (lib1, # comment\n"
" lib2,\n"
" lib3,\n"
" lib4,\n"
" lib5,\n"
" lib6,\n"
" lib7,\n"
" lib8,\n"
" lib9,\n"
" lib10,\n"
" lib11,\n"
" lib12,\n"
" lib13,\n"
" lib14,\n"
" lib15,\n"
" lib16,\n"
" lib17,\n"
" lib18,\n"
" lib20,\n"
" lib21,\n"
" lib22)\n")
test_output_hanging_indent = SortImports(file_contents=REALLY_LONG_IMPORT,
multi_line_output=WrapModes.HANGING_INDENT,
line_length=40, indent=" ").output
assert test_output_hanging_indent == ("from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, \\\n"
" lib8, lib9, lib10, lib11, lib12, \\\n"
" lib13, lib14, lib15, lib16, lib17, \\\n"
" lib18, lib20, lib21, lib22\n")
comment_output_hanging_indent = SortImports(file_contents=REALLY_LONG_IMPORT_WITH_COMMENT,
multi_line_output=WrapModes.HANGING_INDENT,
line_length=40, indent=" ").output
assert comment_output_hanging_indent == ("from third_party import lib1, \\ # comment\n"
" lib2, lib3, lib4, lib5, lib6, \\\n"
" lib7, lib8, lib9, lib10, lib11, \\\n"
" lib12, lib13, lib14, lib15, lib16, \\\n"
" lib17, lib18, lib20, lib21, lib22\n")
test_output_vertical_indent = SortImports(file_contents=REALLY_LONG_IMPORT,
multi_line_output=WrapModes.VERTICAL_HANGING_INDENT,
line_length=40, indent=" ").output
assert test_output_vertical_indent == ("from third_party import (\n"
" lib1,\n"
" lib2,\n"
" lib3,\n"
" lib4,\n"
" lib5,\n"
" lib6,\n"
" lib7,\n"
" lib8,\n"
" lib9,\n"
" lib10,\n"
" lib11,\n"
" lib12,\n"
" lib13,\n"
" lib14,\n"
" lib15,\n"
" lib16,\n"
" lib17,\n"
" lib18,\n"
" lib20,\n"
" lib21,\n"
" lib22\n"
")\n")
comment_output_vertical_indent = SortImports(file_contents=REALLY_LONG_IMPORT_WITH_COMMENT,
multi_line_output=WrapModes.VERTICAL_HANGING_INDENT,
line_length=40, indent=" ").output
assert comment_output_vertical_indent == ("from third_party import ( # comment\n"
" lib1,\n"
" lib2,\n"
" lib3,\n"
" lib4,\n"
" lib5,\n"
" lib6,\n"
" lib7,\n"
" lib8,\n"
" lib9,\n"
" lib10,\n"
" lib11,\n"
" lib12,\n"
" lib13,\n"
" lib14,\n"
" lib15,\n"
" lib16,\n"
" lib17,\n"
" lib18,\n"
" lib20,\n"
" lib21,\n"
" lib22\n"
")\n")
test_output_vertical_grid = SortImports(file_contents=REALLY_LONG_IMPORT,
multi_line_output=WrapModes.VERTICAL_GRID,
line_length=40, indent=" ").output
assert test_output_vertical_grid == ("from third_party import (\n"
" lib1, lib2, lib3, lib4, lib5, lib6,\n"
" lib7, lib8, lib9, lib10, lib11,\n"
" lib12, lib13, lib14, lib15, lib16,\n"
" lib17, lib18, lib20, lib21, lib22)\n")
comment_output_vertical_grid = SortImports(file_contents=REALLY_LONG_IMPORT_WITH_COMMENT,
multi_line_output=WrapModes.VERTICAL_GRID,
line_length=40, indent=" ").output
assert comment_output_vertical_grid == ("from third_party import ( # comment\n"
" lib1, lib2, lib3, lib4, lib5, lib6,\n"
" lib7, lib8, lib9, lib10, lib11,\n"
" lib12, lib13, lib14, lib15, lib16,\n"
" lib17, lib18, lib20, lib21, lib22)\n")
test_output_vertical_grid_grouped = SortImports(file_contents=REALLY_LONG_IMPORT,
multi_line_output=WrapModes.VERTICAL_GRID_GROUPED,
line_length=40, indent=" ").output
assert test_output_vertical_grid_grouped == ("from third_party import (\n"
" lib1, lib2, lib3, lib4, lib5, lib6,\n"
" lib7, lib8, lib9, lib10, lib11,\n"
" lib12, lib13, lib14, lib15, lib16,\n"
" lib17, lib18, lib20, lib21, lib22\n"
")\n")
comment_output_vertical_grid_grouped = SortImports(file_contents=REALLY_LONG_IMPORT_WITH_COMMENT,
multi_line_output=WrapModes.VERTICAL_GRID_GROUPED,
line_length=40, indent=" ").output
assert comment_output_vertical_grid_grouped == ("from third_party import ( # comment\n"
" lib1, lib2, lib3, lib4, lib5, lib6,\n"
" lib7, lib8, lib9, lib10, lib11,\n"
" lib12, lib13, lib14, lib15, lib16,\n"
" lib17, lib18, lib20, lib21, lib22\n"
")\n")
output_noqa = SortImports(file_contents=REALLY_LONG_IMPORT_WITH_COMMENT,
multi_line_output=WrapModes.NOQA).output
assert output_noqa == "from third_party import lib1 lib2 lib3 lib4 lib5 lib6 lib7 lib8 lib9 lib10 lib11 lib12 lib13 lib14 lib15 lib16 lib17 lib18 lib20 lib21 lib22 # NOQA comment\n" # NOQA
def test_qa_comment_case():
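    """Test that imports with a # NOQA comment are kept on a single line in NOQA mode."""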
test_input = "from veryveryveryveryveryveryveryveryveryveryvery import X # NOQA"
test_output = SortImports(file_contents=test_input, line_length=40, multi_line_output=WrapModes.NOQA).output
assert test_output == "from veryveryveryveryveryveryveryveryveryveryvery import X # NOQA\n"
test_input = "import veryveryveryveryveryveryveryveryveryveryvery # NOQA"
test_output = SortImports(file_contents=test_input, line_length=40, multi_line_output=WrapModes.NOQA).output
assert test_output == "import veryveryveryveryveryveryveryveryveryveryvery # NOQA\n"
def test_length_sort():
"""Test setting isort to sort on length instead of alphabetically."""
test_input = ("import medium_sizeeeeeeeeeeeeee\n"
"import shortie\n"
"import looooooooooooooooooooooooooooooooooooooong\n"
"import medium_sizeeeeeeeeeeeeea\n")
test_output = SortImports(file_contents=test_input, length_sort=True).output
assert test_output == ("import shortie\n"
"import medium_sizeeeeeeeeeeeeea\n"
"import medium_sizeeeeeeeeeeeeee\n"
"import looooooooooooooooooooooooooooooooooooooong\n")
def test_convert_hanging():
"""Ensure that isort will convert hanging indents to correct indent
method."""
test_input = ("from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, \\\n"
" lib8, lib9, lib10, lib11, lib12, \\\n"
" lib13, lib14, lib15, lib16, lib17, \\\n"
" lib18, lib20, lib21, lib22\n")
test_output = SortImports(file_contents=test_input, multi_line_output=WrapModes.GRID,
line_length=40).output
assert test_output == ("from third_party import (lib1, lib2,\n"
" lib3, lib4,\n"
" lib5, lib6,\n"
" lib7, lib8,\n"
" lib9, lib10,\n"
" lib11, lib12,\n"
" lib13, lib14,\n"
" lib15, lib16,\n"
" lib17, lib18,\n"
" lib20, lib21,\n"
" lib22)\n")
def test_custom_indent():
"""Ensure setting a custom indent will work as expected."""
test_output = SortImports(file_contents=REALLY_LONG_IMPORT, multi_line_output=WrapModes.HANGING_INDENT,
line_length=40, indent=" ", balanced_wrapping=False).output
assert test_output == ("from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, lib8, \\\n"
" lib9, lib10, lib11, lib12, lib13, \\\n"
" lib14, lib15, lib16, lib17, lib18, \\\n"
" lib20, lib21, lib22\n")
test_output = SortImports(file_contents=REALLY_LONG_IMPORT, multi_line_output=WrapModes.HANGING_INDENT,
line_length=40, indent="' '", balanced_wrapping=False).output
assert test_output == ("from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, lib8, \\\n"
" lib9, lib10, lib11, lib12, lib13, \\\n"
" lib14, lib15, lib16, lib17, lib18, \\\n"
" lib20, lib21, lib22\n")
test_output = SortImports(file_contents=REALLY_LONG_IMPORT, multi_line_output=WrapModes.HANGING_INDENT,
line_length=40, indent="tab", balanced_wrapping=False).output
assert test_output == ("from third_party import lib1, lib2, \\\n"
"\tlib3, lib4, lib5, lib6, lib7, lib8, \\\n"
"\tlib9, lib10, lib11, lib12, lib13, \\\n"
"\tlib14, lib15, lib16, lib17, lib18, \\\n"
"\tlib20, lib21, lib22\n")
test_output = SortImports(file_contents=REALLY_LONG_IMPORT, multi_line_output=WrapModes.HANGING_INDENT,
line_length=40, indent=2, balanced_wrapping=False).output
assert test_output == ("from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, lib8, \\\n"
" lib9, lib10, lib11, lib12, lib13, \\\n"
" lib14, lib15, lib16, lib17, lib18, \\\n"
" lib20, lib21, lib22\n")
def test_use_parentheses():
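    """Test that use_parentheses wraps a long from import with parentheses rather than a backslash."""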
test_input = (
"from fooooooooooooooooooooooooo.baaaaaaaaaaaaaaaaaaarrrrrrr import \\"
" my_custom_function as my_special_function"
)
test_output = SortImports(
file_contents=test_input, known_third_party=['django'],
line_length=79, use_parentheses=True,
).output
assert '(' in test_output
def test_skip():
"""Ensure skipping a single import will work as expected."""
test_input = ("import myproject\n"
"import django\n"
"print('hey')\n"
"import sys # isort:skip this import needs to be placed here\n\n\n\n\n\n\n")
test_output = SortImports(file_contents=test_input, known_third_party=['django']).output
assert test_output == ("import django\n"
"\n"
"import myproject\n"
"\n"
"print('hey')\n"
"import sys # isort:skip this import needs to be placed here\n")
def test_skip_with_file_name():
"""Ensure skipping a file works even when file_contents is provided."""
test_input = ("import django\n"
"import myproject\n")
skipped = SortImports(file_path='/baz.py', file_contents=test_input, known_third_party=['django'],
skip=['baz.py']).skipped
assert skipped
def test_force_to_top():
"""Ensure forcing a single import to the top of its category works as expected."""
test_input = ("import lib6\n"
"import lib2\n"
"import lib5\n"
"import lib1\n")
test_output = SortImports(file_contents=test_input, force_to_top=['lib5']).output
assert test_output == ("import lib5\n"
"import lib1\n"
"import lib2\n"
"import lib6\n")
def test_add_imports():
"""Ensures adding imports works as expected."""
test_input = ("import lib6\n"
"import lib2\n"
"import lib5\n"
"import lib1\n\n")
test_output = SortImports(file_contents=test_input, add_imports=['import lib4', 'import lib7']).output
assert test_output == ("import lib1\n"
"import lib2\n"
"import lib4\n"
"import lib5\n"
"import lib6\n"
"import lib7\n")
# Using simplified syntax
test_input = ("import lib6\n"
"import lib2\n"
"import lib5\n"
"import lib1\n\n")
test_output = SortImports(file_contents=test_input, add_imports=['lib4', 'lib7', 'lib8.a']).output
assert test_output == ("import lib1\n"
"import lib2\n"
"import lib4\n"
"import lib5\n"
"import lib6\n"
"import lib7\n"
"from lib8 import a\n")
# On a file that has no pre-existing imports
test_input = ('"""Module docstring"""\n'
'\n'
'class MyClass(object):\n'
' pass\n')
test_output = SortImports(file_contents=test_input, add_imports=['from __future__ import print_function']).output
assert test_output == ('"""Module docstring"""\n'
'from __future__ import print_function\n'
'\n'
'\n'
'class MyClass(object):\n'
' pass\n')
# On a file that has no pre-existing imports, and no doc-string
test_input = ('class MyClass(object):\n'
' pass\n')
test_output = SortImports(file_contents=test_input, add_imports=['from __future__ import print_function']).output
assert test_output == ('from __future__ import print_function\n'
'\n'
'\n'
'class MyClass(object):\n'
' pass\n')
# On a file with no content what so ever
test_input = ("")
test_output = SortImports(file_contents=test_input, add_imports=['lib4']).output
assert test_output == ("")
# On a file with no content what so ever, after force_adds is set to True
test_input = ("")
test_output = SortImports(file_contents=test_input, add_imports=['lib4'], force_adds=True).output
assert test_output == ("import lib4\n")
def test_remove_imports():
"""Ensures removing imports works as expected."""
test_input = ("import lib6\n"
"import lib2\n"
"import lib5\n"
"import lib1")
test_output = SortImports(file_contents=test_input, remove_imports=['lib2', 'lib6']).output
assert test_output == ("import lib1\n"
"import lib5\n")
# Using natural syntax
test_input = ("import lib6\n"
"import lib2\n"
"import lib5\n"
"import lib1\n"
"from lib8 import a")
test_output = SortImports(file_contents=test_input, remove_imports=['import lib2', 'import lib6',
'from lib8 import a']).output
assert test_output == ("import lib1\n"
"import lib5\n")
def test_explicitly_local_import():
"""Ensure that explicitly local imports are separated."""
test_input = ("import lib1\n"
"import lib2\n"
"import .lib6\n"
"from . import lib7")
assert SortImports(file_contents=test_input).output == ("import lib1\n"
"import lib2\n"
"\n"
"import .lib6\n"
"from . import lib7\n")
def test_quotes_in_file():
"""Ensure imports within triple quotes don't get imported."""
test_input = ('import os\n'
'\n'
'"""\n'
'Let us\n'
'import foo\n'
'okay?\n'
'"""\n')
assert SortImports(file_contents=test_input).output == test_input
test_input = ('import os\n'
'\n'
"'\"\"\"'\n"
'import foo\n')
assert SortImports(file_contents=test_input).output == ('import os\n'
'\n'
'import foo\n'
'\n'
"'\"\"\"'\n")
test_input = ('import os\n'
'\n'
'"""Let us"""\n'
'import foo\n'
'"""okay?"""\n')
assert SortImports(file_contents=test_input).output == ('import os\n'
'\n'
'import foo\n'
'\n'
'"""Let us"""\n'
'"""okay?"""\n')
test_input = ('import os\n'
'\n'
'#"""\n'
'import foo\n'
'#"""')
assert SortImports(file_contents=test_input).output == ('import os\n'
'\n'
'import foo\n'
'\n'
'#"""\n'
'#"""\n')
test_input = ('import os\n'
'\n'
"'\\\n"
"import foo'\n")
assert SortImports(file_contents=test_input).output == test_input
test_input = ('import os\n'
'\n'
"'''\n"
"\\'''\n"
'import junk\n'
"'''\n")
assert SortImports(file_contents=test_input).output == test_input
def test_check_newline_in_imports(capsys):
"""Ensure tests works correctly when new lines are in imports."""
test_input = ('from lib1 import (\n'
' sub1,\n'
' sub2,\n'
' sub3\n)\n')
SortImports(file_contents=test_input, multi_line_output=WrapModes.VERTICAL_HANGING_INDENT, line_length=20,
check=True, verbose=True)
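    # check=True verifies the ordering without rewriting; with verbose=True the result is printed to stdout.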
out, err = capsys.readouterr()
assert 'SUCCESS' in out
def test_forced_separate():
"""Ensure that forcing certain sub modules to show separately works as expected."""
test_input = ('import sys\n'
'import warnings\n'
'from collections import OrderedDict\n'
'\n'
'from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation\n'
'from django.core.paginator import InvalidPage\n'
'from django.core.urlresolvers import reverse\n'
'from django.db import models\n'
'from django.db.models.fields import FieldDoesNotExist\n'
'from django.utils import six\n'
'from django.utils.deprecation import RenameMethodsBase\n'
'from django.utils.encoding import force_str, force_text\n'
'from django.utils.http import urlencode\n'
'from django.utils.translation import ugettext, ugettext_lazy\n'
'\n'
'from django.contrib.admin import FieldListFilter\n'
'from django.contrib.admin.exceptions import DisallowedModelAdminLookup\n'
'from django.contrib.admin.options import IncorrectLookupParameters, IS_POPUP_VAR, TO_FIELD_VAR\n')
assert SortImports(file_contents=test_input, forced_separate=['django.contrib'],
known_third_party=['django'], line_length=120, order_by_type=False).output == test_input
test_input = ('from .foo import bar\n'
'\n'
'from .y import ca\n')
assert SortImports(file_contents=test_input, forced_separate=['.y'],
line_length=120, order_by_type=False).output == test_input
def test_default_section():
"""Test to ensure changing the default section works as expected."""
test_input = ("import sys\n"
"import os\n"
"import myproject.test\n"
"import django.settings")
test_output = SortImports(file_contents=test_input, known_third_party=['django'],
default_section="FIRSTPARTY").output
assert test_output == ("import os\n"
"import sys\n"
"\n"
"import django.settings\n"
"\n"
"import myproject.test\n")
test_output_custom = SortImports(file_contents=test_input, known_third_party=['django'],
default_section="STDLIB").output
assert test_output_custom == ("import myproject.test\n"
"import os\n"
"import sys\n"
"\n"
"import django.settings\n")
def test_first_party_overrides_standard_section():
"""Test to ensure changing the default section works as expected."""
test_input = ("import sys\n"
"import os\n"
"import profile.test\n")
test_output = SortImports(file_contents=test_input, known_first_party=['profile']).output
assert test_output == ("import os\n"
"import sys\n"
"\n"
"import profile.test\n")
def test_third_party_overrides_standard_section():
"""Test to ensure changing the default section works as expected."""
test_input = ("import sys\n"
"import os\n"
"import profile.test\n")
test_output = SortImports(file_contents=test_input, known_third_party=['profile']).output
assert test_output == ("import os\n"
"import sys\n"
"\n"
"import profile.test\n")
def test_force_single_line_imports():
"""Test to ensure forcing imports to each have their own line works as expected."""
test_input = ("from third_party import lib1, lib2, \\\n"
" lib3, lib4, lib5, lib6, lib7, \\\n"
" lib8, lib9, lib10, lib11, lib12, \\\n"
" lib13, lib14, lib15, lib16, lib17, \\\n"
" lib18, lib20, lib21, lib22\n")
test_output = SortImports(file_contents=test_input, multi_line_output=WrapModes.GRID,
line_length=40, force_single_line=True).output
assert test_output == ("from third_party import lib1\n"
"from third_party import lib2\n"
"from third_party import lib3\n"
"from third_party import lib4\n"
"from third_party import lib5\n"
"from third_party import lib6\n"
"from third_party import lib7\n"
"from third_party import lib8\n"
"from third_party import lib9\n"
"from third_party import lib10\n"
"from third_party import lib11\n"
"from third_party import lib12\n"
"from third_party import lib13\n"
"from third_party import lib14\n"
"from third_party import lib15\n"
"from third_party import lib16\n"
"from third_party import lib17\n"
"from third_party import lib18\n"
"from third_party import lib20\n"
"from third_party import lib21\n"
"from third_party import lib22\n")
def test_force_single_line_long_imports():
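    """Test force_single_line combined with NOQA mode for imports that exceed the line length."""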
test_input = ("from veryveryveryveryveryvery import small, big\n")
test_output = SortImports(file_contents=test_input, multi_line_output=WrapModes.NOQA,
line_length=40, force_single_line=True).output
assert test_output == ("from veryveryveryveryveryvery import big\n"
"from veryveryveryveryveryvery import small # NOQA\n")
def test_titled_imports():
"""Tests setting custom titled/commented import sections."""
test_input = ("import sys\n"
"import unicodedata\n"
"import statistics\n"
"import os\n"
"import myproject.test\n"
"import django.settings")
test_output = SortImports(file_contents=test_input, known_third_party=['django'],
import_heading_stdlib="Standard Library", import_heading_firstparty="My Stuff").output
assert test_output == ("# Standard Library\n"
"import os\n"
"import statistics\n"
"import sys\n"
"import unicodedata\n"
"\n"
"import django.settings\n"
"\n"
"# My Stuff\n"
"import myproject.test\n")
test_second_run = SortImports(file_contents=test_output, known_third_party=['django'],
import_heading_stdlib="Standard Library", import_heading_firstparty="My Stuff").output
assert test_second_run == test_output
def test_balanced_wrapping():
"""Tests balanced wrapping mode, where the length of individual lines maintain width."""
test_input = ("from __future__ import (absolute_import, division, print_function,\n"
" unicode_literals)")
test_output = SortImports(file_contents=test_input, line_length=70, balanced_wrapping=True).output
assert test_output == ("from __future__ import (absolute_import, division,\n"
" print_function, unicode_literals)\n")
def test_relative_import_with_space():
"""Tests the case where the relation and the module that is being imported from is separated with a space."""
test_input = ("from ... fields.sproqet import SproqetCollection")
assert SortImports(file_contents=test_input).output == ("from ...fields.sproqet import SproqetCollection\n")
def test_multiline_import():
"""Test the case where import spawns multiple lines with inconsistent indentation."""
test_input = ("from pkg \\\n"
" import stuff, other_suff \\\n"
" more_stuff")
assert SortImports(file_contents=test_input).output == ("from pkg import more_stuff, other_suff, stuff\n")
# test again with a custom configuration
custom_configuration = {'force_single_line': True,
'line_length': 120,
'known_first_party': ['asdf', 'qwer'],
'default_section': 'THIRDPARTY',
'forced_separate': 'asdf'}
expected_output = ("from pkg import more_stuff\n"
"from pkg import other_suff\n"
"from pkg import stuff\n")
assert SortImports(file_contents=test_input, **custom_configuration).output == expected_output
def test_atomic_mode():
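    """Test that atomic mode leaves contents unchanged when they cannot be parsed."""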
# without syntax error, everything works OK
test_input = ("from b import d, c\n"
"from a import f, e\n")
assert SortImports(file_contents=test_input, atomic=True).output == ("from a import e, f\n"
"from b import c, d\n")
# with syntax error content is not changed
test_input += "while True print 'Hello world'" # blatant syntax error
assert SortImports(file_contents=test_input, atomic=True).output == test_input
def test_order_by_type():
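    """Test that order_by_type sorts constants, then classes, then functions within an import."""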
test_input = "from module import Class, CONSTANT, function"
assert SortImports(file_contents=test_input,
order_by_type=True).output == ("from module import CONSTANT, Class, function\n")
# More complex sample data
test_input = "from module import Class, CONSTANT, function, BASIC, Apple"
assert SortImports(file_contents=test_input,
order_by_type=True).output == ("from module import BASIC, CONSTANT, Apple, Class, function\n")
# Really complex sample data, to verify we don't mess with top level imports, only nested ones
test_input = ("import StringIO\n"
"import glob\n"
"import os\n"
"import shutil\n"
"import tempfile\n"
"import time\n"
"from subprocess import PIPE, Popen, STDOUT\n")
assert SortImports(file_contents=test_input, order_by_type=True).output == \
("import glob\n"
"import os\n"
"import shutil\n"
"import StringIO\n"
"import tempfile\n"
"import time\n"
"from subprocess import PIPE, STDOUT, Popen\n")
def test_custom_lines_after_import_section():
"""Test the case where the number of lines to output after imports has been explicitly set."""
test_input = ("from a import b\n"
"foo = 'bar'\n")
# default case is one space if not method or class after imports
assert SortImports(file_contents=test_input).output == ("from a import b\n"
"\n"
"foo = 'bar'\n")
# test again with a custom number of lines after the import section
assert SortImports(file_contents=test_input, lines_after_imports=2).output == ("from a import b\n"
"\n"
"\n"
"foo = 'bar'\n")
def test_smart_lines_after_import_section():
"""Tests the default 'smart' behavior for dealing with lines after the import section"""
# one space if not method or class after imports
test_input = ("from a import b\n"
"foo = 'bar'\n")
assert SortImports(file_contents=test_input).output == ("from a import b\n"
"\n"
"foo = 'bar'\n")
# two spaces if a method or class after imports
test_input = ("from a import b\n"
"def my_function():\n"
" pass\n")
assert SortImports(file_contents=test_input).output == ("from a import b\n"
"\n"
"\n"
"def my_function():\n"
" pass\n")
# two spaces if a method or class after imports - even if comment before function
test_input = ("from a import b\n"
"# comment should be ignored\n"
"def my_function():\n"
" pass\n")
assert SortImports(file_contents=test_input).output == ("from a import b\n"
"\n"
"\n"
"# comment should be ignored\n"
"def my_function():\n"
" pass\n")
# ensure logic works with both style comments
test_input = ("from a import b\n"
'"""\n'
" comment should be ignored\n"
'"""\n'
"def my_function():\n"
" pass\n")
assert SortImports(file_contents=test_input).output == ("from a import b\n"
"\n"
"\n"
'"""\n'
" comment should be ignored\n"
'"""\n'
"def my_function():\n"
" pass\n")
def test_settings_combine_instead_of_overwrite():
"""Test to ensure settings combine logically, instead of fully overwriting."""
assert set(SortImports(known_standard_library=['not_std_library']).config['known_standard_library']) == \
set(SortImports().config['known_standard_library'] + ['not_std_library'])
assert set(SortImports(not_known_standard_library=['thread']).config['known_standard_library']) == \
set(item for item in SortImports().config['known_standard_library'] if item != 'thread')
def test_combined_from_and_as_imports():
"""Test to ensure it's possible to combine from and as imports."""
test_input = ("from translate.misc.multistring import multistring\n"
"from translate.storage import base, factory\n"
"from translate.storage.placeables import general, parse as rich_parse\n")
assert SortImports(file_contents=test_input, combine_as_imports=True).output == test_input
def test_as_imports_with_line_length():
"""Test to ensure it's possible to combine from and as imports."""
test_input = ("from translate.storage import base as storage_base\n"
"from translate.storage.placeables import general, parse as rich_parse\n")
assert SortImports(file_contents=test_input, combine_as_imports=False, line_length=40).output == \
("from translate.storage import \\\n base as storage_base\n"
"from translate.storage.placeables import \\\n parse as rich_parse\n"
"from translate.storage.placeables import \\\n general\n")
def test_keep_comments():
"""Test to ensure isort properly keeps comments in tact after sorting."""
# Straight Import
test_input = ("import foo # bar\n")
assert SortImports(file_contents=test_input).output == test_input
# Star import
test_input_star = ("from foo import * # bar\n")
assert SortImports(file_contents=test_input_star).output == test_input_star
# Force Single Line From Import
test_input = ("from foo import bar # comment\n")
assert SortImports(file_contents=test_input, force_single_line=True).output == test_input
# From import
test_input = ("from foo import bar # My Comment\n")
assert SortImports(file_contents=test_input).output == test_input
# More complicated case
test_input = ("from a import b # My Comment1\n"
"from a import c # My Comment2\n")
assert SortImports(file_contents=test_input).output == \
("from a import b # My Comment1\n"
"from a import c # My Comment2\n")
# Test case where imports comments make imports extend pass the line length
test_input = ("from a import b # My Comment1\n"
"from a import c # My Comment2\n"
"from a import d\n")
assert SortImports(file_contents=test_input, line_length=45).output == \
("from a import b # My Comment1\n"
"from a import c # My Comment2\n"
"from a import d\n")
# Test case where imports with comments will be beyond line length limit
test_input = ("from a import b, c # My Comment1\n"
"from a import c, d # My Comment2 is really really really really long\n")
assert SortImports(file_contents=test_input, line_length=45).output == \
("from a import (b, # My Comment1; My Comment2 is really really really really long\n"
" c, d)\n")
# Test that comments are not stripped from 'import ... as ...' by default
test_input = ("from a import b as bb # b comment\n"
"from a import c as cc # c comment\n")
assert SortImports(file_contents=test_input).output == test_input
# Test that 'import ... as ...' comments are not collected inappropriately
test_input = ("from a import b as bb # b comment\n"
"from a import c as cc # c comment\n"
"from a import d\n")
assert SortImports(file_contents=test_input).output == test_input
assert SortImports(file_contents=test_input, combine_as_imports=True).output == (
"from a import b as bb, c as cc, d # b comment; c comment\n"
)
def test_multiline_split_on_dot():
"""Test to ensure isort correctly handles multiline imports, even when split right after a '.'"""
test_input = ("from my_lib.my_package.test.level_1.level_2.level_3.level_4.level_5.\\\n"
" my_module import my_function")
assert SortImports(file_contents=test_input, line_length=70).output == \
("from my_lib.my_package.test.level_1.level_2.level_3.level_4.level_5.my_module import \\\n"
" my_function\n")
def test_import_star():
"""Test to ensure isort handles star imports correctly"""
test_input = ("from blah import *\n"
"from blah import _potato\n")
assert SortImports(file_contents=test_input).output == ("from blah import *\n"
"from blah import _potato\n")
assert SortImports(file_contents=test_input, combine_star=True).output == ("from blah import *\n")
def test_include_trailing_comma():
"""Test for the include_trailing_comma option"""
test_output_grid = SortImports(
file_contents=SHORT_IMPORT,
multi_line_output=WrapModes.GRID,
line_length=40,
include_trailing_comma=True,
).output
assert test_output_grid == (
"from third_party import (lib1, lib2,\n"
" lib3, lib4,)\n"
)
test_output_vertical = SortImports(
file_contents=SHORT_IMPORT,
multi_line_output=WrapModes.VERTICAL,
line_length=40,
include_trailing_comma=True,
).output
assert test_output_vertical == (
"from third_party import (lib1,\n"
" lib2,\n"
" lib3,\n"
" lib4,)\n"
)
test_output_vertical_indent = SortImports(
file_contents=SHORT_IMPORT,
multi_line_output=WrapModes.VERTICAL_HANGING_INDENT,
line_length=40,
include_trailing_comma=True,
).output
assert test_output_vertical_indent == (
"from third_party import (\n"
" lib1,\n"
" lib2,\n"
" lib3,\n"
" lib4,\n"
")\n"
)
test_output_vertical_grid = SortImports(
file_contents=SHORT_IMPORT,
multi_line_output=WrapModes.VERTICAL_GRID,
line_length=40,
include_trailing_comma=True,
).output
assert test_output_vertical_grid == (
"from third_party import (\n"
" lib1, lib2, lib3, lib4,)\n"
)
test_output_vertical_grid_grouped = SortImports(
file_contents=SHORT_IMPORT,
multi_line_output=WrapModes.VERTICAL_GRID_GROUPED,
line_length=40,
include_trailing_comma=True,
).output
assert test_output_vertical_grid_grouped == (
"from third_party import (\n"
" lib1, lib2, lib3, lib4,\n"
")\n"
)
def test_similar_to_std_library():
"""Test to ensure modules that are named similarly to a standard library import don't end up clobbered"""
test_input = ("import datetime\n"
"\n"
"import requests\n"
"import times\n")
assert SortImports(file_contents=test_input, known_third_party=["requests", "times"]).output == test_input
def test_correctly_placed_imports():
"""Test to ensure comments stay on correct placement after being sorted"""
test_input = ("from a import b # comment for b\n"
"from a import c # comment for c\n")
assert SortImports(file_contents=test_input, force_single_line=True).output == \
("from a import b # comment for b\n"
"from a import c # comment for c\n")
assert SortImports(file_contents=test_input).output == ("from a import b # comment for b\n"
"from a import c # comment for c\n")
# Full example test from issue #143
test_input = ("from itertools import chain\n"
"\n"
"from django.test import TestCase\n"
"from model_mommy import mommy\n"
"\n"
"from apps.clientman.commands.download_usage_rights import associate_right_for_item_product\n"
"from apps.clientman.commands.download_usage_rights import associate_right_for_item_product_d"
"efinition\n"
"from apps.clientman.commands.download_usage_rights import associate_right_for_item_product_d"
"efinition_platform\n"
"from apps.clientman.commands.download_usage_rights import associate_right_for_item_product_p"
"latform\n"
"from apps.clientman.commands.download_usage_rights import associate_right_for_territory_reta"
"il_model\n"
"from apps.clientman.commands.download_usage_rights import associate_right_for_territory_reta"
"il_model_definition_platform_provider # noqa\n"
"from apps.clientman.commands.download_usage_rights import clear_right_for_item_product\n"
"from apps.clientman.commands.download_usage_rights import clear_right_for_item_product_defini"
"tion\n"
"from apps.clientman.commands.download_usage_rights import clear_right_for_item_product_defini"
"tion_platform\n"
"from apps.clientman.commands.download_usage_rights import clear_right_for_item_product_platfo"
"rm\n"
"from apps.clientman.commands.download_usage_rights import clear_right_for_territory_retail_mo"
"del\n"
"from apps.clientman.commands.download_usage_rights import clear_right_for_territory_retail_mo"
"del_definition_platform_provider # noqa\n"
"from apps.clientman.commands.download_usage_rights import create_download_usage_right\n"
"from apps.clientman.commands.download_usage_rights import delete_download_usage_right\n"
"from apps.clientman.commands.download_usage_rights import disable_download_for_item_product\n"
"from apps.clientman.commands.download_usage_rights import disable_download_for_item_product_d"
"efinition\n"
"from apps.clientman.commands.download_usage_rights import disable_download_for_item_product_d"
"efinition_platform\n"
"from apps.clientman.commands.download_usage_rights import disable_download_for_item_product_p"
"latform\n"
"from apps.clientman.commands.download_usage_rights import disable_download_for_territory_reta"
"il_model\n"
"from apps.clientman.commands.download_usage_rights import disable_download_for_territory_reta"
"il_model_definition_platform_provider # noqa\n"
"from apps.clientman.commands.download_usage_rights import get_download_rights_for_item\n"
"from apps.clientman.commands.download_usage_rights import get_right\n")
assert SortImports(file_contents=test_input, force_single_line=True, line_length=140,
known_third_party=["django", "model_mommy"]).output == test_input
def test_auto_detection():
"""Initial test to ensure isort auto-detection works correctly - will grow over time as new issues are raised."""
# Issue 157
test_input = ("import binascii\n"
"import os\n"
"\n"
"import cv2\n"
"import requests\n")
assert SortImports(file_contents=test_input, known_third_party=["cv2", "requests"]).output == test_input
# alternative solution
assert SortImports(file_contents=test_input, default_section="THIRDPARTY").output == test_input
def test_same_line_statements():
"""Ensure isort correctly handles the case where a single line contains multiple statements including an import"""
test_input = ("import pdb; import nose\n")
assert SortImports(file_contents=test_input).output == ("import pdb\n"
"\n"
"import nose\n")
test_input = ("import pdb; pdb.set_trace()\n"
"import nose; nose.run()\n")
assert SortImports(file_contents=test_input).output == test_input
def test_long_line_comments():
"""Ensure isort correctly handles comments at the end of extreamly long lines"""
test_input = ("from foo.utils.fabric_stuff.live import check_clean_live, deploy_live, sync_live_envdir, "
"update_live_app, update_live_cron # noqa\n"
"from foo.utils.fabric_stuff.stage import check_clean_stage, deploy_stage, sync_stage_envdir, "
"update_stage_app, update_stage_cron # noqa\n")
assert SortImports(file_contents=test_input).output == \
("from foo.utils.fabric_stuff.live import (check_clean_live, deploy_live, # noqa\n"
" sync_live_envdir, update_live_app, update_live_cron)\n"
"from foo.utils.fabric_stuff.stage import (check_clean_stage, deploy_stage, # noqa\n"
" sync_stage_envdir, update_stage_app, update_stage_cron)\n")
def test_tab_character_in_import():
"""Ensure isort correctly handles import statements that contain a tab character"""
test_input = ("from __future__ import print_function\n"
"from __future__ import\tprint_function\n")
assert SortImports(file_contents=test_input).output == "from __future__ import print_function\n"
def test_split_position():
"""Ensure isort splits on import instead of . when possible"""
test_input = ("from p24.shared.exceptions.master.host_state_flag_unchanged import HostStateUnchangedException\n")
assert SortImports(file_contents=test_input, line_length=80).output == \
("from p24.shared.exceptions.master.host_state_flag_unchanged import \\\n"
" HostStateUnchangedException\n")
def test_place_comments():
"""Ensure manually placing imports works as expected"""
test_input = ("import sys\n"
"import os\n"
"import myproject.test\n"
"import django.settings\n"
"\n"
"# isort:imports-thirdparty\n"
"# isort:imports-firstparty\n"
"print('code')\n"
"\n"
"# isort:imports-stdlib\n")
test_output = SortImports(file_contents=test_input, known_third_party=['django']).output
assert test_output == ("\n# isort:imports-thirdparty\n"
"import django.settings\n"
"\n"
"# isort:imports-firstparty\n"
"import myproject.test\n"
"\n"
"print('code')\n"
"\n"
"# isort:imports-stdlib\n"
"import os\n"
"import sys\n")
def test_placement_control():
"""Ensure that most specific placement control match wins"""
test_input = ("import os\n"
"import sys\n"
"from bottle import Bottle, redirect, response, run\n"
"import p24.imports._argparse as argparse\n"
"import p24.imports._subprocess as subprocess\n"
"import p24.imports._VERSION as VERSION\n"
"import p24.shared.media_wiki_syntax as syntax\n")
test_output = SortImports(file_contents=test_input,
known_first_party=['p24', 'p24.imports._VERSION'],
known_standard_library=['p24.imports'],
known_third_party=['bottle'],
default_section="THIRDPARTY").output
assert test_output == ("import os\n"
"import p24.imports._argparse as argparse\n"
"import p24.imports._subprocess as subprocess\n"
"import sys\n"
"\n"
"from bottle import Bottle, redirect, response, run\n"
"\n"
"import p24.imports._VERSION as VERSION\n"
"import p24.shared.media_wiki_syntax as syntax\n")
def test_custom_sections():
"""Ensure that most specific placement control match wins"""
test_input = ("import os\n"
"import sys\n"
"from django.conf import settings\n"
"from bottle import Bottle, redirect, response, run\n"
"import p24.imports._argparse as argparse\n"
"from django.db import models\n"
"import p24.imports._subprocess as subprocess\n"
"import pandas as pd\n"
"import p24.imports._VERSION as VERSION\n"
"import numpy as np\n"
"import p24.shared.media_wiki_syntax as syntax\n")
test_output = SortImports(file_contents=test_input,
known_first_party=['p24', 'p24.imports._VERSION'],
import_heading_stdlib='Standard Library',
import_heading_thirdparty='Third Party',
import_heading_firstparty='First Party',
import_heading_django='Django',
import_heading_pandas='Pandas',
known_standard_library=['p24.imports'],
known_third_party=['bottle'],
known_django=['django'],
known_pandas=['pandas', 'numpy'],
default_section="THIRDPARTY",
sections=["FUTURE", "STDLIB", "DJANGO", "THIRDPARTY", "PANDAS", "FIRSTPARTY", "LOCALFOLDER"]).output
assert test_output == ("# Standard Library\n"
"import os\n"
"import p24.imports._argparse as argparse\n"
"import p24.imports._subprocess as subprocess\n"
"import sys\n"
"\n"
"# Django\n"
"from django.conf import settings\n"
"from django.db import models\n"
"\n"
"# Third Party\n"
"from bottle import Bottle, redirect, response, run\n"
"\n"
"# Pandas\n"
"import numpy as np\n"
"import pandas as pd\n"
"\n"
"# First Party\n"
"import p24.imports._VERSION as VERSION\n"
"import p24.shared.media_wiki_syntax as syntax\n")
def test_sticky_comments():
"""Test to ensure it is possible to make comments 'stick' above imports"""
test_input = ("import os\n"
"\n"
"# Used for type-hinting (ref: https://github.com/davidhalter/jedi/issues/414).\n"
"from selenium.webdriver.remote.webdriver import WebDriver # noqa\n")
assert SortImports(file_contents=test_input).output == test_input
test_input = ("from django import forms\n"
"# While this couples the geographic forms to the GEOS library,\n"
"# it decouples from database (by not importing SpatialBackend).\n"
"from django.contrib.gis.geos import GEOSException, GEOSGeometry\n"
"from django.utils.translation import ugettext_lazy as _\n")
assert SortImports(file_contents=test_input).output == test_input
def test_zipimport():
"""Imports ending in "import" shouldn't be clobbered"""
test_input = "from zipimport import zipimport\n"
assert SortImports(file_contents=test_input).output == test_input
def test_from_ending():
"""Imports ending in "from" shouldn't be clobbered."""
test_input = "from foo import get_foo_from, get_foo\n"
expected_output = "from foo import get_foo, get_foo_from\n"
assert SortImports(file_contents=test_input).output == expected_output
def test_from_first():
"""Tests the setting from_first works correctly"""
test_input = "from os import path\nimport os\n"
assert SortImports(file_contents=test_input, from_first=True).output == test_input
def test_top_comments():
"""Ensure correct behavior with top comments"""
test_input = ("# -*- encoding: utf-8 -*-\n"
"# Test comment\n"
"#\n"
"from __future__ import unicode_literals\n")
assert SortImports(file_contents=test_input).output == test_input
test_input = ("# -*- coding: utf-8 -*-\n"
"from django.db import models\n"
"from django.utils.encoding import python_2_unicode_compatible\n")
assert SortImports(file_contents=test_input).output == test_input
test_input = ("# Comment\n"
"import sys\n")
assert SortImports(file_contents=test_input).output == test_input
test_input = ("# -*- coding\n"
"import sys\n")
assert SortImports(file_contents=test_input).output == test_input
def test_consistency():
"""Ensures consistency of handling even when dealing with non ordered-by-type imports"""
test_input = "from sqlalchemy.dialects.postgresql import ARRAY, array\n"
assert SortImports(file_contents=test_input, order_by_type=True).output == test_input
def test_force_grid_wrap():
"""Ensures removing imports works as expected."""
test_input = (
"from foo import lib6, lib7\n"
"from bar import lib2\n"
)
test_output = SortImports(
file_contents=test_input,
force_grid_wrap=True,
multi_line_output=WrapModes.VERTICAL_HANGING_INDENT
).output
assert test_output == """from bar import lib2
from foo import (
lib6,
lib7
)
"""
def test_force_grid_wrap_long():
"""Ensure that force grid wrap still happens with long line length"""
test_input = (
"from foo import lib6, lib7\n"
"from bar import lib2\n"
"from babar import something_that_is_kind_of_long"
)
test_output = SortImports(
file_contents=test_input,
force_grid_wrap=True,
multi_line_output=WrapModes.VERTICAL_HANGING_INDENT,
line_length=9999,
).output
assert test_output == """from babar import something_that_is_kind_of_long
from bar import lib2
from foo import (
lib6,
lib7
)
"""
def test_uses_jinja_variables():
"""Test a basic set of imports that use jinja variables"""
test_input = ("import sys\n"
"import os\n"
"import myproject.{ test }\n"
"import django.{ settings }")
test_output = SortImports(file_contents=test_input, known_third_party=['django'],
known_first_party=['myproject']).output
assert test_output == ("import os\n"
"import sys\n"
"\n"
"import django.{ settings }\n"
"\n"
"import myproject.{ test }\n")
test_input = ("import {{ cookiecutter.repo_name }}\n"
"from foo import {{ cookiecutter.bar }}\n")
assert SortImports(file_contents=test_input).output == test_input
def test_fcntl():
"""Test to ensure fcntl gets correctly recognized as stdlib import"""
test_input = ("import fcntl\n"
"import os\n"
"import sys\n")
assert SortImports(file_contents=test_input).output == test_input
def test_import_split_is_word_boundary_aware():
"""Test to ensure that isort splits words in a boundry aware mannor"""
test_input = ("from mycompany.model.size_value_array_import_func import ("
" get_size_value_array_import_func_jobs,"
")")
test_output = SortImports(file_contents=test_input,
multi_line_output=WrapModes.VERTICAL_HANGING_INDENT,
line_length=79).output
assert test_output == ("from mycompany.model.size_value_array_import_func import \\\n"
" get_size_value_array_import_func_jobs\n")
def test_other_file_encodings():
"""Test to ensure file encoding is respected"""
try:
tmp_dir = tempfile.mkdtemp()
for encoding in ('latin1', 'utf8'):
tmp_fname = os.path.join(tmp_dir, 'test_{0}.py'.format(encoding))
with codecs.open(tmp_fname, mode='w', encoding=encoding) as f:
file_contents = "# coding: {0}\n\ns = u'ã'\n".format(encoding)
f.write(file_contents)
assert SortImports(file_path=tmp_fname).output == file_contents
finally:
shutil.rmtree(tmp_dir, ignore_errors=True)
def test_comment_at_top_of_file():
"""Test to ensure isort correctly handles top of file comments"""
test_input = ("# Comment one\n"
"from django import forms\n"
"# Comment two\n"
"from django.contrib.gis.geos import GEOSException\n")
assert SortImports(file_contents=test_input).output == test_input
test_input = ("# -*- coding: utf-8 -*-\n"
"from django.db import models\n")
assert SortImports(file_contents=test_input).output == test_input
def test_alphabetic_sorting():
"""Test to ensure isort correctly handles top of file comments"""
test_input = ("from django.contrib.gis.geos import GEOSException\n"
"from plone.app.testing import getRoles\n"
"from plone.app.testing import ManageRoles\n"
"from plone.app.testing import setRoles\n"
"from Products.CMFPlone import utils\n"
"\n"
"import ABC\n"
"import unittest\n"
"import Zope\n")
options = {'force_single_line': True,
'force_alphabetical_sort': True, }
assert SortImports(file_contents=test_input, **options).output == test_input
test_input = ("# -*- coding: utf-8 -*-\n"
"from django.db import models\n")
assert SortImports(file_contents=test_input).output == test_input
def test_comments_not_duplicated():
"""Test to ensure comments aren't duplicated: issue 303"""
test_input = ('from flask import url_for\n'
"# Whole line comment\n"
'from service import demo # inline comment\n'
'from service import settings\n')
output = SortImports(file_contents=test_input).output
assert output.count("# Whole line comment\n") == 1
assert output.count("# inline comment\n") == 1
def test_top_of_line_comments():
"""Test to ensure top of line comments stay where they should: issue 260"""
test_input = ('# -*- coding: utf-8 -*-\n'
'from django.db import models\n'
'#import json as simplejson\n'
'from myproject.models import Servidor\n'
'\n'
'import reversion\n'
'\n'
'import logging\n')
output = SortImports(file_contents=test_input).output
assert output.startswith('# -*- coding: utf-8 -*-\n')
def test_basic_comment():
"""Test to ensure a basic comment wont crash isort"""
test_input = ('import logging\n'
'# Foo\n'
'import os\n')
assert SortImports(file_contents=test_input).output == test_input
def test_shouldnt_add_lines():
"""Ensure that isort doesn't add a blank line when a top of import comment is present, issue #316"""
test_input = ('"""Text"""\n'
'# This is a comment\n'
'import pkg_resources\n')
assert SortImports(file_contents=test_input).output == test_input
def test_sections_parsed_correct():
"""Ensure that modules for custom sections parsed as list from config file and isort result is correct"""
tmp_conf_dir = None
conf_file_data = (
'[settings]\n'
'sections=FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER,COMMON\n'
'known_common=nose\n'
'import_heading_common=Common Library\n'
'import_heading_stdlib=Standard Library\n'
)
test_input = (
'import os\n'
'from nose import *\n'
'import nose\n'
'from os import path'
)
correct_output = (
'# Standard Library\n'
'import os\n'
'from os import path\n'
'\n'
'# Common Library\n'
'import nose\n'
'from nose import *\n'
)
try:
tmp_conf_dir = tempfile.mkdtemp()
tmp_conf_name = os.path.join(tmp_conf_dir, '.isort.cfg')
with codecs.open(tmp_conf_name, 'w') as test_config:
test_config.writelines(conf_file_data)
assert SortImports(file_contents=test_input, settings_path=tmp_conf_dir).output == correct_output
finally:
shutil.rmtree(tmp_conf_dir, ignore_errors=True)
| mit | -3,622,024,409,609,949,700 | 48.600629 | 194 | 0.484588 | false |
lightrabbit/PyBitmessage | src/addresses.py | 1 | 10807 | import hashlib
from struct import *
from pyelliptic import arithmetic
#There is another copy of this function in Bitmessagemain.py
def convertIntToString(n):
a = __builtins__.hex(n)
if a[-1:] == 'L':
a = a[:-1]
if (len(a) % 2) == 0:
return a[2:].decode('hex')
else:
return ('0'+a[2:]).decode('hex')
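# Illustrative note (added comment, not part of the original module): the helper
# above behaves like a big-endian hex decode of the integer, padded to whole
# bytes. The concrete values below are examples for demonstration only:
#     convertIntToString(255)  -> '\xff'
#     convertIntToString(4660) -> '\x12\x34'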
ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
def encodeBase58(num, alphabet=ALPHABET):
"""Encode a number in Base X
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
"""
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
#print 'num is:', num
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
def decodeBase58(string, alphabet=ALPHABET):
"""Decode a Base X encoded string into the number
Arguments:
- `string`: The encoded string
- `alphabet`: The alphabet to use for encoding
"""
base = len(alphabet)
num = 0
try:
for char in string:
num *= base
num += alphabet.index(char)
except:
#character not found (like a space character or a 0)
return 0
return num
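# Quick sanity examples for the base58 helpers above (illustrative comments
# only; the concrete values are assumptions for demonstration):
#     encodeBase58(0) -> '1'                                (first alphabet character)
#     decodeBase58(encodeBase58(123456789)) -> 123456789    (round trip)
#     decodeBase58('0OIl') -> 0                             (characters outside the alphabet yield 0)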
def encodeVarint(integer):
if integer < 0:
logger.error('varint cannot be < 0')
raise SystemExit
if integer < 253:
return pack('>B',integer)
if integer >= 253 and integer < 65536:
return pack('>B',253) + pack('>H',integer)
if integer >= 65536 and integer < 4294967296:
return pack('>B',254) + pack('>I',integer)
if integer >= 4294967296 and integer < 18446744073709551616:
return pack('>B',255) + pack('>Q',integer)
if integer >= 18446744073709551616:
logger.error('varint cannot be >= 18446744073709551616')
raise SystemExit
class varintDecodeError(Exception):
pass
def decodeVarint(data):
"""
Decodes an encoded varint to an integer and returns it.
Per protocol v3, the encoded value must be encoded with
the minimum amount of data possible or else it is malformed.
Returns a tuple: (theEncodedValue, theSizeOfTheVarintInBytes)
"""
if len(data) == 0:
return (0,0)
firstByte, = unpack('>B',data[0:1])
if firstByte < 253:
# encodes 0 to 252
return (firstByte,1) #the 1 is the length of the varint
if firstByte == 253:
# encodes 253 to 65535
if len(data) < 3:
raise varintDecodeError('The first byte of this varint as an integer is %s but the total length is only %s. It needs to be at least 3.' % (firstByte, len(data)))
encodedValue, = unpack('>H',data[1:3])
if encodedValue < 253:
raise varintDecodeError('This varint does not encode the value with the lowest possible number of bytes.')
return (encodedValue,3)
if firstByte == 254:
# encodes 65536 to 4294967295
if len(data) < 5:
raise varintDecodeError('The first byte of this varint as an integer is %s but the total length is only %s. It needs to be at least 5.' % (firstByte, len(data)))
encodedValue, = unpack('>I',data[1:5])
if encodedValue < 65536:
raise varintDecodeError('This varint does not encode the value with the lowest possible number of bytes.')
return (encodedValue,5)
if firstByte == 255:
# encodes 4294967296 to 18446744073709551615
if len(data) < 9:
raise varintDecodeError('The first byte of this varint as an integer is %s but the total length is only %s. It needs to be at least 9.' % (firstByte, len(data)))
encodedValue, = unpack('>Q',data[1:9])
if encodedValue < 4294967296:
raise varintDecodeError('This varint does not encode the value with the lowest possible number of bytes.')
return (encodedValue,9)
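# Width examples for the varint helpers above (illustrative comments only):
#     encodeVarint(252)   -> '\xfc'                          (1 byte,  values 0..252)
#     encodeVarint(253)   -> '\xfd\x00\xfd'                  (3 bytes, values 253..65535)
#     encodeVarint(65536) -> '\xfe\x00\x01\x00\x00'          (5 bytes, values 65536..4294967295)
#     decodeVarint('\xfd\x00\xfd') -> (253, 3)
# decodeVarint raises varintDecodeError for truncated data or for values that
# are not encoded in the minimum possible number of bytes.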
def calculateInventoryHash(data):
sha = hashlib.new('sha512')
sha2 = hashlib.new('sha512')
sha.update(data)
sha2.update(sha.digest())
return sha2.digest()[0:32]
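# Descriptive note added for clarity: calculateInventoryHash is a double
# SHA-512 truncated to its first 32 bytes, so
# len(calculateInventoryHash(data)) == 32 for any input.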
def encodeAddress(version,stream,ripe):
if version >= 2 and version < 4:
if len(ripe) != 20:
raise Exception("Programming error in encodeAddress: The length of a given ripe hash was not 20.")
if ripe[:2] == '\x00\x00':
ripe = ripe[2:]
elif ripe[:1] == '\x00':
ripe = ripe[1:]
elif version == 4:
if len(ripe) != 20:
raise Exception("Programming error in encodeAddress: The length of a given ripe hash was not 20.")
ripe = ripe.lstrip('\x00')
storedBinaryData = encodeVarint(version) + encodeVarint(stream) + ripe
# Generate the checksum
sha = hashlib.new('sha512')
sha.update(storedBinaryData)
currentHash = sha.digest()
sha = hashlib.new('sha512')
sha.update(currentHash)
checksum = sha.digest()[0:4]
asInt = int(storedBinaryData.encode('hex') + checksum.encode('hex'),16)
return 'BM-'+ encodeBase58(asInt)
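# Layout produced by encodeAddress (descriptive note added for clarity):
#     'BM-' + base58( varint(version) + varint(stream) + ripe + checksum )
# where checksum is the first 4 bytes of sha512(sha512(varint(version) +
# varint(stream) + ripe)), and leading NULL bytes of the ripe hash are stripped
# first (up to two for versions 2/3, all of them for version 4).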
def decodeAddress(address):
#returns (status, address version number, stream number, data (almost certainly a ripe hash))
address = str(address).strip()
if address[:3] == 'BM-':
integer = decodeBase58(address[3:])
else:
integer = decodeBase58(address)
if integer == 0:
status = 'invalidcharacters'
return status,0,0,""
#after converting to hex, the string will be prepended with a 0x and appended with a L
hexdata = hex(integer)[2:-1]
if len(hexdata) % 2 != 0:
hexdata = '0' + hexdata
#print 'hexdata', hexdata
data = hexdata.decode('hex')
checksum = data[-4:]
sha = hashlib.new('sha512')
sha.update(data[:-4])
currentHash = sha.digest()
#print 'sha after first hashing: ', sha.hexdigest()
sha = hashlib.new('sha512')
sha.update(currentHash)
#print 'sha after second hashing: ', sha.hexdigest()
if checksum != sha.digest()[0:4]:
status = 'checksumfailed'
return status,0,0,""
#else:
# print 'checksum PASSED'
try:
addressVersionNumber, bytesUsedByVersionNumber = decodeVarint(data[:9])
except varintDecodeError as e:
logger.error(str(e))
status = 'varintmalformed'
return status,0,0,""
#print 'addressVersionNumber', addressVersionNumber
#print 'bytesUsedByVersionNumber', bytesUsedByVersionNumber
if addressVersionNumber > 4:
logger.error('cannot decode address version numbers this high')
status = 'versiontoohigh'
return status,0,0,""
elif addressVersionNumber == 0:
logger.error('cannot decode address version numbers of zero.')
status = 'versiontoohigh'
return status,0,0,""
try:
streamNumber, bytesUsedByStreamNumber = decodeVarint(data[bytesUsedByVersionNumber:])
except varintDecodeError as e:
logger.error(str(e))
status = 'varintmalformed'
return status,0,0,""
#print streamNumber
status = 'success'
if addressVersionNumber == 1:
return status,addressVersionNumber,streamNumber,data[-24:-4]
elif addressVersionNumber == 2 or addressVersionNumber == 3:
embeddedRipeData = data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]
if len(embeddedRipeData) == 19:
return status,addressVersionNumber,streamNumber,'\x00'+embeddedRipeData
elif len(embeddedRipeData) == 20:
return status,addressVersionNumber,streamNumber,embeddedRipeData
elif len(embeddedRipeData) == 18:
return status,addressVersionNumber,streamNumber,'\x00\x00'+embeddedRipeData
elif len(embeddedRipeData) < 18:
return 'ripetooshort',0,0,""
elif len(embeddedRipeData) > 20:
return 'ripetoolong',0,0,""
else:
return 'otherproblem',0,0,""
elif addressVersionNumber == 4:
embeddedRipeData = data[bytesUsedByVersionNumber+bytesUsedByStreamNumber:-4]
if embeddedRipeData[0:1] == '\x00':
# In order to enforce address non-malleability, encoded RIPE data must have NULL bytes removed from the front
return 'encodingproblem',0,0,""
elif len(embeddedRipeData) > 20:
return 'ripetoolong',0,0,""
elif len(embeddedRipeData) < 4:
return 'ripetooshort',0,0,""
else:
x00string = '\x00' * (20 - len(embeddedRipeData))
return status,addressVersionNumber,streamNumber,x00string+embeddedRipeData
def addBMIfNotPresent(address):
address = str(address).strip()
if address[:3] != 'BM-':
return 'BM-'+address
else:
return address
if __name__ == "__main__":
print 'Let us make an address from scratch. Suppose we generate two random 32 byte values and call the first one the signing key and the second one the encryption key:'
privateSigningKey = '93d0b61371a54b53df143b954035d612f8efa8a3ed1cf842c2186bfd8f876665'
privateEncryptionKey = '4b0b73a54e19b059dc274ab69df095fe699f43b17397bca26fdf40f4d7400a3a'
print 'privateSigningKey =', privateSigningKey
print 'privateEncryptionKey =', privateEncryptionKey
print 'Now let us convert them to public keys by doing an elliptic curve point multiplication.'
publicSigningKey = arithmetic.privtopub(privateSigningKey)
publicEncryptionKey = arithmetic.privtopub(privateEncryptionKey)
print 'publicSigningKey =', publicSigningKey
print 'publicEncryptionKey =', publicEncryptionKey
print 'Notice that they both begin with the \\x04 which specifies the encoding type. This prefix is not send over the wire. You must strip if off before you send your public key across the wire, and you must add it back when you receive a public key.'
publicSigningKeyBinary = arithmetic.changebase(publicSigningKey,16,256,minlen=64)
publicEncryptionKeyBinary = arithmetic.changebase(publicEncryptionKey,16,256,minlen=64)
ripe = hashlib.new('ripemd160')
sha = hashlib.new('sha512')
sha.update(publicSigningKeyBinary+publicEncryptionKeyBinary)
ripe.update(sha.digest())
addressVersionNumber = 2
streamNumber = 1
print 'Ripe digest that we will encode in the address:', ripe.digest().encode('hex')
returnedAddress = encodeAddress(addressVersionNumber,streamNumber,ripe.digest())
print 'Encoded address:', returnedAddress
status,addressVersionNumber,streamNumber,data = decodeAddress(returnedAddress)
print '\nAfter decoding address:'
print 'Status:', status
print 'addressVersionNumber', addressVersionNumber
print 'streamNumber', streamNumber
print 'length of data(the ripe hash):', len(data)
print 'ripe data:', data.encode('hex')
| mit | 8,261,789,843,015,548,000 | 37.459075 | 255 | 0.656519 | false |
chrissly31415/amimanera | competition_scripts/crawldata.py | 1 | 5442 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" crawl data
"""
import pandas as pd
import re
import math
def crawlHTML(lXall):
"""
    crawling raw html content: collect the first 100 html tags per document as a text column
"""
print("Crawling html data...")
basedir='../stumbled_upon/raw_content/'
#phtml = re.compile("</[^>]*?>")
phtml = re.compile("<[^>]*?>")
tutto=[]
for ind in lXall.index:
row=[]
#nl=lXall.ix[ind,'numberOfLinks']
#nl=1+lXall.ix[ind,'non_markup_alphanum_characters']
#print "numberOfLinks:",nl
with open(basedir+str(ind), 'r') as content_file:
content = content_file.read()
#print "id:",ind,
row.append(ind)
res = phtml.findall(content)
tmp=[x for x in res]
tmp=tmp[:100]
tmp=' '.join(tmp)
tmp=str(tmp, errors='replace')
tmp=tmp.lower()
tmp=tmp.replace("<","").replace(">","").replace("/","")
#tmp=tmp.decode("utf8")
#print tmp
row.append(tmp)
#if len(res)>0:
#print ind,": ",res
#raw_input("HITKEY")
tutto.append(row)
newdf=pd.DataFrame(tutto).set_index(0)
newdf.columns=['htmltag']
print(newdf.head(20))
print(newdf.describe())
return newdf
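# Hedged usage sketch (illustrative; the name 'Xall' is an assumption): both
# crawlers expect a DataFrame indexed by document id with one raw file per id
# under basedir; crawlRawData additionally needs the 'numberOfLinks' and
# 'non_markup_alphanum_characters' columns, e.g.
#     html_features = crawlHTML(Xall)
#     raw_features = crawlRawData(Xall)
#     Xall = pd.concat([Xall, html_features, raw_features], axis=1)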
def crawlRawData(lXall):
"""
    crawling raw data: count social-media, comment and markup regex hits per document
"""
print("Crawling raw data...")
basedir='../stumbled_upon/raw_content/'
pfacebook = re.compile("www.{1,2}facebook.{1,2}com")
pfacebook2 = re.compile("developers.{1,2}facebook.{1,2}com.{1,2}docs.{1,2}reference.{1,2}plugins.{1,2}like|facebook.{1,2}com.{1,2}plugins.{1,2}like")
plinkedin = re.compile("platform.{1,2}linkedin.{1,2}com")
ptwitter = re.compile("twitter.{1,2}com.{1,2}share")
prss=re.compile("rss feed",re.IGNORECASE)
pgooglep=re.compile("apis.{1,2}google.{1,2}com")
#pstumble=re.compile("www.{1,2}stumbleupon.{1,2}com")
pstumble=re.compile("stumbleupon")
pcolor=re.compile("colorscheme|color_scheme|color=|color:",re.IGNORECASE)
psignup=re.compile("signup|register|login|sign up",re.IGNORECASE)
pcomment=re.compile("leave a comment|leave comment",re.IGNORECASE)
pncomment=re.compile("comment-",re.IGNORECASE)
pmail=re.compile("email",re.IGNORECASE)
ppics=re.compile("\.png|\.tif|\.jpg",re.IGNORECASE)
pgif=re.compile("\.gif",re.IGNORECASE)
psmile=re.compile(":-\)|;-\)")
plbreak=re.compile("<br>")
psearch=re.compile("searchstring|customsearch|searchcontrol|searchquery|searchform|searchbox",re.IGNORECASE)
pcaptcha=re.compile("captcha",re.IGNORECASE)
padvert=re.compile("advertis",re.IGNORECASE)
pnewline=re.compile("\n")
pgooglead=re.compile("google_ad_client")
phtml5=re.compile("html5",re.IGNORECASE)
phuff=re.compile("www.huffingtonpost.com",re.IGNORECASE)
pflash=re.compile("shockwave-flash",re.IGNORECASE)
pdynlink=re.compile("<a href.+?.+>")
pnofollow=re.compile("rel=\"nofollow\"",re.IGNORECASE)
pschemaorg=re.compile("schema\.org",re.IGNORECASE)
pmobileredirect=re.compile("mobile redirect",re.IGNORECASE)
#pshare=re.compile("sharearticle|share.{1,20}article",re.IGNORECASE)
plang=re.compile("en-US|en_US",re.IGNORECASE)
tutto=[]
for ind in lXall.index:
row=[]
nl=1.0+lXall.ix[ind,'numberOfLinks']
nchar=1.0+lXall.ix[ind,'non_markup_alphanum_characters']
#print "numberOfLinks:",nl
with open(basedir+str(ind), 'r') as content_file:
content = content_file.read()
#print "id:",ind,
row.append(ind)
res = pfacebook.findall(content)
row.append(len(res)/float(nl))
res = pfacebook2.findall(content)
row.append(len(res)/float(nl))
res = ptwitter.findall(content)
row.append(len(res)/float(nl))
#res = prss.findall(content)
#row.append(len(res)/float(nl))
#res = pgooglep.findall(content)
#row.append(len(res)/float(nl))
#res = pstumble.findall(content)
#row.append(len(res)/float(nl))
res = pncomment.findall(content)
row.append(len(res))
#res = pcolor.findall(content)
#row.append(len(res))
#res = psmile.findall(content)
#row.append(len(res))
#if len(res)>0:
#print ind,": ",res
#raw_input("HITKEY")
#res = plbreak.findall(content)
#row.append(len(res))
#res = padvert.findall(content)
#row.append(len(res))
res = pnewline.findall(content)
row.append(math.log(1.0+len(res)))
#res = pdynlink.findall(content)
#row.append(len(res))
#res = pnofollow.findall(content)
#row.append(len(res))
#res = pschemaorg.findall(content)
#row.append(len(res))
#res = pmobileredirect.findall(content)
#row.append(len(res))
#m = pgooglead.search(content)
#if m:
# row.append(1)
# else:
# row.append(0)
#if len(res)>0:
#print ind,": ",res
#raw_input("HITKEY")
#res = pshare.findall(content)
#row.append(len(res)/float(nl))
#print ""
tutto.append(row)
newdf=pd.DataFrame(tutto).set_index(0)
newdf.columns=['wwwfacebook_ratio','facebooklike_ratio','twitter_ratio','n_comment','logn_newline']
pd.set_printoptions(max_rows=40, max_columns=20)
print(newdf.head(20))
print(newdf.describe())
return newdf | lgpl-3.0 | 596,703,456,825,445,100 | 29.926136 | 155 | 0.599412 | false |
debugger06/MiroX | lib/test/utiltest.py | 1 | 41453 | # coding=latin-1
# The above coding declaration is required because this file includes non-latin
# characters in an inline string, so the encoding must be declared per PEP 263.
import itertools
import os
import tempfile
import time
import signal
import shutil
import unittest
import sys
import zipfile
from miro.test.framework import skip_for_platforms, MiroTestCase
from miro import download_utils
from miro import util
from miro import buildutils
from miro.fileobject import FilenameType
from miro.plat.utils import unicode_to_filename
# We're going to override this so we can guarantee that if the order
# changes later that it doesn't really affect us.
util.PREFERRED_TYPES = [
'application/x-bittorrent', 'video/ogg', 'video/mp4',
'video/quicktime', 'video/mpeg']
class FakeStream:
"""Fake streams are used for the AutoFlushingStream test. They
don't really do much, except check that write is always called
with a string object (unicode won't always work when writing to
stdout).
"""
def write(self, out):
if not isinstance(out, str):
raise ValueError("Got non-string object (%s) from "
"autoflushing stream" % str.__class__)
def flush(self):
pass
class MockCache(util.Cache):
"""
MockCache is used to test the Cache object. The new values are a tuple of
the value passed in and a counter value, incremented each time a new value
is made.
"""
def __init__(self, size):
util.Cache.__init__(self, size)
self.value_counter = itertools.count()
def create_new_value(self, val, invalidator=None):
return (val, self.value_counter.next())
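# Example of the values MockCache produces (illustrative comment; assumes the
# base util.Cache calls create_new_value() on a cache miss): the first miss for
# 'a' yields ('a', 0), a later miss for 'b' yields ('b', 1), and so on.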
class AutoFlushingStreamTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.stream = FakeStream()
self.afs = util.AutoFlushingStream(self.stream)
def test_basic_write(self):
self.afs.write("Hello World\n")
self.afs.write("")
self.afs.write("LotsofData" * 200)
def test_unicode_write(self):
self.afs.write(u'\xf8')
class LoggingStreamTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.warnings = []
self.errors = []
self.stdout = util.AutoLoggingStream(
self.warn_callback, '(from stdout) ')
self.stderr = util.AutoLoggingStream(
self.err_callback, '(from stderr) ')
def _check_data(self, data):
"""Check that write is always called with a string object
(unicode won't always work when writing to stdout)
"""
if not isinstance(data, str):
raise ValueError("Got non-string object (%r) from LoggingStream" %
data)
def warn_callback(self, data):
self._check_data(data)
self.warnings.append(data)
def err_callback(self, data):
self._check_data(data)
self.errors.append(data)
def test_basic_write(self):
self.stdout.write("Hello World\n")
self.stdout.write("")
self.stderr.write("LotsofData" * 200)
self.assertEquals(len(self.warnings), 1)
self.assertEquals(self.warnings[0], '(from stdout) Hello World')
self.assertEquals(len(self.errors), 1)
self.assertEquals(self.errors[0], '(from stderr) ' +
"LotsofData" * 200)
def test_unicode_write(self):
self.stdout.write(u'\xf8')
self.assertEquals(len(self.warnings), 1)
self.assertEquals(self.warnings[0], '(from stdout) \\xf8')
class UtilTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.filesize_elements = [
{'href': u'http://example.org/1.ogg',
'type': u'video/ogg',
'filesize': u'21663'},
{'href': u'http://example.org/2.ogg',
'type': u'video/ogg',
'filesize': u'notafilesize'},
{'href': u'http://example.org/3.ogg',
'type': u'video/ogg',
'filesize': u'288'},
{'href': u'http://example.org/4.ogg',
'type': u'video/ogg',
'filesize': u'800088'},
{'href': u'http://example.org/5.ogg',
'type': u'video/ogg',
'filesize': u'82'}]
self.type_elements = [
{'href': u'http://example.org/1.mp4',
'type': u'video/mp4',
'filesize': u'2000'},
{'href': u'http://example.org/2.mpeg',
'type': u'video/mpeg',
'filesize': u'2000'},
{'href': u'http://example.org/3.mov',
'type': u'video/quicktime',
'filesize': u'2000'},
{'href': u'http://example.org/4.torrent',
'type': u'application/x-bittorrent',
'filesize': u'2000'},
{'href': u'http://example.org/5.ogg',
'type': u'video/ogg',
'filesize': u'2000'}]
self.combination_elements = [
{'href': u'http://example.org/1.ogg',
'type': u'video/ogg',
'filesize': u'302999'},
{'href': u'http://example.org/2.mov',
'type': u'video/quicktime',
'filesize': u'2000'},
{'href': u'http://example.org/3.mp4',
'type': u'video/mp4',
'filesize': u'401971'},
{'href': u'http://example.org/4.ogg',
'type': u'video/ogg',
'filesize': u'166955'},
{'href': u'http://example.org/5.mpeg',
'type': u'video/mpeg',
'filesize': u'244700'}]
def test_is_url_positive(self):
for testurl in [u"http://foo.bar.com/",
u"https://foo.bar.com/",
]:
self.assertEqual(util.is_url(testurl), True)
def test_is_url_negative(self):
for testurl in [u"",
None,
u"feed://foo.bar.com/",
u"http://foo.bar.com",
u"http:foo.bar.com/",
u"https:foo.bar.com/",
u"feed:foo.bar.com/",
u"http:/foo.bar.com/",
u"https:/foo.bar.com/",
u"feed:/foo.bar.com/",
u"http:///foo.bar.com/",
u"https:///foo.bar.com/",
u"feed:///foo.bar.com/",
u"foo.bar.com",
u"crap:foo.bar.com",
u"crap:/foo.bar.com",
u"crap://foo.bar.com",
u"crap:///foo.bar.com",
# Bug #12645
u"No license (All rights reserved)",
]:
self.assertEqual(util.is_url(testurl), False)
def test_stringify(self):
# input, handleerror, expected output if handlerror is None,
# then it isn't passed in as an argument
class GoodStringObject(object):
"""Object whose __str__ method returns an ASCII string."""
def __str__(self):
return "abc"
class BadStringObject(object):
"""Object whose __str__ method returns a non-ASCII string."""
def __str__(self):
return "abc\xe4"
for i, h, o in [
( "", None, ""),
( "abc", None, "abc"),
( 5, None, "5"),
( 5.5, None, "5.5"),
( u"abc", None, "abc"),
( u"abc\xe4", None, "abcä"),
( u"abc\xe4", "replace", "abc?"),
# test that bytestrings are converted to plain ASCII
( "abc", None, "abc"),
( "abc\xe4", None, "abc?"),
# test that objects are converted to plain ASCII
( GoodStringObject(), None, "abc"),
( BadStringObject(), None, "abc?"),
]:
if h == None:
self.assertEquals(util.stringify(i), o)
else:
self.assertEquals(util.stringify(i, h), o)
def test_random_string(self):
ret = util.random_string(0)
self.assertEquals(len(ret), 0)
for length in (1, 5, 10):
ret = util.random_string(length)
self.assertEquals(len(ret), length)
self.assertEquals(ret.isalpha(), True)
def test_cmp_enclosures(self):
"""
Test for util.cmp_enclosures
"""
def get_hrefs(enclosures):
return [enclosure['href'] for enclosure in enclosures]
self.filesize_elements.sort(util.cmp_enclosures)
self.type_elements.sort(util.cmp_enclosures)
self.combination_elements.sort(util.cmp_enclosures)
self.assertEqual(
get_hrefs(self.filesize_elements),
[u'http://example.org/4.ogg',
u'http://example.org/1.ogg',
u'http://example.org/3.ogg',
u'http://example.org/5.ogg',
u'http://example.org/2.ogg'])
self.assertEqual(
get_hrefs(self.type_elements),
[u'http://example.org/4.torrent',
u'http://example.org/5.ogg',
u'http://example.org/1.mp4',
u'http://example.org/3.mov',
u'http://example.org/2.mpeg'])
self.assertEqual(
get_hrefs(self.combination_elements),
[u'http://example.org/1.ogg',
u'http://example.org/4.ogg',
u'http://example.org/3.mp4',
u'http://example.org/2.mov',
u'http://example.org/5.mpeg'])
def test_get_first_video_enclosure(self):
"""
Test for util.get_first_video_enclosure
"""
class FakeEntry(object):
def __init__(self, enclosures):
self.enclosures = enclosures
# set up the entries..
filesizes_entry = FakeEntry(self.filesize_elements)
types_entry = FakeEntry(self.type_elements)
combinations_entry = FakeEntry(self.combination_elements)
# get their "selected" results
selected_filesize = util.get_first_video_enclosure(filesizes_entry)
selected_type = util.get_first_video_enclosure(types_entry)
selected_combination = util.get_first_video_enclosure(
combinations_entry)
# now make sure they returned what we expected..
self.assertEqual(selected_filesize['href'],
u'http://example.org/4.ogg')
self.assertEqual(selected_type['href'],
u'http://example.org/4.torrent')
self.assertEqual(selected_combination['href'],
u'http://example.org/1.ogg')
def test_clamp_text(self):
# limit 20
self.assertRaises(TypeError, util.clamp_text, None)
self.assertEqual('', util.clamp_text(''))
self.assertEqual('1', util.clamp_text('1'))
self.assertEqual('12345678901234567890',
util.clamp_text('12345678901234567890'))
self.assertEqual('12345678901234567...',
util.clamp_text('123456789012345678901'))
self.assertEqual('12345678901234567...',
util.clamp_text(
'12345678901234567890 1234 1234 1234'))
# limit 4
self.assertRaises(TypeError, util.clamp_text, None, 4)
self.assertEqual('', util.clamp_text('', 4))
self.assertEqual('1', util.clamp_text('1', 4))
self.assertEqual(
'1...', util.clamp_text('12345678901234567890', 4))
self.assertEqual(
'1...', util.clamp_text('123456789012345678901', 4))
self.assertEqual(
'1...', util.clamp_text('12345678901234567890 1234 1234 1234', 4))
def test_check_u(self):
util.check_u(None)
util.check_u(u'abc')
util.check_u(u'&*@!#)*) !@)( !@# !)@(#')
self.assertRaises(util.MiroUnicodeError, util.check_u, 'abc')
self.assertRaises(util.MiroUnicodeError,
util.check_u, '&*@!#)*) !@)( !@# !)@(#')
def test_check_b(self):
        util.check_b(None)
        util.check_b("abc")
self.assertRaises(util.MiroUnicodeError, util.check_b, 42)
self.assertRaises(util.MiroUnicodeError, util.check_b, [])
self.assertRaises(util.MiroUnicodeError, util.check_b, ['1','2'])
self.assertRaises(util.MiroUnicodeError, util.check_b, {})
self.assertRaises(util.MiroUnicodeError, util.check_b, {'a': 1, 'b':2})
def test_check_f(self):
def test_name(text):
correct_type = FilenameType(text)
util.check_f(correct_type)
if sys.platform == 'win32':
incorrect_type = str(text)
else:
incorrect_type = unicode(text)
self.assertRaises(util.MiroUnicodeError,
util.check_f, incorrect_type)
util.check_f(None)
test_name("")
test_name("abc.txt")
test_name("./xyz.avi")
def assertEqualWithType(self, expected, expectedType, val):
self.assertEqual(val, expected)
self.assertTrue(isinstance(val, expectedType),
"Not of type " + str(expectedType))
def test_unicodify(self):
self.assertEqual(None, util.unicodify(None))
# Int
self.assertEqualWithType(5, int, util.unicodify(5))
# String
self.assertEqualWithType('abc', unicode, util.unicodify('abc'))
# List
res = util.unicodify(['abc', '123'])
self.assertEqualWithType('abc', unicode, res[0])
self.assertEqualWithType('123', unicode, res[1])
# Dict
res = util.unicodify({'a': 'abc', 'b': '123'})
self.assertEqualWithType('abc', unicode, res['a'])
self.assertEqualWithType('123', unicode, res['b'])
# List of dicts
res = util.unicodify([{'a': 'abc', 'b': '$$$'},
{'y': u'25', 'z': '28'}])
self.assertEqualWithType('abc', unicode, res[0]['a'])
self.assertEqualWithType('$$$', unicode, res[0]['b'])
self.assertEqualWithType('25', unicode, res[1]['y'])
self.assertEqualWithType('28', unicode, res[1]['z'])
def test_quote_unicode_url(self):
# Non-unicode
self.assertRaises(util.MiroUnicodeError, util.quote_unicode_url,
'http://www.example.com')
# Unicode, no substitution
self.assertEqualWithType('http://www.example.com', unicode,
util.quote_unicode_url(u'http://www.example.com'))
# Unicode, substitution
self.assertEqualWithType(u'http://www.example.com/fran%C3%83%C2%A7ois',
unicode,
util.quote_unicode_url(u'http://www.example.com/fran\xc3\xa7ois'))
def test_call_command_failure(self):
# command doesn't exist
self.assertRaises(OSError, util.call_command, 'thiscommanddoesntexist')
# command exists but invalid option and returns error code.
#
# Note: on win32, this probably requires cygwin.
self.assertRaises(OSError, util.call_command, 'ps', '--badarg')
@skip_for_platforms('win32')
def test_call_command_success(self):
pid = int(os.getpid())
stdout = util.call_command('ps', '-p', str(pid), '-o', 'pid=')
pid_read = int(stdout)
self.assertEqual(pid, pid_read)
def test_to_uni(self):
# try it twice to make sure the cached value is correct as well
for i in range(0,2):
self.assertEqualWithType('', unicode, util.to_uni(''))
self.assertEqualWithType('', unicode, util.to_uni(u''))
self.assertEqualWithType('abc', unicode, util.to_uni('abc'))
self.assertEqualWithType('abc', unicode, util.to_uni(u'abc'))
self.assertEqualWithType('!@^)!@%I*', unicode,
util.to_uni('!@^)!@%I*'))
self.assertEqualWithType('!@^)!@%I*', unicode,
util.to_uni(u'!@^)!@%I*'))
def test_escape(self):
# try it twice to make sure the cached value is correct as well
for i in range(0,2):
self.assertEqualWithType('', unicode, util.escape(''))
self.assertEqualWithType('&', unicode, util.escape('&'))
self.assertEqualWithType('<', unicode, util.escape('<'))
self.assertEqualWithType('>', unicode, util.escape('>'))
self.assertEqualWithType('la & <html>', unicode,
util.escape('la & <html>'))
def test_entity_replace(self):
self.assertEqual('', util.entity_replace(''))
self.assertEqual('abcd yz XXX i!@#$%^&*()= 123 <>&',
util.entity_replace(
'abcd yz XXX i!@#$%^&*()= 123 <>&'))
self.assertEqual('#', util.entity_replace('#'))
self.assertEqual('\'', util.entity_replace('''))
self.assertEqual('\'', util.entity_replace('''))
self.assertEqual('"', util.entity_replace('"'))
self.assertEqual('"', util.entity_replace('"'))
self.assertEqual('&', util.entity_replace('&'))
self.assertEqual('&', util.entity_replace('&'))
self.assertEqual('<', util.entity_replace('<'))
self.assertEqual('<', util.entity_replace('<'))
self.assertEqual('>', util.entity_replace('>'))
self.assertEqual('>', util.entity_replace('>'))
self.assertEqual('abcd yz XX<X i!@#$%^&*()=& 123 <>&',
util.entity_replace(
'abcd yz XX<X i!@#$%^&*()=& 123 <>&'))
def test_ascii_lower(self):
self.assertEqual('', util.ascii_lower(''))
self.assertEqual('a', util.ascii_lower('a'))
self.assertEqual('a', util.ascii_lower('A'))
self.assertEqual('ab', util.ascii_lower('AB'))
self.assertEqual('a b', util.ascii_lower('A B'))
self.assertEqual('a b', util.ascii_lower('a B'))
self.assertEqual('a-b', util.ascii_lower('A-B'))
self.assertEqual('\xD1', util.ascii_lower('\xD1'))
self.assertEqual(';2%/*()_-?+z', util.ascii_lower(';2%/*()_-?+Z'))
class DownloadUtilsTest(MiroTestCase):
def check_clean_filename(self, filename, test_against):
self.assertEquals(download_utils.clean_filename(filename),
test_against)
def test_clean_filename(self):
self.check_clean_filename('normalname', 'normalname')
self.check_clean_filename('a:b?c>d<e|f*/g\\h"\'', 'abcdefgh')
self.check_clean_filename('', '_')
long_filename = 'booya' * 100
long_extension = '.' + 'foo' * 20
self.check_clean_filename(long_filename, long_filename[:100])
# total file length isn't over the limit, so the extension
# stays the same
self.check_clean_filename('abc' + long_extension,
'abc' + long_extension)
self.check_clean_filename(long_filename + long_extension,
long_filename[:50] + long_extension[:50])
def test_next_free_filename_generators(self):
# try path without extension
path = "/foo/.bar/test"
generator = util.next_free_filename_candidates(path)
# first candidate should just be the file itself
self.assertEquals(generator.next(), "/foo/.bar/test")
# next candidate should just be the file with .X added to it
self.assertEquals(generator.next(), "/foo/.bar/test.1")
self.assertEquals(generator.next(), "/foo/.bar/test.2")
# try path with extension
path = "/foo/.bar/test.jpg"
generator = util.next_free_filename_candidates(path)
# first candidate should just be the file itself
self.assertEquals(generator.next(), "/foo/.bar/test.jpg")
# next candidate should just be the file with .X added before the
# extension
self.assertEquals(generator.next(), "/foo/.bar/test.1.jpg")
self.assertEquals(generator.next(), "/foo/.bar/test.2.jpg")
# test that if we call it too many times, we get an exception
generator = util.next_free_filename_candidates(path)
for x in xrange(100000):
try:
generator.next()
except ValueError:
# this is good, it means that there's a failsafe if we try too
# many candidates
if x < 100:
raise # we shouldn't get an exception too soon
break
else:
raise AssertionError("next_free_filename_candidates() "
"continues forever")
def test_next_free_directory_generators(self):
path = "/foo/.bar/test"
generator = util.next_free_directory_candidates(path)
# first candidate should just be the file itself
self.assertEquals(generator.next(), "/foo/.bar/test")
# next candidate should just be the file with .X added to it
self.assertEquals(generator.next(), "/foo/.bar/test.1")
self.assertEquals(generator.next(), "/foo/.bar/test.2")
# test that if we call it too many times, we get an exception
generator = util.next_free_directory_candidates(path)
for x in xrange(100000):
try:
generator.next()
except ValueError:
# this is good, it means that there's a failsafe if we try too
# many candidates
if x < 100:
raise # we shouldn't get an exception too soon
break
else:
raise AssertionError("next_free_filename_candidates() "
"continues forever")
def test_next_free_filename(self):
# make a bunch of files that we should skip over
for name in ('foo', 'foo.1', 'foo.2', 'bar.jpg', 'bar.1.jpg'):
path = os.path.join(self.tempdir, name)
open(path, 'wt').write("FAKE FILE")
path1 = os.path.join(self.tempdir, 'foo')
# test we find the a nonexistent file
returned_path, fp = util.next_free_filename(path1)
self.assertEquals(returned_path, os.path.join(self.tempdir, 'foo.3'))
# test that we create the file
self.assert_(os.path.exists(returned_path))
# try with an extension
path2 = os.path.join(self.tempdir, 'bar.jpg')
returned_path, fp = util.next_free_filename(path2)
self.assertEquals(returned_path, os.path.join(self.tempdir,
'bar.2.jpg'))
self.assert_(os.path.exists(returned_path))
def test_next_free_directory(self):
# make a bunch of directories that we should skip over
for name in ('foo', 'foo.1', 'foo.2'):
path = os.path.join(self.tempdir, name)
os.mkdir(path)
path = os.path.join(self.tempdir, 'foo')
# test we find the a nonexistent file
returned_path = util.next_free_directory(path)
self.assertEquals(returned_path, os.path.join(self.tempdir, 'foo.3'))
# test that we don't create the directory
self.assert_(not os.path.exists(returned_path))
class Test_simple_config_file(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tempdir = tempfile.mkdtemp()
if not os.path.exists(self.tempdir):
os.makedirs(self.tempdir)
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree(self.tempdir, ignore_errors=True)
def test_read_simple_config_file(self):
fn = os.path.join(self.tempdir, "temp.config")
f = open(fn, "w")
f.write("""
a = b
c = dSSS
E = F
""".strip().replace("S", " "))
f.close()
cfg = buildutils.read_simple_config_file(fn)
self.assertEquals(cfg["a"], "b")
self.assertEquals(cfg["c"], "d ")
self.assertEquals(cfg["E"], "F")
self.assertEquals(cfg.get("G"), None)
def test_write_simple_config_file(self):
fn = os.path.join(self.tempdir, "temp.config")
cfg = {"a": "b",
"c": "d",
"E": "F "}
buildutils.write_simple_config_file(fn, cfg)
cfg2 = buildutils.read_simple_config_file(fn)
self.assertEquals(cfg2["a"], cfg["a"])
self.assertEquals(cfg2["c"], cfg["c"])
self.assertEquals(cfg2["E"], cfg["E"])
class MatrixTest(unittest.TestCase):
def test_matrix_init(self):
m = util.Matrix(1, 2)
self.assertEquals(list(m), [None, None])
m = util.Matrix(2, 1)
self.assertEquals(list(m), [None, None])
m = util.Matrix(1, 5)
self.assertEquals(m.columns, 1)
self.assertEquals(m.rows, 5)
m = util.Matrix(0, 0)
self.assertEquals(m.columns, 0)
self.assertEquals(m.rows, 0)
m = util.Matrix(5, 1)
self.assertEquals(m.columns, 5)
self.assertEquals(m.rows, 1)
self.assertEquals(m[0, 0], None)
self.assertEquals(m[1, 0], None)
self.assertEquals(m[2, 0], None)
self.assertEquals(m[3, 0], None)
self.assertEquals(m[4, 0], None)
def test_get_set(self):
m = util.Matrix(3, 2)
m[0, 0] = 1
m[0, 1] = 2
m[1, 0] = 3
m[1, 1] = 4
m[2, 0] = 5
m[2, 1] = 6
self.assertEquals(m[0,0], 1)
self.assertEquals(m[1,0], 3)
self.assertEquals(m[2,0], 5)
m[0,0] = 17
self.assertEquals(m[0,0], 17)
def test_columns(self):
m = util.Matrix(3, 2)
m[0, 0] = 1
m[0, 1] = 2
m[1, 0] = 3
m[1, 1] = 4
m[2, 0] = 5
m[2, 1] = 6
self.assertEquals(list(m.column(0)), [1, 2])
self.assertEquals(list(m.column(1)), [3, 4])
self.assertEquals(list(m.column(2)), [5, 6])
def test_rows(self):
m = util.Matrix(3, 2)
m[0, 0] = 1
m[0, 1] = 2
m[1, 0] = 3
m[1, 1] = 4
m[2, 0] = 5
m[2, 1] = 6
self.assertEquals(list(m.row(0)), [1, 3, 5])
self.assertEquals(list(m.row(1)), [2, 4, 6])
def test_remove(self):
m = util.Matrix(1, 2)
m[0,0] = 1
m[0,1] = 2
m.remove(2)
self.assertEquals(m[0,0], 1)
self.assertEquals(m[0,1], None)
class TestGatherSubtitlesFiles(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree(self.tempdir, ignore_errors=True)
def create_files(self, movie_file, sub_files=None):
if sub_files is None:
sub_files = []
movie_file = os.path.join(self.tempdir, movie_file)
sub_files = [os.path.join(self.tempdir, mem) for mem in sub_files]
sub_files.sort()
all_files = [movie_file] + list(sub_files)
for mem in all_files:
dirname = os.path.dirname(mem)
if not os.path.exists(dirname):
os.makedirs(dirname)
filep = open(mem, "w")
filep.write("lalala")
filep.close()
return movie_file, sub_files
def test_no_directory(self):
# tests the case where the foofeed directory doesn't exist
movie_file = os.path.join(self.tempdir, "foofeed", "foo.mov")
self.assertEquals(
[], util.gather_subtitle_files(FilenameType(movie_file)))
def test_no_subtitle_files(self):
movie_file, sub_files = self.create_files("foo.mov")
self.assertEquals(
sub_files, util.gather_subtitle_files(FilenameType(movie_file)))
def test_single_file(self):
movie_file, sub_files = self.create_files(
"foo.mov", ["foo.en.srt"])
self.assertEquals(
sub_files, util.gather_subtitle_files(FilenameType(movie_file)))
def test_multiple_files(self):
movie_file, sub_files = self.create_files(
"foo.mov", ["foo.en.srt", "foo.fr.srt", "foo.es.srt"])
self.assertEquals(
sub_files, util.gather_subtitle_files(FilenameType(movie_file)))
def test_lots_of_files(self):
movie_file, sub_files = self.create_files(
"foo.mov", ["foo.en.srt", "blah.ogv", "foo.ogv"])
# weed out the non-srt files so we can test correctly
sub_files = [mem for mem in sub_files if mem.endswith(".srt")]
self.assertEquals(
sub_files, util.gather_subtitle_files(FilenameType(movie_file)))
def test_subtitles_dir(self):
movie_file, sub_files = self.create_files(
"foo.mov", [os.path.join("subtitles", "foo.en.srt"),
os.path.join("subtitles", "foo.fr.srt")])
self.assertEquals(
sub_files, util.gather_subtitle_files(FilenameType(movie_file)))
def test_filename_possibilities(self):
movie_file, sub_files = self.create_files(
"foo.mov", ["foo.en.srt", "foo.en.sub", "foo.srt", "foo.sub"])
self.assertEquals(
sub_files, util.gather_subtitle_files(FilenameType(movie_file)))
class TestCopySubtitleFile(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree(self.tempdir, ignore_errors=True)
def create_files(self, files):
for mem in files:
dirname = os.path.dirname(mem)
if not os.path.exists(dirname):
os.makedirs(dirname)
filep = open(mem, "w")
filep.write("lalala")
filep.close()
def test_simple(self):
sub_path = os.path.join(self.tempdir, "otherdir/subtitle.srt")
video_path = os.path.join(self.tempdir, "foo.mov")
self.create_files([sub_path, video_path])
ret = util.copy_subtitle_file(sub_path, video_path)
expected = os.path.join(self.tempdir, "foo.srt")
self.assert_(os.path.exists(expected))
self.assertEqual(expected, ret)
def test_simple_with_language(self):
sub_path = os.path.join(self.tempdir, "otherdir/subtitle.en.srt")
video_path = os.path.join(self.tempdir, "foo.mov")
self.create_files([sub_path, video_path])
ret = util.copy_subtitle_file(sub_path, video_path)
expected = os.path.join(self.tempdir, "foo.en.srt")
self.assert_(os.path.exists(expected))
self.assertEqual(expected, ret)
def test_nonlanguage(self):
# "ex" is not a valid language code, so this should ignore
# that part
sub_path = os.path.join(self.tempdir, "otherdir/subtitle.ex.srt")
video_path = os.path.join(self.tempdir, "foo.mov")
self.create_files([sub_path, video_path])
ret = util.copy_subtitle_file(sub_path, video_path)
expected = os.path.join(self.tempdir, "foo.srt")
self.assert_(os.path.exists(expected))
self.assertEqual(expected, ret)
class TestNameSortKey(unittest.TestCase):
def test_simple(self):
for testcase in ((None, 'ZZZZZZZZZZZZZ'),
(u'', (u'',)),
(u'a', (u'a',)),
(u'a1a', (u'a', 1.0, u'a')),
(u'Episode_100', (u'episode_', 100.0, u'')),
(u'episode_1', (u'episode_', 1.0, u''))
):
self.assertEquals(util.name_sort_key(testcase[0]),
testcase[1])
def test_hashable(self):
for testcase in (None,
u'',
u'a',
u'a1a',
u'Episode_100',
u'episode_1',
):
hash(util.name_sort_key(testcase))
def test_sorting(self):
for inlist, outlist in (
([], []),
(["b", "a", "c"], ["a", "b", "c"]),
(["a_12", "a_1", "a_100"], ["a_1", "a_12", "a_100"]),
(["A_12", "a_1", "A_100"], ["a_1", "A_12", "A_100"])
):
inlist.sort(key=util.name_sort_key)
self.assertEquals(inlist, outlist)
class TestGatherMediaFiles(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tempdir = tempfile.mkdtemp()
if not os.path.exists(self.tempdir):
os.makedirs(self.tempdir)
self.expectedFiles = []
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree(self.tempdir, ignore_errors=True)
def add_file(self, filepath, expected):
"""Create a file in the temporary directory"""
fullfilepath = os.path.join(self.tempdir, filepath)
dirname = os.path.dirname(fullfilepath)
if not os.path.exists(dirname):
os.makedirs(dirname)
filep = open(fullfilepath, "w")
filep.write("lalala")
filep.close()
if expected:
self.expectedFiles.append(fullfilepath)
def verify_results(self):
finder = util.gather_media_files(self.tempdir)
found = []
try:
while(True):
num_parsed, found = finder.next()
except StopIteration:
self.assertEquals(set(found), set(self.expectedFiles))
def test_empty_dir(self):
self.verify_results()
def test_dir_without_media(self):
self.add_file('index.html', False)
self.verify_results()
self.add_file('README.txt', False)
self.verify_results()
def test_dir_with_media(self):
self.add_file('test.ogv', True)
self.verify_results()
self.add_file('test.avi', True)
self.verify_results()
def test_dir_mixed_files(self):
self.add_file('index.html', False)
self.add_file('test.ogv', True)
self.verify_results()
def test_subdirs(self):
self.add_file(os.path.join('aaa', 'index.html'), False)
self.verify_results()
self.add_file(os.path.join('bbb', 'test.ogv'), True)
self.verify_results()
self.add_file('test.ogv', True)
self.verify_results()
class TestBackupSupportDir(MiroTestCase):
# Test backing up the support directory
def setUp(self):
MiroTestCase.setUp(self)
self.support_dir = self.make_temp_dir_path()
self.correct_files = []
self.skip_dirs = []
self.setup_support_dir()
def setup_support_dir(self):
"""Add objects to our fake support directory that we want around for
every test.
"""
# add log files
self.add_file_to_support_dir('miro.log')
self.add_file_to_support_dir('miro-downloader.log')
for i in range(1, 5):
self.add_file_to_support_dir('miro.log.%s' % i)
self.add_file_to_support_dir('miro-downloader.log.%s' % i)
# add database files
self.add_file_to_support_dir('sqlitedb')
self.add_file_to_support_dir('sqlitedb-journal')
self.add_file_to_support_dir('dbbackups/sqlitedb_backup_165')
self.add_file_to_support_dir('dbbackups/sqlitedb_backup_170')
self.add_file_to_support_dir('dbbackups/sqlitedb_backup_183')
# add other files
self.add_skip_dir('icon-cache')
self.add_skip_dir('cover-art')
self.add_file_to_support_dir('httpauth', should_skip=True)
self.add_file_to_support_dir('preferences.bin', should_skip=True)
for i in range(5):
self.add_file_to_support_dir('cover-art/Album-%s' % i,
should_skip=True)
self.add_file_to_support_dir('icon-cache/icon-%s' % i,
should_skip=True)
self.add_file_to_support_dir('crashes/crash-report-%i' % i)
def add_skip_dir(self, skip_dir):
self.skip_dirs.append(os.path.join(self.support_dir, skip_dir))
def add_file_to_support_dir(self, path, archive_name=None,
should_skip=False, contents='FAKE DATA'):
if archive_name is None:
archive_name = path
full_path = os.path.join(self.support_dir, path)
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
os.makedirs(directory)
open(full_path, "wt").write(contents)
if not should_skip:
self.correct_files.append(archive_name)
def check_backup(self):
backup = util.SupportDirBackup(self.support_dir, self.skip_dirs,
max_size=1000000)
archive = zipfile.ZipFile(backup.fileobj(), 'r')
errors = archive.testzip()
self.assertTrue(errors is None, "Errors in the zip file: %s" % errors)
self.assertSameSet(archive.namelist(), self.correct_files)
def test_backup(self):
self.check_backup()
def test_extendend_chars(self):
filename = unicode_to_filename(u'\ufffdxtended Chars')
self.add_file_to_support_dir(filename, 'xtended Chars')
self.check_backup()
def test_size_limit(self):
# create 200 kb worth of data
large_data = " " * 200000
# add a bunch of those files
for i in xrange(10):
self.add_file_to_support_dir('big-file-%s' % i,
contents=large_data)
        # check that we don't make an archive file too much bigger than our max
# size
max_size = 1000000 # 1MB
backup = util.SupportDirBackup(self.support_dir, self.skip_dirs,
max_size=max_size)
filesize = os.stat(backup.backupfile).st_size
self.assertTrue(filesize <= 1100000,
"Backup file too big. filesize: %s max_size: %s" %
(filesize, max_size))
class MtimeInvalidatorTestCase(MiroTestCase):
def test_valid(self):
filename = os.path.join(self.tempdir, 'mtime_test')
file(filename, 'w').write('foo')
invalidator = util.mtime_invalidator(filename)
self.assertFalse(invalidator(None))
def test_invalid(self):
filename = os.path.join(self.tempdir, 'mtime_test_future')
file(filename, 'w').write('foo')
invalidator = util.mtime_invalidator(filename)
mtime = os.stat(filename).st_mtime
# pretend the file was modified in the future
os.utime(filename, (mtime + 10, mtime + 10))
self.assertTrue(invalidator(None))
def test_doesnotexist(self):
filename = os.path.join(self.tempdir,
'mtime_test_doesnotexist')
invalidator = util.mtime_invalidator(filename)
self.assertTrue(invalidator(None))
def test_disappears(self):
filename = os.path.join(self.tempdir,
'mtime_test_disappears')
file(filename, 'w').write('foo')
invalidator = util.mtime_invalidator(filename)
self.assertFalse(invalidator(None))
os.unlink(filename)
self.assertTrue(invalidator(None))
class CacheTestCase(MiroTestCase):
def setUp(self):
MiroTestCase.setUp(self)
self.cache = MockCache(2)
def test_set_get(self):
self.cache.set(1, 1)
self.assertEquals(self.cache.get(1), 1)
def test_create_new_value_get(self):
self.assertEquals(self.cache.get(1), (1, 0))
self.assertEquals(self.cache.get(3), (3, 1))
def test_remove(self):
self.cache.set(1, 1)
self.cache.remove(1)
self.assertFalse(1 in self.cache.keys())
def test_lru(self):
self.cache.get(1)
self.cache.get(2)
self.cache.get(3)
# 1 has expired out
self.assertEquals(set(self.cache.keys()), set((2, 3)))
def test_invalidator_set(self):
def invalidator(key):
return True
self.cache.set(1, 1, invalidator=invalidator)
# previous value is now invalid, get a new one
self.assertEquals(self.cache.get(1), (1, 0))
def test_invalidator_get(self):
def invalidator(key):
return True
self.assertEquals(self.cache.get(1, invalidator=invalidator),
(1, 0))
# previous value was invalid, get a new one
self.assertEquals(self.cache.get(1, invalidator=invalidator),
(1, 1))
class AlarmTestCase(MiroTestCase):
@staticmethod
def _long_function():
time.sleep(1.5)
return True
def _wrapped_function(self, set_signal=True):
with util.alarm(1, set_signal=set_signal):
return self._long_function()
if hasattr(signal, 'SIGALRM'):
def test_alarm_works(self):
self.assertRaises(IOError, self._wrapped_function)
def test_context_manager__True(self):
with util.alarm(1) as result:
self.assertTrue(result)
def test_alarm_noop(self):
self.assertTrue(self._wrapped_function(set_signal=False))
def test_context_manager__False(self):
with util.alarm(0, set_signal=False) as result:
self.assertFalse(result)
| gpl-2.0 | 8,869,159,769,845,299,000 | 36.144265 | 79 | 0.557909 | false |
adarnimrod/template | setup.py | 1 | 2042 | #!/usr/bin/env python
# pylint: disable=missing-docstring
from setuptools import setup, find_packages
from template import __doc__ as description
extras_require = {
"dev": ["pipenv"],
"jmespath": ["jmespath"],
"netaddr": ["netaddr"],
"toml": ["toml"],
"yaml": ["PyYAML"],
}
# Flatten the list and avoid duplicates.
extras_require["all"] = list(
{v for k, l in extras_require.items() if k != "dev" for v in l}
)
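# Illustrative note (not required by the build): after the flattening above,
# extras_require["all"] holds the union of every optional dependency, so all
# optional backends can be installed at once with, for example:
#   pip install template[all]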
setup(
name="template",
version="0.7.2",
description=description,
long_description=open("README.rst", "r").read(),
long_description_content_type="text/x-rst",
url="https://git.shore.co.il/nimrod/template",
author="Nimrod Adar",
author_email="[email protected]",
license="AGPLv3+",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)", # noqa: E501 pylint: disable=line-too-long
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Text Processing",
"Topic :: Utilities",
],
keywords="config configuration jinja template environment",
packages=find_packages(),
install_requires=[
"Jinja2",
"PyYAML",
"jmespath",
"toml",
"subprocess32>=3.5.0;python_version<'3.5'",
],
extras_require=extras_require,
entry_points={"console_scripts": ["template=template:main"]},
)
| agpl-3.0 | 2,910,190,702,474,790,000 | 33.610169 | 137 | 0.609696 | false |
lovelysystems/pyjamas | library/pyjamas/ui/StackPanel.py | 1 | 5218 | # Copyright 2006 James Tauber and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas.ui.ComplexPanel import ComplexPanel
from pyjamas.ui import Event
class StackPanel(ComplexPanel):
def __init__(self, **kwargs):
self.body = None
self.visibleStack = -1
self.indices = {}
table = DOM.createTable()
self.setElement(table)
self.body = DOM.createTBody()
DOM.appendChild(table, self.body)
if not kwargs.has_key('Spacing'): kwargs['Spacing'] = 0
if not kwargs.has_key('Padding'): kwargs['Padding'] = 0
if not kwargs.has_key('StyleName'): kwargs['StyleName'] = "gwt-StackPanel"
DOM.sinkEvents(table, Event.ONCLICK)
ComplexPanel.__init__(self, **kwargs)
def add(self, widget, stackText="", asHTML=False):
widget.removeFromParent()
index = self.getWidgetCount()
tr = DOM.createTR()
td = DOM.createTD()
DOM.appendChild(self.body, tr)
DOM.appendChild(tr, td)
self.setStyleName(td, "gwt-StackPanelItem", True)
self._setIndex(td, index)
DOM.setAttribute(td, "height", "1px")
tr = DOM.createTR()
td = DOM.createTD()
DOM.appendChild(self.body, tr)
DOM.appendChild(tr, td)
DOM.setAttribute(td, "height", "100%")
DOM.setAttribute(td, "vAlign", "top")
ComplexPanel.add(self, widget, td)
self.setStackVisible(index, False)
if self.visibleStack == -1:
self.showStack(0)
if stackText != "":
self.setStackText(self.getWidgetCount() - 1, stackText, asHTML)
def getWidget(self, index):
return self.children[index]
def getWidgetCount(self):
return len(self.children)
def getWidgetIndex(self, child):
return self.children.index(child)
def onBrowserEvent(self, event):
if DOM.eventGetType(event) == "click":
index = self.getDividerIndex(DOM.eventGetTarget(event))
if index != -1:
self.showStack(index)
# also callable as remove(child) and remove(index)
def remove(self, child, index=None):
if index is None:
if isinstance(child, int):
index = child
child = self.getWidget(child)
else:
index = self.getWidgetIndex(child)
if child.getParent() != self:
return False
if self.visibleStack == index:
self.visibleStack = -1
elif self.visibleStack > index:
self.visibleStack -= 1
rowIndex = 2 * index
tr = DOM.getChild(self.body, rowIndex)
DOM.removeChild(self.body, tr)
tr = DOM.getChild(self.body, rowIndex)
DOM.removeChild(self.body, tr)
ComplexPanel.remove(self, child)
rows = self.getWidgetCount() * 2
#for (int i = rowIndex; i < rows; i = i + 2) {
for i in range(rowIndex, rows, 2):
childTR = DOM.getChild(self.body, i)
td = DOM.getFirstChild(childTR)
curIndex = self._getIndex(td)
self._setIndex(td, index)
index += 1
return True
def _setIndex(self, td, index):
self.indices[td] = index
def _getIndex(self, td):
return self.indices.get(td)
def setStackText(self, index, text, asHTML=False):
if index >= self.getWidgetCount():
return
td = DOM.getChild(DOM.getChild(self.body, index * 2), 0)
if asHTML:
DOM.setInnerHTML(td, text)
else:
DOM.setInnerText(td, text)
def showStack(self, index):
if (index >= self.getWidgetCount()) or (index == self.visibleStack):
return
if self.visibleStack >= 0:
self.setStackVisible(self.visibleStack, False)
self.visibleStack = index
self.setStackVisible(self.visibleStack, True)
def getDividerIndex(self, elem):
while (elem is not None) and not DOM.compare(elem, self.getElement()):
expando = self._getIndex(elem)
if expando is not None:
return int(expando)
elem = DOM.getParent(elem)
return -1
def setStackVisible(self, index, visible):
tr = DOM.getChild(self.body, (index * 2))
if tr is None:
return
td = DOM.getFirstChild(tr)
self.setStyleName(td, "gwt-StackPanelItem-selected", visible)
tr = DOM.getChild(self.body, (index * 2) + 1)
self.setVisible(tr, visible)
self.getWidget(index).setVisible(visible)
def getSelectedIndex(self):
return self.visibleStack
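# Minimal usage sketch (illustrative; assumes the usual pyjamas RootPanel and Label widgets):
#   panel = StackPanel()
#   panel.add(Label("First section content"), "Section 1")
#   panel.add(Label("Second section content"), "Section 2")
#   RootPanel().add(panel)
# Clicking a stack header triggers onBrowserEvent(), which calls showStack() for that index.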
| apache-2.0 | -5,082,998,882,754,425,000 | 29.694118 | 82 | 0.602913 | false |
abstrakraft/rug | rug/repo.py | 1 | 2341 | import project
class Repo_Rev(project.Revset):
@staticmethod
def find_repo(repo_finder):
return repo_finder.project.manifest_repo
class Repo(object):
valid_repo = project.Project.valid_project
rev_class = Repo_Rev
def __init__(self, repo_dir, output_buffer=None):
from project import Project
self.project = Project(repo_dir, output_buffer=output_buffer)
p = self.project
mr = self.project.manifest_repo
delegated_methods = {
'valid_sha': mr.valid_sha,
'valid_rev': mr.valid_rev,
'update_ref': mr.update_ref,
'delete_ref': mr.delete_ref,
'head': mr.head,
'rev_parse': mr.rev_parse,
'symbolic_ref': mr.symbolic_ref,
'remote_list': p.source_list,
'remote_add': p.source_add,
'remote_set_url': p.source_set_url,
'remote_set_head': p.source_set_head,
'branch': p.revset,
'branch_create': p.revset_create,
'status': p.status,
'checkout': p.checkout,
'commit': p.commit,
'fetch': p.fetch,
'dirty': p.dirty,
'config': mr.config,
#'push': p.publish,
#'test_push': p.test_publish,
'merge': None, #TODO
'rebase': None, #TODO
}
self.__dict__.update(delegated_methods)
@classmethod
def init(cls, repo_dir=None, output_buffer=None):
project.Project.init(project_dir=repo_dir, output_buffer=output_buffer)
return cls(repo_dir)
@classmethod
def clone(cls, url, repo_dir=None, remote=None, rev=None, config=None, output_buffer=None):
project.Project.clone(url, project_dir=repo_dir, source=remote, revset=rev, repo_config=config, output_buffer=output_buffer)
return cls(repo_dir)
def fetch(self, remote=None):
#TODO: repo Project doesn't currently support fetching a particular source
self.project.fetch()
def add_ignore(self, pattern):
		raise NotImplementedError('ignoring through rug repos not implemented')
def push(self, remote, branch, force):
#TODO: this is a hack to drop the branch and force args, because rug repos don't handle them. Fix
return self.project.publish(remote)
def test_push(self, remote, branch, force):
#TODO: this is a hack to drop the branch and force args, because rug repos don't handle them. Fix
return self.project.test_publish(remote)
def update(self, recursive=False):
if self.project.dirty():
self.project.checkout()
self.project.update(recursive)
project.Project.register_vcs('rug', Repo)
| gpl-3.0 | -4,722,654,965,405,647,000 | 29.802632 | 126 | 0.703973 | false |
ryepdx/account_payment_ccapi_authdotnet | sale_order.py | 1 | 4286 | from openerp.osv import osv, fields
from openerp import netsvc  # provides the workflow LocalService used in action_wait()
class sale_order(osv.osv):
_inherit = "sale.order"
def _get_prod_acc(self, product_id, journal_obj, context=False):
if product_id and product_id.property_account_income:
return product_id.property_account_income.id
elif product_id and product_id.categ_id.property_account_income_categ:
return product_id.categ_id.property_account_income_categ.id
else:
if journal_obj.default_credit_account_id:
return journal_obj.default_credit_account_id.id
return False
def create_sales_receipt(self, cr, uid, ids, context={}):
sale_obj = self.browse(cr, uid, ids[0], context=context)
vals = {}
cr_ids_list = []
cr_ids = {}
journal_ids = self.pool.get('account.journal').search(cr, uid, [('type', '=', 'sale')])
if journal_ids:
vals['journal_id'] = journal_ids[0]
journal_obj = self.pool.get('account.journal').browse(cr, uid, journal_ids[0])
if sale_obj and sale_obj.order_line:
for sale_line in sale_obj.order_line:
cr_ids['account_id'] = self._get_prod_acc(sale_line.product_id and sale_line.product_id, journal_obj)#journal_obj.default_debit_account_id.id #Change this account to product's income account
cr_ids['amount'] = sale_line.price_subtotal
cr_ids['partner_id'] = sale_obj.partner_id.id
cr_ids['name'] = sale_line.name
cr_ids_list.append(cr_ids.copy())
if sale_obj and sale_obj.shipcharge and sale_obj.ship_method_id and sale_obj.ship_method_id.account_id:
cr_ids['account_id'] = sale_obj.ship_method_id.account_id.id
cr_ids['amount'] = sale_obj.shipcharge
cr_ids['partner_id'] = sale_obj.partner_id.id
cr_ids['name'] = 'Shipping Charge for %s' % sale_line.name
cr_ids_list.append(cr_ids.copy())
else:
vals['journal_id'] = False
vals['partner_id'] = sale_obj.partner_id.id
#vals['date'] = sale_obj.date_order
vals['rel_sale_order_id'] = ids[0]
vals['name'] = 'Auto generated Sales Receipt'
vals['type'] = 'sale'
vals['currency_id'] = journal_obj.company_id.currency_id.id
vals['line_cr_ids'] = [(0, 0, cr_ids) for cr_ids in cr_ids_list]
# vals['narration'] = voucher_obj.narration
vals['pay_now'] = 'pay_now'
vals['account_id'] = journal_obj.default_debit_account_id.id
# vals['reference'] = voucher_obj.reference
# vals['tax_id'] = voucher_obj.tax_id.id
vals['amount'] = sale_obj.amount_total
vals['company_id'] = journal_obj.company_id.id
vals['origin'] = sale_obj.name
voucher_id = self.pool.get('account.voucher').create(cr, uid, vals, context)
return voucher_id
def action_wait(self, cr, uid, ids, context=None):
ret = super(sale_order, self).action_wait(cr, uid, ids, context=context)
for o in self.browse(cr, uid, ids, context=context):
if (o.order_policy == 'credit_card'):
#self.create_sales_receipt(cr, uid, [o.id])
invoice_id = self.action_invoice_create(cr, uid, [o.id], context=context)
wf_service = netsvc.LocalService('workflow')
wf_service.trg_validate(uid, 'account.invoice', invoice_id, 'invoice_open', cr)
self.pool.get('account.invoice').write(cr, uid, invoice_id, {'credit_card': True}, context=context)
return ret
def action_cancel(self, cr, uid, ids, context=None):
for sale in self.browse(cr, uid, ids, context=context):
for picking in sale.picking_ids:
if sale.order_policy == 'credit_card' and picking.state not in ('done', 'cancel'):
self.pool.get('stock.picking').action_cancel(cr, uid, [picking.id], {})
for inv in sale.invoice_ids:
if sale.order_policy == 'credit_card':
self.pool.get('account.invoice').action_cancel(cr, uid, [inv.id], {})
return super(sale_order, self).action_cancel(cr, uid, ids, context)
sale_order()
| agpl-3.0 | 1,730,736,720,920,891,000 | 51.268293 | 210 | 0.585861 | false |
z-Wind/Python_Challenge | Level16_gif palette.py | 1 | 1534 | # http://www.pythonchallenge.com/pc/return/mozart.html
__author__ = 'chihchieh.sun'
from PIL import Image
import urllib.request
from io import BytesIO
def getImg(url):
# User Name & Password
password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
top_level_url = 'http://www.pythonchallenge.com/pc/return/'
password_mgr.add_password(None, top_level_url, 'huge', 'file')
handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
# Proxy setting
proxy = urllib.request.getproxies()
proxy_support = urllib.request.ProxyHandler({'sock5': proxy.get('http')})
# opener setting
opener = urllib.request.build_opener(proxy_support,handler)
imUrl = opener.open(url).read()
return Image.open(BytesIO(imUrl)) # Image.open requires a file-like object
img = getImg('http://www.pythonchallenge.com/pc/return/mozart.gif')
h, v = img.size
bars = []
# find color
count, target, temp = 0, 0, 0
for i in range(h):
color = img.getpixel((i,0))
if color == temp:
count += 1
if count == 4:
target = temp
print('color :', target)
else:
count = 0
temp = color
# redraw
shift = Image.new(img.mode, (h, v))
for j in range(v):
colors = [img.getpixel((x,j)) for x in range(h)]
start = colors.index(target) - 1
colors = colors[start:] + colors[:start]
for i in range(h):
shift.putpixel((i, j), colors[i])
shift.putpalette(img.getpalette())
shift.show()
| mit | -3,434,674,608,681,263,600 | 26.407407 | 79 | 0.626467 | false |
bodacea/opendatatools | bigdatatoolkit/settings.py | 1 | 5895 | # -*- coding: utf-8 -*-
import os
gettext = lambda s: s
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
# Django settings for bdt project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SERVER_EMAIL = '[email protected]'
ADMINS = (
('Sara', '[email protected]'),
('Saratoo', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bdt',
'USER': 'postgres',
'PASSWORD': 's0meth1ng',
'HOST': '',
#'ENGINE': 'django.db.backends.sqlite3',
#'NAME': os.path.join(PROJECT_PATH, 'database.sqlite'),
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
MEDIA_ROOT = os.path.join(PROJECT_PATH, "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
STATIC_ROOT = os.path.join(PROJECT_PATH, "static")
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
#ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'latt3sarenice1att354reg00d!w4ntc0ff#$!nthem0rning'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_PATH, "templates"),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
#'django.contrib.admindocs',
'cicada',
#'volta',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
##LOGGING = {
## 'version': 1,
## 'disable_existing_loggers': False,
## 'handlers': {
## 'mail_admins': {
## 'level': 'DEBUG',
## 'class': 'django.utils.log.AdminEmailHandler'
## }
## },
## 'loggers': {
## 'django.request': {
## 'handlers': ['mail_admins'],
## 'level': 'DEBUG',
## 'propagate': True,
## },
## }
##}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': os.path.join(PROJECT_PATH, 'errors_other.log'),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
'request_handler': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': os.path.join(PROJECT_PATH, 'errors_djrequest.log'),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter':'standard',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['request_handler'],
'level': 'DEBUG',
'propagate': True
},
},
'root': {
'handlers':['default'],
'level': 'DEBUG',
},
}
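# Illustrative usage of the logging configuration above: the root ('') logger writes
# to the rotating errors_other.log handler and django.request to errors_djrequest.log,
# so application code can simply do, e.g.:
#   import logging
#   logging.getLogger(__name__).debug("message")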
### Parse database configuration from $DATABASE_URL
##import dj_database_url
##DATABASES['default'] = dj_database_url.config()
| gpl-3.0 | 1,394,567,147,018,662,400 | 29.544041 | 79 | 0.636132 | false |
alexholehouse/SBMLIntegrator | libsbml-5.0.0/src/bindings/python/test/annotation/TestRDFAnnotation2.py | 1 | 10725 | #
# @file TestRDFAnnotation2.py
# @brief fomula units data unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id: TestRDFAnnotation2.py 11441 2010-07-09 02:22:23Z mhucka $
# $HeadURL: https://sbml.svn.sourceforge.net/svnroot/sbml/trunk/libsbml/src/bindings/python/test/annotation/TestRDFAnnotation2.py $
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/annotation/test/TestRDFAnnotation2.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
def wrapString(s):
return s
pass
class TestRDFAnnotation2(unittest.TestCase):
global d2
d2 = None
global m2
m2 = None
def equals(self, *x):
if len(x) == 2:
return x[0] == x[1]
elif len(x) == 1:
return x[0] == self.OSS.str()
def setUp(self):
filename = "../../sbml/annotation/test/test-data/annotation2.xml"
self.d2 = libsbml.readSBML(filename)
self.m2 = self.d2.getModel()
pass
def tearDown(self):
self.d2 = None
pass
def test_RDFAnnotation2_getModelHistory(self):
history = self.m2.getModelHistory()
self.assert_( history != None )
mc = history.getCreator(0)
self.assert_(( "Hucka" == mc.getFamilyName() ))
self.assert_(( "Mike" == mc.getGivenName() ))
self.assert_(( "[email protected]" == mc.getEmail() ))
self.assert_(( "BNMC" == mc.getOrganisation() ))
mc1 = history.getCreator(1)
self.assert_(( "Keating" == mc1.getFamilyName() ))
self.assert_(( "Sarah" == mc1.getGivenName() ))
self.assert_(( "[email protected]" == mc1.getEmail() ))
self.assert_(( "UH" == mc1.getOrganisation() ))
date = history.getCreatedDate()
self.assert_( date.getYear() == 2005 )
self.assert_( date.getMonth() == 2 )
self.assert_( date.getDay() == 2 )
self.assert_( date.getHour() == 14 )
self.assert_( date.getMinute() == 56 )
self.assert_( date.getSecond() == 11 )
self.assert_( date.getSignOffset() == 0 )
self.assert_( date.getHoursOffset() == 0 )
self.assert_( date.getMinutesOffset() == 0 )
self.assert_(( "2005-02-02T14:56:11Z" == date.getDateAsString() ))
date = history.getModifiedDate()
self.assert_( date.getYear() == 2006 )
self.assert_( date.getMonth() == 5 )
self.assert_( date.getDay() == 30 )
self.assert_( date.getHour() == 10 )
self.assert_( date.getMinute() == 46 )
self.assert_( date.getSecond() == 2 )
self.assert_( date.getSignOffset() == 0 )
self.assert_( date.getHoursOffset() == 0 )
self.assert_( date.getMinutesOffset() == 0 )
self.assert_(( "2006-05-30T10:46:02Z" == date.getDateAsString() ))
date = history.getModifiedDate(1)
self.assert_( date.getYear() == 2007 )
self.assert_( date.getMonth() == 1 )
self.assert_( date.getDay() == 16 )
self.assert_( date.getHour() == 15 )
self.assert_( date.getMinute() == 31 )
self.assert_( date.getSecond() == 52 )
self.assert_( date.getSignOffset() == 0 )
self.assert_( date.getHoursOffset() == 0 )
self.assert_( date.getMinutesOffset() == 0 )
self.assert_(( "2007-01-16T15:31:52Z" == date.getDateAsString() ))
pass
def test_RDFAnnotation2_modelWithHistoryAndCVTerms(self):
h = libsbml.ModelHistory()
c = libsbml.ModelCreator()
c.setFamilyName("Keating")
c.setGivenName("Sarah")
h.addCreator(c)
d = libsbml.Date(2008,11,17,18,37,0,0,0,0)
h.setCreatedDate(d)
h.setModifiedDate(d)
self.m2.unsetModelHistory()
self.m2.setModelHistory(h)
cv = libsbml.CVTerm()
cv.setQualifierType(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_IS_VERSION_OF)
cv.addResource("http://www.geneontology.org/#GO:0005892")
self.m2.addCVTerm(cv)
ann = libsbml.RDFAnnotationParser.parseModelHistory(self.m2)
expected = wrapString("<annotation>\n" +
" <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\" xmlns:dcterms=\"http://purl.org/dc/terms/\" xmlns:vCard=\"http://www.w3.org/2001/vcard-rdf/3.0#\" xmlns:bqbiol=\"http://biomodels.net/biology-qualifiers/\" xmlns:bqmodel=\"http://biomodels.net/model-qualifiers/\">\n" +
" <rdf:Description rdf:about=\"#_000001\">\n" +
" <dc:creator rdf:parseType=\"Resource\">\n" +
" <rdf:Bag>\n" +
" <rdf:li rdf:parseType=\"Resource\">\n" +
" <vCard:N rdf:parseType=\"Resource\">\n" +
" <vCard:Family>Keating</vCard:Family>\n" +
" <vCard:Given>Sarah</vCard:Given>\n" +
" </vCard:N>\n" +
" </rdf:li>\n" +
" </rdf:Bag>\n" +
" </dc:creator>\n" +
" <dcterms:created rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2008-11-17T18:37:00Z</dcterms:W3CDTF>\n" +
" </dcterms:created>\n" +
" <dcterms:modified rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2008-11-17T18:37:00Z</dcterms:W3CDTF>\n" +
" </dcterms:modified>\n" +
" <bqbiol:isVersionOf>\n" +
" <rdf:Bag>\n" +
" <rdf:li rdf:resource=\"http://www.geneontology.org/#GO:0005892\"/>\n" +
" </rdf:Bag>\n" +
" </bqbiol:isVersionOf>\n" +
" </rdf:Description>\n" +
" </rdf:RDF>\n" +
"</annotation>")
if (ann != None):
self.assertEqual( True, self.equals(expected,ann.toXMLString()) )
pass
pass
pass
def test_RDFAnnotation2_modelWithHistoryAndMultipleModifiedDates(self):
h = libsbml.ModelHistory()
c = libsbml.ModelCreator()
c.setFamilyName("Keating")
c.setGivenName("Sarah")
h.addCreator(c)
d = libsbml.Date(2005,2,2,14,56,11)
h.setCreatedDate(d)
h.addModifiedDate(d)
h.addModifiedDate(d)
self.m2.unsetModelHistory()
self.m2.setModelHistory(h)
ann = libsbml.RDFAnnotationParser.parseModelHistory(self.m2)
expected = wrapString("<annotation>\n" +
" <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\" xmlns:dcterms=\"http://purl.org/dc/terms/\" xmlns:vCard=\"http://www.w3.org/2001/vcard-rdf/3.0#\" xmlns:bqbiol=\"http://biomodels.net/biology-qualifiers/\" xmlns:bqmodel=\"http://biomodels.net/model-qualifiers/\">\n" +
" <rdf:Description rdf:about=\"#_000001\">\n" +
" <dc:creator rdf:parseType=\"Resource\">\n" +
" <rdf:Bag>\n" +
" <rdf:li rdf:parseType=\"Resource\">\n" +
" <vCard:N rdf:parseType=\"Resource\">\n" +
" <vCard:Family>Keating</vCard:Family>\n" +
" <vCard:Given>Sarah</vCard:Given>\n" +
" </vCard:N>\n" +
" </rdf:li>\n" +
" </rdf:Bag>\n" +
" </dc:creator>\n" +
" <dcterms:created rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2005-02-02T14:56:11Z</dcterms:W3CDTF>\n" +
" </dcterms:created>\n" +
" <dcterms:modified rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2005-02-02T14:56:11Z</dcterms:W3CDTF>\n" +
" </dcterms:modified>\n" +
" <dcterms:modified rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2005-02-02T14:56:11Z</dcterms:W3CDTF>\n" +
" </dcterms:modified>\n" +
" </rdf:Description>\n" +
" </rdf:RDF>\n" +
"</annotation>")
self.assertEqual( True, self.equals(expected,ann.toXMLString()) )
pass
def test_RDFAnnotation2_modelWithHistoryWithCharacterReference(self):
h = libsbml.ModelHistory()
c = libsbml.ModelCreator()
c.setFamilyName("Dräger")
c.setGivenName("Andreas")
h.addCreator(c)
d = libsbml.Date(2005,2,2,14,56,11)
h.setCreatedDate(d)
h.addModifiedDate(d)
self.m2.unsetModelHistory()
self.m2.setModelHistory(h)
ann = libsbml.RDFAnnotationParser.parseModelHistory(self.m2)
expected = wrapString("<annotation>\n" +
" <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\" xmlns:dcterms=\"http://purl.org/dc/terms/\" xmlns:vCard=\"http://www.w3.org/2001/vcard-rdf/3.0#\" xmlns:bqbiol=\"http://biomodels.net/biology-qualifiers/\" xmlns:bqmodel=\"http://biomodels.net/model-qualifiers/\">\n" +
" <rdf:Description rdf:about=\"#_000001\">\n" +
" <dc:creator rdf:parseType=\"Resource\">\n" +
" <rdf:Bag>\n" +
" <rdf:li rdf:parseType=\"Resource\">\n" +
" <vCard:N rdf:parseType=\"Resource\">\n" +
" <vCard:Family>Dräger</vCard:Family>\n" +
" <vCard:Given>Andreas</vCard:Given>\n" +
" </vCard:N>\n" +
" </rdf:li>\n" +
" </rdf:Bag>\n" +
" </dc:creator>\n" +
" <dcterms:created rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2005-02-02T14:56:11Z</dcterms:W3CDTF>\n" +
" </dcterms:created>\n" +
" <dcterms:modified rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2005-02-02T14:56:11Z</dcterms:W3CDTF>\n" +
" </dcterms:modified>\n" +
" </rdf:Description>\n" +
" </rdf:RDF>\n" +
"</annotation>")
self.assertEqual( True, self.equals(expected,ann.toXMLString()) )
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestRDFAnnotation2))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| gpl-3.0 | -902,378,586,924,969,100 | 41.391304 | 339 | 0.596923 | false |
Alignak-monitoring-contrib/alignak-backend | alignak_backend/models/userrestrictrole.py | 1 | 3359 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Resource information of userrestrictrole
"""
def get_name(friendly=False):
"""Get name of this resource
:return: name of this resource
:rtype: str
"""
if friendly: # pragma: no cover
return "User role restriction"
return 'userrestrictrole'
def get_doc(): # pragma: no cover
"""Get documentation of this resource
:return: rst string
:rtype: str
"""
return """
The ``userrestrictrole`` model is an internal data model used to define the CRUD
rights for an Alignak backend user.
    This allows one to define, for a user and a given realm, the create, read, update, and
delete rights on each backend endpoint.
"""
def get_schema():
"""Schema structure of this resource
:return: schema dictionary
:rtype: dict
"""
return {
'schema': {
'schema_version': {
'type': 'integer',
'default': 1,
},
'user': {
'schema_version': 1,
'title': 'Concerned user',
'type': 'objectid',
'data_relation': {
'resource': 'user',
'embeddable': True
},
'required': True,
},
'realm': {
'schema_version': 1,
'title': 'Concerned realm',
'type': 'objectid',
'data_relation': {
'resource': 'realm',
'embeddable': True
},
'required': True,
},
'sub_realm': {
'schema_version': 1,
'title': 'Sub-realms',
'comment': 'Is this right applicable to the sub-realms of the realm?',
'type': 'boolean',
'default': False
},
'resource': {
'schema_version': 1,
'title': 'Concerned resource',
'comment': 'Resource concerned with the right',
'type': 'string',
'default': '*',
'allowed': [
'*',
'actionacknowledge', 'actiondowntime', 'actionforcecheck',
'alignak', 'alignakdaemon',
'realm', 'command', 'timeperiod',
'user', 'usergroup', 'userrestrictrole',
'host', 'hostgroup', 'hostdependency', 'hostescalation',
'service', 'servicegroup', 'servicedependency', 'serviceescalation',
'grafana', 'graphite', 'influxdb', 'statsd',
'timeseriesretention', 'aligank_notifications',
'livesynthesis', 'livesynthesisretention',
'logcheckresult', 'history'
],
},
'crud': {
'schema_version': 1,
'title': 'Right',
'comment': "User's right for the concerned resource in the concerned realm. "
"Use ``*`` if all resources are concerned.",
'type': 'list',
'default': ['read'],
'allowed': ['create', 'read', 'update', 'delete', 'custom']
},
},
'schema_deleted': {}
}
| agpl-3.0 | 1,560,267,021,345,080,000 | 31.298077 | 93 | 0.4546 | false |
FreeOpcUa/opcua-modeler | uamodeler/uamodeler_ui.py | 1 | 12811 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'uamodeler/uamodeler_ui.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_UaModeler(object):
def setupUi(self, UaModeler):
UaModeler.setObjectName("UaModeler")
UaModeler.resize(922, 755)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/object.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
UaModeler.setWindowIcon(icon)
self.centralWidget = QtWidgets.QWidget(UaModeler)
self.centralWidget.setObjectName("centralWidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralWidget)
self.gridLayout.setContentsMargins(11, 11, 11, 11)
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(self.centralWidget)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.splitterCenter = QtWidgets.QSplitter(self.splitter)
self.splitterCenter.setOrientation(QtCore.Qt.Horizontal)
self.splitterCenter.setObjectName("splitterCenter")
self.splitterLeft = QtWidgets.QSplitter(self.splitterCenter)
self.splitterLeft.setOrientation(QtCore.Qt.Vertical)
self.splitterLeft.setObjectName("splitterLeft")
self.refNodeSetsView = QtWidgets.QTreeView(self.splitterLeft)
self.refNodeSetsView.setObjectName("refNodeSetsView")
self.namespaceView = QtWidgets.QTreeView(self.splitterLeft)
self.namespaceView.setObjectName("namespaceView")
self.treeView = QtWidgets.QTreeView(self.splitterLeft)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.treeView.sizePolicy().hasHeightForWidth())
self.treeView.setSizePolicy(sizePolicy)
self.treeView.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.treeView.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.treeView.setDragEnabled(True)
self.treeView.setDragDropMode(QtWidgets.QAbstractItemView.DragOnly)
self.treeView.setObjectName("treeView")
self.splitterRight = QtWidgets.QSplitter(self.splitterCenter)
self.splitterRight.setOrientation(QtCore.Qt.Vertical)
self.splitterRight.setObjectName("splitterRight")
self.frame = QtWidgets.QFrame(self.splitterRight)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.verticalLayout = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout.setContentsMargins(11, 11, 11, 11)
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(self.frame)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.attrView = QtWidgets.QTreeView(self.frame)
self.attrView.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.attrView.setEditTriggers(QtWidgets.QAbstractItemView.AllEditTriggers)
self.attrView.setProperty("showDropIndicator", False)
self.attrView.setTextElideMode(QtCore.Qt.ElideNone)
self.attrView.setAutoExpandDelay(-1)
self.attrView.setIndentation(18)
self.attrView.setSortingEnabled(True)
self.attrView.setWordWrap(True)
self.attrView.setObjectName("attrView")
self.verticalLayout.addWidget(self.attrView)
self.attrView.raise_()
self.label.raise_()
self.frame_2 = QtWidgets.QFrame(self.splitterRight)
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame_2)
self.verticalLayout_2.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_2.setSpacing(6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_2 = QtWidgets.QLabel(self.frame_2)
self.label_2.setObjectName("label_2")
self.verticalLayout_2.addWidget(self.label_2)
self.refView = QtWidgets.QTableView(self.frame_2)
self.refView.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.refView.setObjectName("refView")
self.verticalLayout_2.addWidget(self.refView)
self.logTextEdit = QtWidgets.QTextEdit(self.splitter)
self.logTextEdit.setMinimumSize(QtCore.QSize(400, 100))
self.logTextEdit.setObjectName("logTextEdit")
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
UaModeler.setCentralWidget(self.centralWidget)
self.menuBar = QtWidgets.QMenuBar(UaModeler)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 922, 25))
self.menuBar.setObjectName("menuBar")
self.menuOPC_UA_Client = QtWidgets.QMenu(self.menuBar)
self.menuOPC_UA_Client.setObjectName("menuOPC_UA_Client")
self.menuRecentFiles = QtWidgets.QMenu(self.menuBar)
self.menuRecentFiles.setObjectName("menuRecentFiles")
UaModeler.setMenuBar(self.menuBar)
self.statusBar = QtWidgets.QStatusBar(UaModeler)
self.statusBar.setObjectName("statusBar")
UaModeler.setStatusBar(self.statusBar)
self.toolBar = QtWidgets.QToolBar(UaModeler)
self.toolBar.setObjectName("toolBar")
UaModeler.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionAddObject = QtWidgets.QAction(UaModeler)
self.actionAddObject.setObjectName("actionAddObject")
self.actionAddVariable = QtWidgets.QAction(UaModeler)
self.actionAddVariable.setObjectName("actionAddVariable")
self.actionAddObjectType = QtWidgets.QAction(UaModeler)
self.actionAddObjectType.setObjectName("actionAddObjectType")
self.actionAddFolder = QtWidgets.QAction(UaModeler)
self.actionAddFolder.setObjectName("actionAddFolder")
self.actionAddProperty = QtWidgets.QAction(UaModeler)
self.actionAddProperty.setObjectName("actionAddProperty")
self.actionAddDataType = QtWidgets.QAction(UaModeler)
self.actionAddDataType.setObjectName("actionAddDataType")
self.actionAddVariableType = QtWidgets.QAction(UaModeler)
self.actionAddVariableType.setObjectName("actionAddVariableType")
self.actionAddReferenceType = QtWidgets.QAction(UaModeler)
self.actionAddReferenceType.setObjectName("actionAddReferenceType")
self.actionOpen = QtWidgets.QAction(UaModeler)
self.actionOpen.setObjectName("actionOpen")
self.actionSave = QtWidgets.QAction(UaModeler)
self.actionSave.setObjectName("actionSave")
self.actionQuit = QtWidgets.QAction(UaModeler)
self.actionQuit.setObjectName("actionQuit")
self.actionNew = QtWidgets.QAction(UaModeler)
self.actionNew.setObjectName("actionNew")
self.actionSaveAs = QtWidgets.QAction(UaModeler)
self.actionSaveAs.setObjectName("actionSaveAs")
self.actionImport = QtWidgets.QAction(UaModeler)
self.actionImport.setObjectName("actionImport")
self.actionCloseModel = QtWidgets.QAction(UaModeler)
self.actionCloseModel.setObjectName("actionCloseModel")
self.actionCopy = QtWidgets.QAction(UaModeler)
self.actionCopy.setObjectName("actionCopy")
self.actionPaste = QtWidgets.QAction(UaModeler)
self.actionPaste.setObjectName("actionPaste")
self.actionDelete = QtWidgets.QAction(UaModeler)
self.actionDelete.setObjectName("actionDelete")
self.actionInstantiate = QtWidgets.QAction(UaModeler)
self.actionInstantiate.setObjectName("actionInstantiate")
self.actionAddMethod = QtWidgets.QAction(UaModeler)
self.actionAddMethod.setObjectName("actionAddMethod")
self.actionAddEnum = QtWidgets.QAction(UaModeler)
self.actionAddEnum.setObjectName("actionAddEnum")
self.actionUseOpenUa = QtWidgets.QAction(UaModeler)
self.actionUseOpenUa.setCheckable(True)
self.actionUseOpenUa.setObjectName("actionUseOpenUa")
self.menuOPC_UA_Client.addAction(self.actionNew)
self.menuOPC_UA_Client.addAction(self.actionCloseModel)
self.menuOPC_UA_Client.addAction(self.actionOpen)
self.menuOPC_UA_Client.addAction(self.actionImport)
self.menuOPC_UA_Client.addAction(self.actionSave)
self.menuOPC_UA_Client.addAction(self.actionSaveAs)
self.menuOPC_UA_Client.addAction(self.actionUseOpenUa)
self.menuOPC_UA_Client.addAction(self.actionQuit)
self.menuBar.addAction(self.menuOPC_UA_Client.menuAction())
self.menuBar.addAction(self.menuRecentFiles.menuAction())
self.toolBar.addAction(self.actionNew)
self.toolBar.addAction(self.actionOpen)
self.toolBar.addAction(self.actionSave)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionCopy)
self.toolBar.addAction(self.actionPaste)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionAddFolder)
self.toolBar.addAction(self.actionAddObject)
self.toolBar.addAction(self.actionAddVariable)
self.toolBar.addAction(self.actionAddProperty)
self.toolBar.addAction(self.actionAddMethod)
self.toolBar.addAction(self.actionAddObjectType)
self.toolBar.addAction(self.actionAddDataType)
self.toolBar.addAction(self.actionAddVariableType)
self.toolBar.addAction(self.actionAddReferenceType)
self.retranslateUi(UaModeler)
QtCore.QMetaObject.connectSlotsByName(UaModeler)
def retranslateUi(self, UaModeler):
_translate = QtCore.QCoreApplication.translate
UaModeler.setWindowTitle(_translate("UaModeler", "FreeOpcUa Modeler"))
self.label.setText(_translate("UaModeler", "Attributes Editor"))
self.label_2.setText(_translate("UaModeler", "References Editor"))
self.menuOPC_UA_Client.setTitle(_translate("UaModeler", "Act&ions"))
self.menuRecentFiles.setTitle(_translate("UaModeler", "Recent files"))
self.toolBar.setWindowTitle(_translate("UaModeler", "toolBar"))
self.actionAddObject.setText(_translate("UaModeler", "Add Object"))
self.actionAddObject.setToolTip(_translate("UaModeler", "add child object to current node"))
self.actionAddVariable.setText(_translate("UaModeler", "Add Variable"))
self.actionAddObjectType.setText(_translate("UaModeler", "Add Object Type"))
self.actionAddObjectType.setToolTip(_translate("UaModeler", "add new object type as subtype of current node"))
self.actionAddFolder.setText(_translate("UaModeler", "Add Folder"))
self.actionAddProperty.setText(_translate("UaModeler", "Add Property"))
self.actionAddDataType.setText(_translate("UaModeler", "Add Data Type"))
self.actionAddVariableType.setText(_translate("UaModeler", "Add Variable Type"))
self.actionAddReferenceType.setText(_translate("UaModeler", "Add Reference Type"))
self.actionOpen.setText(_translate("UaModeler", "&Open"))
self.actionSave.setText(_translate("UaModeler", "&Save"))
self.actionQuit.setText(_translate("UaModeler", "&Quit"))
self.actionNew.setText(_translate("UaModeler", "&New Model"))
self.actionSaveAs.setText(_translate("UaModeler", "Sa&ve As"))
self.actionImport.setText(_translate("UaModeler", "&Import XML"))
self.actionImport.setToolTip(_translate("UaModeler", "Import XML into current model"))
self.actionCloseModel.setText(_translate("UaModeler", "&Close Model"))
self.actionCloseModel.setToolTip(_translate("UaModeler", "Close current model"))
self.actionCopy.setText(_translate("UaModeler", "Copy"))
self.actionPaste.setText(_translate("UaModeler", "Paste"))
self.actionDelete.setText(_translate("UaModeler", "Delete Node"))
self.actionInstantiate.setText(_translate("UaModeler", "Instantiate"))
self.actionAddMethod.setText(_translate("UaModeler", "Add Method"))
self.actionAddEnum.setText(_translate("UaModeler", "Add Enum Type"))
self.actionAddEnum.setToolTip(_translate("UaModeler", "Add Enum Type"))
self.actionUseOpenUa.setText(_translate("UaModeler", "Use Open62541 Server"))
self.actionUseOpenUa.setToolTip(_translate("UaModeler", "User Open62541 Server"))
| gpl-3.0 | -2,622,760,638,634,183,700 | 57.231818 | 118 | 0.722738 | false |
jahanzebk/python-text-classifier | FileHandler.py | 1 | 1037 | import os
import Document as Doc
class FileHandler():
def makeLog(self, myFile, msg):
logDir = "logs/"
os.chdir(logDir)
fo = open(myFile, 'a')
fo.write(msg + "\n")
fo.close()
def loadDirs(self, myDir, labelled = False):
docs = []
basepath = os.getcwd()
print "Loading test data..." if labelled == False else "Loading training data..."
for subdir, dirs, files in os.walk(myDir):
os.chdir(subdir)
for file in files:
fo = open(file, 'r')
content = fo.read()
if not content:
continue
doc = Doc.Document(file, content)
if (labelled):
doc.category = subdir.split("/")[-1]
docs.append(doc)
fo.close()
os.chdir(basepath)
print "Loaded Documents."
return docs
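# Illustrative usage (the directory names are assumptions, not part of this module):
#   fh = FileHandler()
#   train_docs = fh.loadDirs("trainingdata", labelled=True)  # category taken from the sub-directory name
#   test_docs = fh.loadDirs("testdata")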
| mit | 5,380,007,130,982,352,000 | 27.054054 | 89 | 0.43973 | false |
magaum/python_dos_irmao | python para zumbis - lista 4.py | 1 | 3133 | # exercise 1
import random
vetor = []
contador = 0
num_maior = 0
num_menor = 100
while contador <= 9:
    # library used to generate random numbers
c = random.randint(0,100)
    # appending the value to the 'vetor' list
vetor.append(c)
c = vetor [contador]
contador = contador + 1
if c >= num_maior:
num_maior = c
if c <= num_menor:
num_menor = c
# sort() arranges the list in ascending order
vetor.sort()
#print (vetor,'\nO menor número da lista é: ',vetor[0],'\nO maior número da lista é: ',vetor[9])
print ('\n',vetor, '\nnumero menor: ',num_menor,'\nnumero maior: ', num_maior)
# exercise 2
import random
vetor = []
par = []
impar = []
for repeticao in range(19):
c = random.randint(0,100)
b = vetor.append(c)
if c % 2 == 0:
par.append(c)
else:
impar.append(c)
print ('\nLista inteira', vetor[:], '\nLista com números pares', par[:],'\nListas com números impares', impar[:])
# exercise 3
# create two lists with random numbers from 1 to 100 and a third list holding the contents of the first two interleaved
# importing the random library
import random
# defining the variables
primeira_lista = []
segunda_lista = []
terceira_lista = []
# loop
for contador in range (9):
    # putting random values into the lists
lista = primeira_lista.append(random.randint(0,100))
lista = segunda_lista.append(random.randint(0,100))
    # taking the values from lists 1 and 2 and placing them into list 3
lista = primeira_lista[contador]
terceira_lista.append(lista)
lista = segunda_lista[contador]
terceira_lista.append(lista)
print ('\nprimeira lista' ,primeira_lista [:], '\nsegunda lista', segunda_lista [:], '\nterceira lista', terceira_lista[:])
# exercise 4
lista_python = []
lista = '''the python software foundation and the global python community welcome and encourage participation by everyone
our community is based on mutual respect tolerance and encouragement and we are working to help each other live up to these principles
we want our community to be more diverse whoever you are and whatever your background we welcome you'''.split()
#contador = 0
print ('\n',lista)
for contador in range (len (lista)):
#while contador < len(lista):
palavra_lista = lista [contador]
if palavra_lista [0] in "python":
lista_python.append(palavra_lista)
if palavra_lista [-1] in "python":
lista_python.append(palavra_lista)
# contador += 1
print('\n',lista_python)
# exercise 5
lista_python = []
lista = '''the python software foundation and the global python community welcome and encourage participation by everyone
our community is based on mutual respect tolerance and encouragement and we are working to help each other live up to these principles
we want our community to be more diverse whoever you are and whatever your background we welcome you'''.split()
#contador = 0
print ('\n',lista)
for contador in range (len (lista)):
#while contador < len(lista):
palavra_lista = lista [contador]
    # note: [0:-0] is an empty slice, so '' in 'python' is always True here and
    # only the length test actually filters the words
    if palavra_lista [0:-0] in 'python' and (len(palavra_lista))> 4:
lista_python.append(palavra_lista)
# contador += 1
print('\nO número de palavras com as letras de "python" e com mais de 4 letras é',len(lista_python),'\n')
| gpl-3.0 | 6,929,219,987,105,890,000 | 33.175824 | 134 | 0.727331 | false |
basecrm/basecrm-python | basecrm/test/testutils.py | 1 | 10199 | import unittest
from unittest.util import safe_repr  # used by the assertIsInstance fallback below
import os
import random
import munch
import basecrm
def rand():
return str(random.randint(1, 1000000000))
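# Note on lazyproperty below: the computed value is cached on the *class*, not the
# instance, so fixtures such as self.contact or self.deal are created once and then
# shared by every test method of the test case class.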
def lazyproperty(function):
attribute = '__lazy__' + function.__name__
@property
def __lazyproperty(self):
if not hasattr(self.__class__, attribute):
setattr(self.__class__, attribute, function(self))
return getattr(self.__class__, attribute)
return __lazyproperty
if hasattr(unittest.TestCase, 'assertIsInstance'):
class UnittestCompat:
pass
else:
class UnittestCompat:
def assertIsInstance(self, obj, cls):
if not isinstance(obj, cls):
self.fail("%s is not an instance of %r" % (safe_repr(obj), cls))
def assertGreaterEqual(self, given, expected):
if not (given >= expected):
self.fail("%s given mut be greater than or equal to %s" % (given, expected))
class BaseTestCase(unittest.TestCase, UnittestCompat):
@lazyproperty
def client(self):
return basecrm.Client(access_token=self.access_token,
base_url=self.base_url,
user_agent=self.user_agent,
verbose=True)
@property
def user_agent(self):
return "BaseCRM/V2 Python/{version}+tests".format(version=basecrm.VERSION)
@property
def access_token(self):
token = os.environ.get('BASECRM_ACCESS_TOKEN')
if token is None:
raise Exception("'BASECRM_ACCESS_TOKEN' environment variable has not been found.")
return token
@property
def base_url(self):
url = os.environ.get('BASECRM_BASE_URL')
return url or "https://api.getbase.com"
@lazyproperty
def account(self):
return self.client.accounts.self()
@lazyproperty
def associated_contact(self):
return self.create_associated_contact()
@lazyproperty
def contact(self):
return self.create_contact()
@lazyproperty
def deal(self):
return self.create_deal()
@lazyproperty
def deal_source(self):
return self.create_deal_source()
@lazyproperty
def deal_unqualified_reason(self):
return self.create_deal_unqualified_reason()
@lazyproperty
def lead(self):
return self.create_lead()
@lazyproperty
def lead_source(self):
return self.create_lead_source()
@lazyproperty
def line_item(self):
return self.create_line_item()
@lazyproperty
def loss_reason(self):
return self.create_loss_reason()
@lazyproperty
def note(self):
return self.create_note()
@lazyproperty
def order(self):
return self.create_order()
@lazyproperty
def product(self):
return self.create_product()
@lazyproperty
def source(self):
return self.create_source()
@lazyproperty
def tag(self):
return self.create_tag()
@lazyproperty
def task(self):
return self.create_task()
@lazyproperty
def user(self):
return self.client.users.self()
def create_associated_contact(self, **attributes):
deal_id = self.create_deal().id;
associated_contact = {
'role': "involved",
'contact_id': self.create_contact().id,
}
associated_contact.update(attributes)
associated_contact = self.client.associated_contacts.create(deal_id, **associated_contact);
associated_contact['deal_id'] = deal_id;
return associated_contact;
def create_contact(self, **attributes):
contact = {
'description': "I know him via Tom",
'email': "[email protected]",
'facebook': "mjohnson",
'fax': "+44-208-1234567",
'first_name': 'Mark' + rand(),
'industry': "Design Services",
'is_organization': False,
'last_name': 'Johnson' + rand(),
'linkedin': "mjohnson",
'mobile': "508-778-6516",
'name': 'Design Services Company' + rand(),
'phone': "508-778-6516",
'skype': "mjohnson",
'tags': ["important"],
'title': "CEO",
'twitter': "mjohnson",
'website': "www.designservices.com",
}
contact.update(attributes)
contact = self.client.contacts.create(**contact);
return contact;
def create_deal(self, **attributes):
deal = {
'currency': "EUR",
'dropbox_email': "[email protected]",
'hot': True,
'name': 'Website Redesign' + rand(),
'tags': ["important"],
'value': 1000,
'contact_id': self.create_contact().id,
}
deal.update(attributes)
deal = self.client.deals.create(**deal);
return deal;
def create_deal_source(self, **attributes):
deal_source = {
'name': 'Word of mouth' + rand(),
}
deal_source.update(attributes)
deal_source = self.client.deal_sources.create(**deal_source);
return deal_source;
def create_deal_unqualified_reason(self, **attributes):
deal_unqualified_reason = {
'name': 'We were too expensive' + rand(),
}
deal_unqualified_reason.update(attributes)
deal_unqualified_reason = self.client.deal_unqualified_reasons.create(**deal_unqualified_reason);
return deal_unqualified_reason;
def create_lead(self, **attributes):
lead = {
'description': "I know him via Tom",
'email': "[email protected]",
'facebook': "mjohnson",
'fax': "+44-208-1234567",
'first_name': 'Mark' + rand(),
'industry': "Design Services",
'last_name': 'Johnson' + rand(),
'linkedin': "mjohnson",
'mobile': "508-778-6516",
'phone': "508-778-6516",
'skype': "mjohnson",
'status': "Unqualified",
'tags': ["important"],
'title': "CEO",
'twitter': "mjohnson",
'website': "www.designservices.com",
}
lead.update(attributes)
lead = self.client.leads.create(**lead);
return lead;
def create_lead_source(self, **attributes):
lead_source = {
'name': 'Word of mouth' + rand(),
}
lead_source.update(attributes)
lead_source = self.client.lead_sources.create(**lead_source);
return lead_source;
def create_line_item(self, **attributes):
product_id = self.create_product().id;
order_id = self.create_order().id;
line_item = {
'product_id': product_id,
'value': 1599.99,
'variation': 0,
'currency': "USD",
'quantity': 1,
'price': 1599.99,
}
line_item.update(attributes)
line_item = self.client.line_items.create(order_id, **line_item);
line_item['order_id'] = order_id;
return line_item;
def create_loss_reason(self, **attributes):
loss_reason = {
'name': 'We were too expensive' + rand(),
}
loss_reason.update(attributes)
loss_reason = self.client.loss_reasons.create(**loss_reason);
return loss_reason;
def create_note(self, **attributes):
note = {
'content': "Highly important.",
'resource_id': self.create_contact().id,
'resource_type': 'contact',
}
note.update(attributes)
note = self.client.notes.create(**note);
return note;
def create_order(self, **attributes):
deal = self.create_deal()
order = {
'deal_id': deal.id,
'discount': 4,
}
order.update(attributes)
order = self.client.orders.create(**order);
return order;
def create_product(self, **attributes):
product = {
'name': 'Enterprise Plan' + rand(),
'description': 'Includes more storage options',
'sku': 'enterprise-plan',
'active': True,
'max_discount': 4,
'max_markup': 4,
'cost': 2,
'cost_currency': 'USD',
'prices': [{'amount': '1599.99', 'currency': 'USD'}, {'amount': '3599.99', 'currency': 'PLN'}],
}
product.update(attributes)
product = self.client.products.create(**product);
return product;
def create_source(self, **attributes):
source = {
'name': 'Word of mouth' + rand(),
}
source.update(attributes)
source = self.client.sources.create(**source);
return source;
def create_tag(self, **attributes):
tag = {
'name': 'publisher' + rand(),
'resource_type': 'contact',
}
tag.update(attributes)
tag = self.client.tags.create(**tag);
return tag;
def create_task(self, **attributes):
task = {
'content': "Contact Tom",
'due_date': "2014-09-27T16:32:56+00:00",
'remind_at': "2014-09-29T15:32:56+00:00",
'resource_id': self.create_contact().id,
'resource_type': 'contact',
}
task.update(attributes)
task = self.client.tasks.create(**task);
return task;
def create_deal_with_decimal_value(self, **attributes):
deal = {
'id': rand(),
'currency': "EUR",
'hot': True,
'name': 'Website Redesign' + rand(),
'tags': ["important"],
'value': '11.12',
'contact_id': self.create_contact().id,
}
deal.update(attributes)
client = self.client
original_request_func = client.http_client.request
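        # The stub below replaces the HTTP layer with a lambda that echoes the payload
        # back as a (status, headers, body) tuple -- presumably so the decimal 'value'
        # reaches the client code without a live API round-trip.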
client.http_client.request = lambda *args, **kwargs: (200, {}, munch.Munch(deal))
deal = self.client.deals.create(**deal);
client.http_client.request = original_request_func
return deal;
| mit | -4,958,831,288,083,135,000 | 27.096419 | 107 | 0.547505 | false |
baloo/shinken | shinken/modules/dummy_scheduler.py | 1 | 2365 | #!/usr/bin/python
#Copyright (C) 2009 Gabes Jean, [email protected]
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#This class is an example of a Scheduler module
#Here for the configuration phase AND running one
from shinken.basemodule import BaseModule
properties = {
'daemons' : ['scheduler'],
'type' : 'dummy_scheduler',
'external' : False,
'phases' : ['retention'],
}
#called by the plugin manager to get an instance of this scheduler module
def get_instance(mod_conf):
print "Get a Dummy scheduler module for plugin %s" % mod_conf.get_name()
instance = Dummy_scheduler(mod_conf, foo="bar")
return instance
#Just print some stuff
class Dummy_scheduler(BaseModule):
def __init__(self, mod_conf, foo):
BaseModule.__init__(self, mod_conf)
self.myfoo = foo
#Called by Scheduler to say 'let's prepare yourself guy'
def init(self):
print "Initilisation of the dummy scheduler module"
#self.return_queue = self.properties['from_queue']
#Ok, main function that is called in the retention creation pass
def update_retention_objects(self, sched, log_mgr):
print "[Dummy] asking me to update the retention objects"
#Should return if it succeed in the retention load or not
def load_retention_objects(self, sched, log_mrg):
print "[Dummy] asking me to load the retention objects"
return False
#From now external is not used in the scheduler job
# #When you are in "external" mode, that is the main loop of your process
# def main(self):
# while True:
# print "Raise a external command as example"
# e = ExternalCommand('Viva la revolution')
# self.return_queue.put(e)
# time.sleep(1)
| agpl-3.0 | 7,313,433,403,838,823,000 | 30.959459 | 76 | 0.693869 | false |
UKTradeInvestment/export-wins-data | wins/management/commands/generate_customer_emailses.py | 1 | 3735 | from django.conf import settings
from django.core.mail import send_mail
from django.core.management.base import BaseCommand
from ...models import Win, Notification
from ...notifications import generate_customer_email, generate_officer_email
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("--ids", type=str)
parser.add_argument("--change_ids", type=str)
def thing(self, given_win_ids):
wins = Win.objects.filter(id__in=given_win_ids)
found_win_ids = set(str(win.id).replace('-', '') for win in wins)
missing_win_ids = set(given_win_ids) - found_win_ids
assert not missing_win_ids, "missing win ids: %s" % missing_win_ids
return wins
def make_customer_email_dict(self, win):
url = ("https://www.exportwins.service.trade.gov.uk/wins/review/" +
str(win.pk)) # should not be hardcoded
return generate_customer_email(url, win)
def send_win_customer_email(self, win):
customer_email_dict = self.make_customer_email_dict(win)
send_mail(
customer_email_dict['subject'],
customer_email_dict['body'],
settings.FEEDBACK_ADDRESS,
customer_email_dict['to'],
html_message=customer_email_dict['html_body'],
)
customer_notification = Notification(
type=Notification.TYPE_CUSTOMER,
win=win,
recipient=customer_email_dict['to'][0],
)
customer_notification.save()
# mark win complete
win.complete = True
win.save()
def send_officer_email(self, win):
officer_email_dict = generate_officer_email(win)
officer_email_dict['to'] = win.target_addresses
send_mail(
officer_email_dict['subject'],
officer_email_dict['body'],
settings.SENDING_ADDRESS,
officer_email_dict['to'],
)
for officer_email in officer_email_dict['to']:
officer_notification = Notification(
type=Notification.TYPE_OFFICER,
win=win,
recipient=officer_email,
)
officer_notification.save()
def handle(self, *args, **options):
if options['ids']:
given_win_ids = [w.replace('-', '')
for w in options['ids'].split(',')]
wins = self.thing(given_win_ids)
elif options['change_ids']:
idemails = options['change_ids']
id_emails = [idemail.split(':')
for idemail in idemails.split(',') if idemail]
id_to_email = {
wid.replace('-', ''): email.lower()
for wid, email in id_emails
}
wins = self.thing(list(id_to_email.keys()))
for win in wins:
win_id = str(win.id)
new_customer_email = id_to_email[win_id.replace('-', '')]
if win.customer_email_address != new_customer_email:
print(
'win', win_id,
'changing email from ',
win.customer_email_address,
'to',
new_customer_email,
'new email sent'
)
win.customer_email_address = new_customer_email
win.save()
else:
print('win', win_id, 'customer email unchanged, email re-sent')
else:
assert False, 'no valid flag given'
for win in wins:
self.send_win_customer_email(win)
self.send_officer_email(win)
| gpl-3.0 | -8,274,257,299,536,668,000 | 35.262136 | 83 | 0.533869 | false |
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_chart_title02.py | 1 | 1498 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_title02.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of an XlsxWriter file with default title."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [73655040, 73656576]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.set_title({'name': 'Title!'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | 4,182,182,808,397,589,000 | 24.389831 | 79 | 0.552069 | false |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv4_unicast/prefix_limit/__init__.py | 1 | 18934 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class prefix_limit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-unicast/prefix-limit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configure the maximum number of prefixes that will be
accepted from a peer
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "prefix-limit"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"neighbors",
"neighbor",
"afi-safis",
"afi-safi",
"ipv4-unicast",
"prefix-limit",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv4_unicast/prefix_limit/config (container)
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv4_unicast/prefix_limit/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state (container)
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class prefix_limit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/ipv4-unicast/prefix-limit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configure the maximum number of prefixes that will be
accepted from a peer
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "prefix-limit"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"neighbors",
"neighbor",
"afi-safis",
"afi-safi",
"ipv4-unicast",
"prefix-limit",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv4_unicast/prefix_limit/config (container)
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv4_unicast/prefix_limit/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state (container)
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/ipv4_unicast/prefix_limit/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information relating to the prefix-limit for the
AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
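# Illustrative usage (not part of the generated file; the attribute path below follows
# the YANG path and is an assumption): pyangbind bindings such as prefix_limit are
# normally reached through their parent containers rather than instantiated directly:
#
#     pl = afi_safi.ipv4_unicast.prefix_limit     # hypothetical parent objects
#     pl.config.max_prefixes = 1000               # leaf of the config container
#     pl.state                                    # read-only operational counterpart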
| apache-2.0 | 3,081,041,359,454,356,500 | 37.87885 | 377 | 0.581916 | false |
RedhawkSDR/integration-gnuhawk | qa/tests/qa_boolean_operators.py | 1 | 13128 | #!/usr/bin/env python
#
# Copyright 2004,2007,2008,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import struct
class test_boolean_operators (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def help_ss (self, src_data, exp_data, op, port_prefix='data_in_'):
for s in zip (range (len (src_data)), src_data):
src = gr.vector_source_s (s[1])
src.source.connect(op,providesPortName=port_prefix+str(s[0]))
src.streamID = str(s[0])
self.tb.sources.append(src)
dst = gr.vector_sink_s ()
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (exp_data, result_data)
def help_const_ss (self, src_data, exp_data, op):
for s in zip (range (len (src_data)), src_data):
src = gr.vector_source_s (s[1])
self.tb.connect (src, (op, s[0]))
dst = gr.vector_sink_s ()
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (exp_data, result_data)
def help_bb (self, src_data, exp_data, op, port_prefix='data_in_'):
for s in zip (range (len (src_data)), src_data):
src = gr.vector_source_b (s[1])
src.source.connect(op,providesPortName=port_prefix+str(s[0]))
src.streamID = str(s[0])
self.tb.sources.append(src)
dst = gr.vector_sink_b ()
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (exp_data, result_data)
def help_const_bb (self, src_data, exp_data, op):
for s in zip (range (len (src_data)), src_data):
src = gr.vector_source_b (s[1])
self.tb.connect (src, (op, s[0]))
dst = gr.vector_sink_b ()
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (exp_data, result_data)
def help_ii (self, src_data, exp_data, op, port_prefix='data_in_'):
for s in zip (range (len (src_data)), src_data):
src = gr.vector_source_i (s[1])
src.source.connect(op,providesPortName=port_prefix+str(s[0]))
src.streamID = str(s[0])
self.tb.sources.append(src)
dst = gr.vector_sink_i ()
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (exp_data, result_data)
def help_const_ii (self, src_data, exp_data, op):
for s in zip (range (len (src_data)), src_data):
src = gr.vector_source_i (s[1])
self.tb.connect (src, (op, s[0]))
dst = gr.vector_sink_i ()
self.tb.connect (op, dst)
self.tb.run ()
result_data = dst.data ()
self.assertEqual (exp_data, result_data)
def test_xor_ss_2i (self):
src1_data = (1, 2, 3, 0x5004, 0x1150)
src2_data = (8, 2, 1 , 0x0508, 0x1105)
expected_result = (9, 0, 2, 0x550C, 0x0055)
op = gr.xor_ss (2)
self.help_ss ((src1_data, src2_data),
expected_result, op, port_prefix='short_in_')
def test_xor_ss_3i (self):
src1_data = (1, 2, 3, 4, 0x50)
src2_data = (8, 2, 1 , 8, 0x05)
src3_data = (2, 2, 15 , 0, 0x1100)
expected_result = (11, 2, 13, 0xC, 0x1155)
op = gr.xor_ss (3)
self.help_ss ((src1_data, src2_data, src3_data),
expected_result, op, port_prefix='short_in_')
def test_xor_ss_4i (self):
src1_data = (1, 2, 3, 4, 0x50)
src2_data = (8, 2, 1 , 8, 0x05)
src3_data = (2, 2, 15 , 0, 0x1100)
src4_data = (11, 2, 13, 0xC, 0x1155)
expected_result = (0, 0, 0, 0, 0)
op = gr.xor_ss (4)
self.help_ss ((src1_data, src2_data, src3_data, src4_data),
expected_result, op, port_prefix='short_in_')
def test_xor_bb_2i (self):
src1_data = (1, 2, 3, 4, 0x50)
src2_data = (8, 2, 1 , 8, 0x05)
expected_result = (9, 0, 2, 0xC, 0x55)
op = gr.xor_bb (2)
self.help_bb ((src1_data, src2_data),
expected_result, op, port_prefix='byte_in_')
def test_xor_bb_3i (self):
src1_data = (1, 2, 3, 4, 0x50)
src2_data = (8, 2, 1 , 8, 0x05)
src3_data = (2, 2, 15 , 0, 0x00)
expected_result = (11, 2, 13, 0xC, 0x55)
op = gr.xor_bb (3)
self.help_bb ((src1_data, src2_data, src3_data),
expected_result, op, port_prefix='byte_in_')
def test_xor_bb_4i (self):
src1_data = (1, 2, 3, 4, 0x50)
src2_data = (8, 2, 1 , 8, 0x05)
src3_data = (2, 2, 15 , 0, 0x00)
src4_data = (11, 2, 13, 0xC, 0x55)
expected_result = (0, 0, 0, 0, 0)
op = gr.xor_bb (4)
self.help_bb ((src1_data, src2_data, src3_data, src4_data),
expected_result, op, port_prefix='byte_in_')
def test_xor_ii_2i (self):
src1_data = (1, 2, 3, 0x5000004, 0x11000050)
src2_data = (8, 2, 1 , 0x0500008, 0x11000005)
expected_result = (9, 0, 2, 0x550000C, 0x00000055)
op = gr.xor_ii (2)
self.help_ii ((src1_data, src2_data),
expected_result, op, port_prefix='long_in_')
def test_xor_ii_3i (self):
src1_data = (1, 2, 3, 4, 0x50)
src2_data = (8, 2, 1 , 8, 0x05)
src3_data = (2, 2, 15 , 0, 0x1100)
expected_result = (11, 2, 13, 0xC, 0x1155)
op = gr.xor_ii (3)
self.help_ii ((src1_data, src2_data, src3_data),
expected_result, op, port_prefix='long_in_')
def test_xor_ii_4i (self):
src1_data = (1, 2, 3, 4, 0x50)
src2_data = (8, 2, 1 , 8, 0x05)
src3_data = (2, 2, 15 , 0, 0x841100)
src4_data = (11, 2, 13, 0xC, 0x841155)
expected_result = (0, 0, 0, 0, 0)
op = gr.xor_ii (4)
self.help_ii ((src1_data, src2_data, src3_data, src4_data),
expected_result, op, port_prefix='long_in_')
def test_and_ss (self):
src1_data = (1, 2, 3, 0x5004, 0x1150)
src2_data = (8, 2, 1 , 0x0508, 0x1105)
expected_result = (0, 2, 1, 0x0000, 0x1100)
op = gr.and_ss ()
self.help_ss ((src1_data, src2_data),
expected_result, op)
def test_and_bb (self):
src1_data = (1, 2, 2, 3, 0x04, 0x50)
src2_data = (8, 2, 2, 1, 0x08, 0x05)
expected_result = (0, 2, 2, 1, 0x00, 0x00)
op = gr.and_bb ()
self.help_bb ((src1_data, src2_data),
expected_result, op)
def test_and_ii (self):
src1_data = (1, 2, 3, 0x50005004, 0x11001150)
src2_data = (8, 2, 1 , 0x05000508, 0x11001105)
expected_result = (0, 2, 1, 0x00000000, 0x11001100)
op = gr.and_ii ()
self.help_ii ((src1_data, src2_data),
expected_result, op)
def test_and_const_bb (self):
src1_data = (0xf1, 0x82, 0x03, 0x40, 0xff)
expected_result = (1, 2, 3, 0, 0x0f)
op = gr.and_const_bb(0x0f)
self.help_const_bb ((src1_data,),
expected_result, op)
def test_or_ss_2i (self):
src1_data = (1, 2, 3, 0x5004, 0x1150)
src2_data = (8, 2, 1 , 0x0508, 0x1105)
expected_result = (9, 2, 3, 0x550C, 0x1155)
op = gr.or_ss (2)
self.help_ss ((src1_data, src2_data),
expected_result, op, port_prefix='short_in_')
def test_or_ss_3i (self):
src1_data = (1, 2, 2, 3, 0x04, 0x50)
src2_data = (8, 2, 2, 1 , 0x08, 0x05)
src3_data = (8, 2, 1, 1 , 0x08, 0x05)
expected_result = (9, 2, 3, 3, 0x0C, 0x55)
op = gr.or_ss (3)
self.help_ss ((src1_data, src2_data, src3_data),
expected_result, op, port_prefix='short_in_')
def test_or_ss_4i (self):
src1_data = (1, 2, 2, 3 , 0x04, 0x50)
src2_data = (8, 2, 2, 1 , 0x08, 0x05)
src3_data = (8, 2, 1, 1 , 0x08, 0x05)
src4_data = (8, 2, 1, 5 , 0x3508, 0x4105)
expected_result = (9, 2, 3, 7, 0x350C, 0x4155)
op = gr.or_ss (4)
self.help_ss ((src1_data, src2_data, src3_data, src4_data),
expected_result, op, port_prefix='short_in_')
def test_or_bb_2i (self):
src1_data = (1, 2, 3, 0x04, 0x50)
src2_data = (8, 2, 1 , 0x08, 0x05)
expected_result = (9, 2, 3, 0x0C, 0x55)
op = gr.or_bb (2)
self.help_bb ((src1_data, src2_data),
expected_result, op, port_prefix='byte_in_')
def test_or_bb_3i (self):
src1_data = (1, 2, 2, 3, 0x04, 0x50)
src2_data = (8, 2, 2, 1 , 0x08, 0x05)
src3_data = (8, 2, 1, 1 , 0x08, 0x05)
expected_result = (9, 2, 3, 3, 0x0C, 0x55)
op = gr.or_bb (3)
self.help_bb ((src1_data, src2_data, src3_data),
expected_result, op, port_prefix='byte_in_')
def test_or_bb_4i (self):
src1_data = (1, 2, 2, 3 , 0x04, 0x50)
src2_data = (8, 2, 2, 1 , 0x08, 0x05)
src3_data = (8, 2, 1, 1 , 0x18, 0x05)
src4_data = (8, 2, 1, 5 , 0x58, 0x15)
expected_result = (9, 2, 3, 7, 0x5C, 0x55)
op = gr.or_bb (4)
self.help_bb ((src1_data, src2_data, src3_data, src4_data),
expected_result, op, port_prefix='byte_in_')
def test_or_ii_2i (self):
src1_data = (1, 2, 3, 0x50005004, 0x11001150)
src2_data = (8, 2, 1 , 0x05000508, 0x11001105)
expected_result = (9, 2, 3, 0x5500550C, 0x11001155)
op = gr.or_ii (2)
self.help_ii ((src1_data, src2_data),
expected_result, op, port_prefix='long_in_')
def test_or_ii_3i (self):
src1_data = (1, 2, 2, 3, 0x04, 0x50)
src2_data = (8, 2, 2, 1 , 0x08, 0x05)
src3_data = (8, 2, 1, 1 , 0x08, 0x05)
expected_result = (9, 2, 3, 3, 0x0C, 0x55)
op = gr.or_ii (3)
self.help_ii ((src1_data, src2_data, src3_data),
expected_result, op, port_prefix='long_in_')
def test_or_ii_4i (self):
src1_data = (1, 2, 2, 3, 0x04, 0x50)
src2_data = (8, 2, 2, 1 , 0x08, 0x05)
src3_data = (8, 2, 1, 1 , 0x08, 0x05)
src4_data = (8, 2, 1, 5 , 0x05000508, 0x11001105)
expected_result = (9, 2, 3, 7, 0x0500050C, 0x11001155)
op = gr.or_ii (4)
self.help_ii ((src1_data, src2_data, src3_data, src4_data),
expected_result, op, port_prefix='long_in_')
def test_not_ss (self):
src1_data = (1, 2, 3, 0x5004, 0x1150)
expected_result = (~1, ~2, ~3, ~0x5004, ~0x1150)
op = gr.not_ss ()
        self.help_const_ss ((src1_data,),
                            expected_result, op)
def test_not_bb (self):
src1_data = (1, 2, 2, 3, 0x04, 0x50)
expected_result = (0xFE, 0xFD, 0xFD, 0xFC, 0xFB, 0xAF)
op = gr.not_bb ()
        self.help_const_bb ((src1_data,),
                            expected_result, op)
def test_not_ii (self):
src1_data = (1, 2, 3, 0x50005004, 0x11001150)
expected_result = (~1 , ~2, ~3, ~0x50005004, ~0x11001150)
op = gr.not_ii ()
        self.help_const_ii ((src1_data,),
                            expected_result, op)
if __name__ == '__main__':
gr_unittest.run(test_boolean_operators, "test_boolean_operators.xml")
| gpl-3.0 | -8,088,243,925,242,206,000 | 40.413249 | 73 | 0.490402 | false |
glujan/lapka | lapka/models.py | 1 | 2407 | """Łapka's persistence layer.
Represent and operate on data stored in a persistence storage (like a database).
"""
import pickle
_pickle_path = 'fetched_data.pickle'
class AnimalBase:
"""Base class for Animals persistence."""
def __init__(self, **kwargs):
"""Create an animal instance."""
self.category = kwargs.pop('category', '')
self.name = kwargs.pop('name', '')
self.a_id = kwargs.pop('id', '')
self.since = kwargs.pop('since', '')
self.photos = kwargs.pop('photos', list())
self.description = kwargs.pop('description', list())
self.url = kwargs.pop('url', '')
self.place = kwargs.pop('place', '')
def to_dict(self):
"""Serialize an instance to a dictionary format."""
return {
'name': self.name,
'id': self.a_id,
'since': self.since,
'category': self.category,
'photos': self.photos,
'description': self.description,
'url': self.url,
'place': self.place,
}
def save(self):
"""Store an instance data in a persistance layer."""
raise NotImplementedError
def remove(self):
"""Remove an instance data from persistence layer."""
raise NotImplementedError
@classmethod
def find(cls, animal_id):
"""Find an animal by a given identifier."""
raise NotImplementedError
class AnimalDummy(AnimalBase):
"""Animal dummy model using a pickle storage."""
_data = []
@staticmethod
def _load():
try:
with open(_pickle_path, 'rb') as fp:
_data = pickle.load(fp)
except FileNotFoundError:
_data = []
return _data
def save(self):
"""Store an instance data in a persistance layer."""
self.remove()
self._data.append(self)
def remove(self):
"""Remove an instance data from persistence layer."""
try:
self._data.remove(self)
except ValueError:
pass
@classmethod
def find(cls, animal_id):
"""Find an animal by a given identifier."""
if not cls._data:
cls._data = cls._load()
try:
animal = next(filter(lambda a: a.a_id == animal_id, cls._data))
except StopIteration:
animal = None
return animal
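# Illustrative usage of the dummy model (not part of the original module; the identifier
# below is made up). Records are unpickled from fetched_data.pickle, which is produced
# elsewhere; note that save()/remove() only touch the in-memory list and never write
# the pickle back:
#
#     animal = AnimalDummy.find('azor-123')    # hypothetical shelter id
#     if animal is not None:
#         animal.name = 'Azor'
#         animal.save()                        # replaces the cached copy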
| mit | 5,686,128,399,900,805,000 | 26.033708 | 80 | 0.551953 | false |
thatch45/console | curse.py | 1 | 1193 | '''
The Salt Console, this system is used to display a curses console that posts
event and job data as it happens from within Salt
'''
# Import python libs
import curses
# Import salt libs
import salt.utils.event
class SaltConsole(object):
    '''
    Curses console for displaying Salt event and job data as it arrives.
    '''
def __init__(self, opts):
self.opts = opts
self.scr = curses.initscr()
        # salt.utils.event is a module, not a callable; assuming a master-side event
        # bus here. The sock_dir default is an assumption -- adjust to your master config.
        self.event = salt.utils.event.MasterEvent(self.opts.get('sock_dir', '/var/run/salt/master'))
self.__setup_screen()
def __setup_screen(self):
        '''
        Prepare the curses screen: raw input, hidden cursor, bordered window.
        '''
# Prep Curses
curses.noecho()
curses.cbreak()
curses.curs_set(0)
# Prep the screen
self.scr.keypad(1)
self.scr.box()
self.scr.addstr(1, 1, 'Salt Console')
        self.scr.addstr(2, 1, '='*34)  # assumption: the separator belongs on the row below the title (the original wrote to row 1 again, overlapping it)
# Turn it all on!
self.scr.refresh()
def term(self):
        '''
        Restore normal terminal behaviour and close down curses.
        '''
curses.curs_set(1)
curses.nocbreak()
self.scr.keypad(0)
curses.echo()
curses.endwin()
def run(self):
        '''
        Main loop: watch for events and update the display (not yet implemented).
        '''
while True:
try:
pass
except Exception:
self.term()
if __name__ == '__main__':
    console = SaltConsole({})  # __init__ requires an opts dict; the original passed nothing
console.run()
| apache-2.0 | 1,075,680,271,039,162,400 | 19.220339 | 76 | 0.506287 | false |
ActiveState/code | recipes/Python/280653_Efficient_database_trees/recipe-280653.py | 1 | 6437 | #!/usr/bin/python
import MySQLdb
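# Note (summary added for orientation, not part of the original recipe): the table uses
# the "nested set" technique -- every node stores a (lhs, rhs) interval, and a node's
# descendants are exactly the rows whose lhs falls between its own lhs and rhs, e.g.
#
#     root (1,8)
#       a  (2,3)
#       b  (4,7)
#         c (5,6)
#
# Inserting or deleting therefore shifts the lhs/rhs counters of everything to the
# right of the affected node, which is what the UPDATE statements below do.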
class Tree(object):
class Anon: pass
def __init__(self, conn):
self.conn = conn
def insert_siblings(self, names, siblingname):
self.conn.begin()
sibling = self.retrieve(siblingname)
cur = self.conn.cursor()
cur.execute("UPDATE tree SET lhs = lhs + %s WHERE lhs > %s", (len(names)*2, sibling.rhs))
cur.execute("UPDATE tree SET rhs = rhs + %s WHERE rhs > %s", (len(names)*2, sibling.rhs))
cur.executemany("INSERT INTO tree SET (lhs, rhs, parent, name) VALUES (%s, %s, %s, %s)",
[(sibling.rhs + 2*offset + 1,
sibling.rhs + 2*offset + 2,
sibling.parent, name) for offset, name in enumerate(names)])
self.conn.commit()
def insert_children(self, names, parentname):
self.conn.begin()
parent = self.retrieve(parentname)
cur = self.conn.cursor()
cur.execute("UPDATE tree SET lhs = lhs + %s WHERE lhs >= %s", (len(names)*2, parent.rhs))
cur.execute("UPDATE tree SET rhs = rhs + %s WHERE rhs >= %s", (len(names)*2, parent.rhs))
cur.executemany("INSERT INTO tree (lhs, rhs, parent, name) VALUES (%s, %s, %s, %s)",
[(parent.rhs + 2*offset,
parent.rhs + 2*offset + 1,
parent.ref, name) for offset, name in enumerate(names)])
self.conn.commit()
def delete(self, nodename):
self.conn.begin()
node = self.retrieve(nodename)
cur = self.conn.cursor()
cur.execute("DELETE FROM tree WHERE lhs BETWEEN %s AND %s", (node.lhs, node.rhs))
diff = node.rhs - node.lhs + 1;
cur.execute("UPDATE tree SET lhs = lhs - %s WHERE lhs > %s", (diff, node.rhs))
cur.execute("UPDATE tree SET rhs = rhs - %s WHERE rhs > %s", (diff, node.rhs))
self.conn.commit()
def create_root(self, name):
self.conn.begin()
cur = self.conn.cursor()
cur.execute("SELECT MAX(rhs) FROM tree");
maxrhs = cur.fetchall()[0][0]
if maxrhs is None: maxrhs = 1
else: maxrhs = int(maxrhs)
cur.execute("INSERT INTO tree (lhs, rhs, parent, name) VALUES (%s, %s, NULL, %s)", (maxrhs+1, maxrhs+2,name))
self.conn.commit()
def retrieve(self, name):
cur = self.conn.cursor()
cur.execute("SELECT ref, lhs, rhs, parent FROM tree WHERE name = %s", (name,))
result = cur.fetchall()[0]
retval = self.Anon()
retval.name = name
retval.ref = int(result[0])
retval.lhs = int(result[1])
retval.rhs = int(result[2])
if(result[3]):
retval.parent = int(result[3])
else:
retval.parent = None
return retval
def all_children_of(self, rootname):
cur = self.conn.cursor()
cur.execute(
"""SELECT t1.name FROM tree AS t1, tree AS t2
WHERE t1.lhs BETWEEN t2.lhs AND t2.rhs AND t1.name != t2.name AND t2.name = %s
ORDER BY t1.lhs""", (rootname,))
return [result[0] for result in cur.fetchall()]
def exact_children_of(self, rootname):
cur = self.conn.cursor()
cur.execute(
"""SELECT t1.name FROM tree AS t1, tree AS t2
WHERE t1.parent = t2.ref AND t2.name = %s
ORDER BY t1.lhs""", (rootname,))
return [result[0] for result in cur.fetchall()]
def all_siblings_of(self, siblingname):
cur = self.conn.cursor()
cur.execute(
"""SELECT t1.name FROM tree AS t1, tree AS t2
WHERE t1.parent = t2.parent AND t2.name = %s AND t1.name != %s
ORDER BY t1.lhs""", (siblingname, siblingname))
return [result[0] for result in cur.fetchall()]
def leaves_below(self, rootname):
cur = self.conn.cursor()
cur.execute(
"""SELECT t1.name FROM tree AS t1, tree AS t2
WHERE t1.lhs BETWEEN t2.lhs AND t2.rhs AND t1.lhs + 1 = t1.rhs AND t2.name = %s
ORDER BY t1.lhs""", (rootname,))
return [result[0] for result in cur.fetchall()]
def parent_of(self, childname):
cur = self.conn.cursor()
cur.execute(
"""SELECT t1.name FROM tree AS t1, tree AS t2
WHERE t1.ref = t2.parent AND t2.name = %s""", (childname,))
return cur.fetchall()[0][0]
def path_to(self, childname):
cur = self.conn.cursor()
cur.execute(
"""SELECT t1.name FROM tree AS t1, tree AS t2
WHERE t2.lhs BETWEEN t1.lhs AND t1.rhs AND t2.name = %s
ORDER BY t1.lhs""", (childname,))
return [result[0] for result in cur.fetchall()]
################
# Demo functions
################
def draw_tree(tree, rootname):
root = tree.retrieve(rootname)
cur = tree.conn.cursor()
cur.execute(
"""SELECT COUNT(t2.name) AS indentation, t1.name
FROM tree AS t1, tree AS t2
WHERE t1.lhs BETWEEN t2.lhs AND t2.rhs
AND t2.lhs BETWEEN %s AND %s
GROUP BY t1.name
ORDER BY t1.lhs""", (root.lhs, root.rhs))
for result in cur.fetchall():
print " " * (int(result[0])-1) + result[1]
def create_tree(tree, children_of, nameprefix = "", recursion_depth = 5):
names = [nameprefix + str(i) for i in xrange(recursion_depth)]
tree.insert_children(names, children_of)
for name in names:
create_tree(tree, name, name, recursion_depth-1)
if __name__ == "__main__":
import sys
conn = MySQLdb.Connect(user = sys.argv[1], passwd = sys.argv[2], db = sys.argv[3])
cur = conn.cursor()
try:
cur.execute("DROP TABLE tree")
except:
pass
cur.execute(
"""CREATE TABLE tree(ref int PRIMARY KEY AUTO_INCREMENT, parent int,
lhs int, rhs int, name varchar(255), UNIQUE INDEX(name)) TYPE=InnoDB""")
tree = Tree(conn)
tree.create_root("root")
create_tree(tree, "root")
draw_tree(tree, "root")
print "Number of children of root:", len(tree.all_children_of("root"))
print "Number of leaves below root:", len(tree.leaves_below("root"))
print "Exact children of root:", tree.exact_children_of("root")
print "All siblings of 1:", tree.all_siblings_of("1")
print "Parent of 11:", tree.parent_of("11")
print "Path to 1220:", tree.path_to("1220")
| mit | -1,672,295,614,083,886,300 | 37.54491 | 117 | 0.56284 | false |
auduny/chains | lib/chains/services/integra/__init__.py | 1 | 3831 | #!/usr/bin/python2
import sys
import time
from chains.service import Service
from chains.common import log
import serial
from . import iscp
class IntegraService(Service):
def onInit(self):
log('IntegraService init.')
self.model = self.config.get('model')
self.ser_dev = self.config.get('serial')
if self.ser_dev and self.model:
self.topics, self.cmds = iscp.model_cmds(self.model)
else:
log.error('Service needs serial service and model to work.')
sys.exit(1)
self.act_desc = {
'info': 'Integra/Onkyo model %s' % self.model,
'actions': []
}
for cmd in self.cmds:
for subcmd in cmd:
newcmd = {
'name': cmd + subcmd,
'info': self.cmds[cmd][subcmd]['description'],
'args': [
]
}
if 'type' in self.cmds[cmd][subcmd]:
param = self._type_desc(self.cmds[cmd][subcmd]['type'])
if param:
newcmd['args'].append(param)
self.act_desc['actions'].append(newcmd)
self.ser = serial.Serial(port=self.ser_dev,
baudrate=9600,
timeout=0.05, # 50ms reponse time according to spec
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
)
def onStart(self):
log('IntegraService starting.')
while not self._shutdown:
# TODO: check for self.command variable, and run command here
# rather than from runAction()?
line = self.ser.readline()
if line:
self.sendEvent(line[1:4], {'value': line[4:]})
# TODO: sleep probably not needed?
time.sleep(0.1)
def _write_cmd(self, command):
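        # ISCP serial framing: '!' is the start character, '1' addresses a receiver-type
        # unit, then the command string; the message is terminated with CR (an LF is
        # appended here as well).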
self.ser.write("!1" + command + '\r\n')
def _type_desc(self, tdesc):
# Create dict like this:
# {'info': 'Volume value.', 'default': None, 'required': True, 'key': 'volume', 'type': 'int'}
# TODO: Figure out info, get from main cmd?
arg_dict = {'info': '', 'default': None, 'required': True}
if tdesc['type'] == 'noarg':
return None
elif tdesc['type'] == 'string':
arg_dict.update({'type': 'string'})
elif tdesc['type'] == 'range':
arg_dict.update({'type': 'int(min=%d, max=%d)' % (tdesc['min'], tdesc['max']) })
else:
return None
return arg_dict
# Start of runAction command, from Stians pastebin example
def runAction(self, action, args):
if action[:3] in self.topics:
command = False
if action[3:] in self.cmds[action[:3]]:
                if not self.cmds[action[:3]][action[3:]]['type']:
                    command = action
else:
# At this point we know that action[3:] is a placeholder for args[0]
# Since no commands take more than one arg, always args[0]
command = iscp.check_cmd(action[:3], args[0], self.cmds[action[:3]][action[3:]]['type'])
elif action == 'raw':
command = self._write_cmd(args[0])
# TODO: this?
# elif action == 'multi':
# for arg in args:
# # do something with each arg
# pass
elif action == 'describe':
return self.act_desc
else:
raise NoSuchActionException(action)
if not command:
raise NoSuchActionException(action)
self._write_cmd(command)
| gpl-2.0 | -4,593,960,065,196,200,400 | 36.558824 | 108 | 0.497259 | false |
andrewraharjo/SDCND_Behavioral_Cloning | drive.py | 1 | 2467 | import argparse
import base64
import json
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
@sio.on('telemetry')
def telemetry(sid, data):
# The current steering angle of the car
steering_angle = data["steering_angle"]
# The current throttle of the car
throttle = data["throttle"]
# The current speed of the car
speed = data["speed"]
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
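    # Preprocessing presumably mirrors the training pipeline: keep rows 65-134 (the road
    # area), take every 4th pixel in both directions, keep a single colour channel, then
    # scale to roughly [-0.5, 0.5] before reshaping to a 1-image NHWC batch below.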
image_array = image_array[65:135:4, 0:-1:4, 0]
image_array = image_array / 255 - 0.5
transformed_image_array = image_array.reshape(( 1,
image_array.shape[0],
image_array.shape[1],
1))
# This model currently assumes that the features of the model are just the images. Feel free to change this.
steering_angle = float(model.predict(transformed_image_array, batch_size=1))
# The driving model currently just outputs a constant throttle. Feel free to edit this.
throttle = 0.2
print(steering_angle, throttle)
send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit("steer", data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
}, skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument('model', type=str,
help='Path to model definition json. Model weights should be on the same path.')
args = parser.parse_args()
with open(args.model, 'r') as jfile:
model = model_from_json(json.load(jfile))
model.compile("adam", "mse")
weights_file = args.model.replace('json', 'h5')
model.load_weights(weights_file)
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app) | mit | -1,027,597,746,887,725,800 | 30.240506 | 112 | 0.685853 | false |
mvidner/cnetworkmanager | networkmanager/applet/service/connection.py | 1 | 1949 | import dbus
import os
from networkmanager.base import Bus
from networkmanager.applet.settings import Settings
# server analog of cConnection
class Connection(dbus.service.Object):
def __init__(self, opath, conmap):
assert isinstance(conmap, dict)
bus = Bus()
dbus.service.Object.__init__(self, bus, opath)
self.settings = Settings(conmap)
@dbus.service.method(dbus_interface='org.freedesktop.NetworkManagerSettings.Connection',
sender_keyword='sender',
in_signature='', out_signature='a{sa{sv}}')
def GetSettings(self, sender):
#print "Getting settings:", self. __dbus_object_path__
# return self.settings.ConMap()
# grr, censoring secrets makes NM complain!?
# bnc#479566#c3: Until I figure out how to make it work with
# censored secrets, only pass the settings to the same user.
sender_uid = self.connection.get_unix_user(sender)
if sender_uid != 0 and sender_uid != os.geteuid():
e = "User %u is not permitted to read the settings" % sender_uid
print e
raise dbus.exceptions.DBusException(e) # could do NM_SETTINGS_ERROR_* instead
return self.settings.conmap
@dbus.service.method(dbus_interface='org.freedesktop.NetworkManagerSettings.Connection.Secrets',
in_signature='sasb', out_signature='a{sa{sv}}')
def GetSecrets(self, tag, hints, ask):
# FIXME respect args
print "Getting secrets:", self.__dbus_object_path__
return self.settings.SecMap()
@dbus.service.method(dbus_interface='org.freedesktop.NetworkManagerSettings.Connection',
in_signature='', out_signature='s')
def ID(self):
return self.settings.ID()
def Ssid(self):
return self.settings.Ssid()
def isNet(self, net_name):
return self.settings.isNet(net_name)
| gpl-2.0 | 258,665,625,584,957,630 | 40.468085 | 100 | 0.63725 | false |
sapcc/monasca-persister | monasca_persister/repositories/influxdb/abstract_repository.py | 1 | 1908 | # (C) Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import influxdb
from monasca_common.repositories.exceptions import InvalidUpdateException
from oslo_config import cfg
import six
from monasca_persister.monitoring import client
from monasca_persister.monitoring.metrics import INFLUXDB_INSERT_TIME
from monasca_persister.repositories import abstract_repository
STATSD_CLIENT = client.get_client()
STATSD_TIMER = STATSD_CLIENT.get_timer()
@six.add_metaclass(abc.ABCMeta)
class AbstractInfluxdbRepository(abstract_repository.AbstractRepository):
def __init__(self):
super(AbstractInfluxdbRepository, self).__init__()
self.conf = cfg.CONF
self._influxdb_client = influxdb.InfluxDBClient(
self.conf.influxdb.ip_address,
self.conf.influxdb.port,
self.conf.influxdb.user,
self.conf.influxdb.password,
self.conf.influxdb.database_name)
@STATSD_TIMER.timed(INFLUXDB_INSERT_TIME, sample_rate=0.1)
def write_batch(self, data_points):
try:
self._influxdb_client.write_points(data_points, 'ms')
except influxdb.exceptions.InfluxDBClientError as e:
if int(e.code) == 400:
raise InvalidUpdateException("InfluxDB insert failed: {}".format(repr(e)))
else:
raise
| apache-2.0 | 8,951,167,555,298,036,000 | 36.411765 | 90 | 0.718029 | false |
GinnyN/Team-Fortress-RPG-Generators | tf2rpg/tf2rpg/wsgi.py | 1 | 1134 | """
WSGI config for tf2rpg project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tf2rpg.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-3-clause | -913,370,350,527,203,500 | 39.5 | 79 | 0.799824 | false |
gouravshenoy/airavata | sandbox/simstream/example/openmm_example/openmm_streamer.py | 4 | 4719 | #
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from simstream import SimStream, DataReporter
import sys, json
class LogMonitor(object):
"""
A callable class that returns unprocessed lines in an open logfile.
Instance Variables:
logfile -- the path to the logfile to monitor
"""
def __init__(self, logfile):
"""
Set up a monitor for a logfile.
Arguments:
logfile -- the path to the logfile to monitor
"""
self.logfile = logfile
self._generator = None
self._version = sys.version_info[0]
def __call__(self):
"""
        Return all lines appended to the logfile since the last call.
"""
if not self._generator:
self._generator = self._monitor_logfile()
lines = []
line = self._next()
while line is not None:
lines.append(line)
line = self._next()
print(lines)
return lines
def _monitor_logfile(self):
"""
        Yield unread lines from the logfile one at a time, or None when no new data is available.
"""
try:
# Make the file persistent for the lifetime of the generator
with open(self.logfile) as f:
f.seek(0,2) # Move to the end of the file
while True:
# Get the next line or indicate the end of the file
line = f.readline()
if line:
yield line.strip()
else:
yield None
        except EnvironmentError as e:
            # Handle I/O exceptions in an OS-agnostic way
            print("Error: Could not open file %s: %s" % (self.logfile, e))
            while True:  # keep the generator alive so callers get None instead of StopIteration
                yield None
def _next(self):
"""
Python 2/3 agnostic retrieval of generator values.
"""
return self._generator.__next__() if self._version == 3 else self._generator.next()
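# Illustrative usage sketch (added for clarity; not part of the original script).
# LogMonitor is callable: each call returns the lines appended to the file since
# the previous call. The path below is an invented placeholder.
def _demo_log_monitor(path="example.log"):
    """Poll a logfile a few times and report how many new lines appeared (demonstration only)."""
    monitor = LogMonitor(path)
    for _ in range(3):
        new_lines = monitor()  # possibly empty list of lines added since the last call
        print("collected %d new lines" % len(new_lines))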
def get_relevant_log_lines(log_lines):
import re
relevant_lines = []
pattern = r'^\[.+\]'
for line in log_lines:
if re.match(pattern, line) is not None:
relevant_lines.append(line)
return relevant_lines
def calculate_rmsd(trajectory, topology, reference):
import mdtraj
traj = mdtraj.load(trajectory, top=topology)
ref = mdtraj.load(reference)
rmsd = mdtraj.rmsd(traj, ref)
data = {"step": str(traj.n_frames), "rmsd": str(rmsd[-1])}
return data
settings = {}
with open("../settings.json", 'r') as f:
settings = json.load(f)
if __name__ == "__main__":
logfile = sys.argv[1]
trajectory = sys.argv[2]
topology = sys.argv[3]
reference = sys.argv[4]
open(logfile, 'a').close()
open(trajectory, 'a').close()
log_reporter = DataReporter()
log_reporter.add_collector("logger",
LogMonitor(logfile),
settings["url"],
settings["exchange"],
limit=10,
interval=2,
exchange_type="direct", # settings["exchange_type"],
postprocessor=get_relevant_log_lines)
log_reporter.start_streaming("logger", "openmm.log")
rmsd_reporter = DataReporter()
rmsd_reporter.add_collector("rmsd",
calculate_rmsd,
settings["url"],
settings["exchange"],
limit=1,
interval=2,
exchange_type="direct", # settings["exchange_type"],
callback_args=[trajectory, topology, reference])
rmsd_reporter.start_streaming("rmsd", "openmm.rmsd")
streamer = SimStream(config=settings, reporters={"log_reporter": log_reporter, "rmsd_reporter": rmsd_reporter})
streamer.setup()
try:
streamer.start()
except KeyboardInterrupt:
streamer.stop()
| apache-2.0 | -140,607,918,609,995,520 | 30.46 | 115 | 0.562195 | false |
HackBulgaria/Odin | students/south_migrations/0013_auto__chg_field_user_avatar__add_unique_user_email__add_field_courseas.py | 1 | 7890 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'User.avatar'
db.alter_column(u'students_user', 'avatar', self.gf('django_resized.forms.ResizedImageField')(default='', max_length=100, max_width=200))
# Adding unique constraint on 'User', fields ['email']
db.create_unique(u'students_user', ['email'])
# Adding field 'CourseAssignment.group_time'
db.add_column(u'students_courseassignment', 'group_time',
self.gf('django.db.models.fields.SmallIntegerField')(default=1),
keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'User', fields ['email']
db.delete_unique(u'students_user', ['email'])
# Changing field 'User.avatar'
db.alter_column(u'students_user', 'avatar', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Deleting field 'CourseAssignment.group_time'
db.delete_column(u'students_courseassignment', 'group_time')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'courses.course': {
'Meta': {'object_name': 'Course'},
'SEO_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'SEO_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'application_until': ('django.db.models.fields.DateField', [], {}),
'applications_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {}),
'git_repository': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'next_season_mail_list': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'show_on_index': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '80'})
},
u'students.courseassignment': {
'Meta': {'unique_together': "(('user', 'course'),)", 'object_name': 'CourseAssignment'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['courses.Course']"}),
'group_time': ('django.db.models.fields.SmallIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'points': ('django.db.models.fields.PositiveIntegerField', [], {'default': "'0'"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['students.User']"})
},
u'students.user': {
'Meta': {'object_name': 'User'},
'avatar': ('django_resized.forms.ResizedImageField', [], {'max_length': '100', 'max_width': '200'}),
'courses': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['courses.Course']", 'through': u"orm['students.CourseAssignment']", 'symmetrical': 'False'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'faculty_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'github_account': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'linkedin_account': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'students.usernote': {
'Meta': {'object_name': 'UserNote'},
'assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['students.CourseAssignment']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['students.User']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['students'] | agpl-3.0 | -788,657,566,017,647,900 | 70.09009 | 195 | 0.566413 | false |
dan-cristian/haiot | main/logger_helper.py | 1 | 4021 | import socket
import logging
import inspect
import threading
from datetime import datetime
__author__ = 'Dan Cristian <[email protected]>'
class L:
def __init__(self):
pass
l = None
    # KEY setting: messages below this priority are filtered out and not logged
LOGGING_LEVEL = logging.INFO
LOG_FILE = None
# logging output will go to syslog
LOG_TO_SYSLOG = False
    # on systems without remote logging access (e.g. OpenShift), use the transport layer so a proxy node performs the logging
LOG_TO_TRANSPORT = False
# this logger is used to log remote logs messages using a different formatter
remote_logger = None
# this is to enable remote syslog like papertrail
SYSLOG_ADDRESS = None
SYSLOG_PORT = None
# reduce amount of logging when running in LIVE prod
RUN_IN_LIVE = False
@staticmethod
def init_logging():
import logging.handlers
class ContextFilter(logging.Filter):
hostname = socket.gethostname()
def filter(self, record):
record.hostname = ContextFilter.hostname
return True
# global LOGGING_LEVEL, LOG_FILE, LOG_TO_SYSLOG, SYSLOG_ADDRESS, SYSLOG_PORT, RUN_IN_LIVE
# global logger, remote_logger
# set logging general formatting
logging.basicConfig(format='%(asctime)s haiot %(levelname)s %(thread)d %(module)s:%(funcName)s %(message)s')
# %(threadName)s
L.l = logging.getLogger('haiot-' + socket.gethostname())
L.remote_logger = logging.getLogger('haiot-remote-' + socket.gethostname())
L.l.setLevel(L.LOGGING_LEVEL)
L.remote_logger.setLevel(L.LOGGING_LEVEL)
# init logger to cloud papertrail services
if (L.SYSLOG_ADDRESS is not None) and (L.SYSLOG_PORT is not None):
filter_log = ContextFilter()
L.l.addFilter(filter_log)
L.remote_logger.addFilter(filter_log)
syslog_papertrail = logging.handlers.SysLogHandler(address=(L.SYSLOG_ADDRESS, int(L.SYSLOG_PORT)))
pap_formatter = logging.Formatter(
'%(asctime)s %(hostname)s haiot %(levelname)s %(module)s:%(funcName)s %(message)s',
datefmt='%Y-%m-%dT%H:%M:%S')
syslog_papertrail.setFormatter(pap_formatter)
L.l.addHandler(syslog_papertrail)
remote_syslog_papertrail = logging.handlers.SysLogHandler(
address=(L.SYSLOG_ADDRESS, int(L.SYSLOG_PORT)))
remote_pap_formatter = logging.Formatter('')
remote_syslog_papertrail.setFormatter(remote_pap_formatter)
L.remote_logger.addHandler(remote_syslog_papertrail)
L.l.info('Initialised syslog with {}:{}'.format(L.SYSLOG_ADDRESS, L.SYSLOG_PORT))
# log to syslog standard file
if L.LOG_TO_SYSLOG:
try:
handler = logging.handlers.SysLogHandler(address='/dev/log')
L.l.addHandler(handler)
L.l.info('Syslog program started at {}'.format(socket.gethostname()))
except Exception as ex:
try:
ntl = logging.handlers.NTEventLogHandler(appname='haiot')
L.l.addHandler(ntl)
except Exception as ex:
print('Unable to init syslog handler err={}'.format(ex))
else:
if L.LOG_FILE is not None:
file_handler = logging.handlers.RotatingFileHandler(L.LOG_FILE, maxBytes=1024 * 1024 * 1, backupCount=3)
L.l.addHandler(file_handler)
L.l.info('Logging level is {}'.format(L.LOGGING_LEVEL))
        # todo: remove annoying info messages, but only for a few cases; effect unclear
logging.getLogger("requests").setLevel(logging.INFO)
        # propagate=False stops log writes to standard output. Set to True to show logs in PyCharm
if L.RUN_IN_LIVE:
L.l.info('Logger is set to live mode, disabling log propagation')
L.l.propagate = not L.RUN_IN_LIVE
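# Illustrative usage sketch (added for clarity; not part of the original module).
# The host, port and file path below are placeholders - real values normally come
# from the application's configuration before init_logging() is called.
def _demo_init_logging():
    """Configure the static L class and emit a test message (demonstration only)."""
    L.LOGGING_LEVEL = logging.DEBUG
    L.LOG_FILE = "/tmp/haiot-demo.log"      # rotating file handler used when not logging to syslog
    L.SYSLOG_ADDRESS = "logs.example.com"   # remote syslog host such as papertrail
    L.SYSLOG_PORT = 514
    L.init_logging()
    L.l.info("logging initialised on %s", socket.gethostname())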
| gpl-2.0 | 1,799,262,671,442,808,000 | 40.885417 | 120 | 0.626212 | false |
lucaswiman/parsimonious | parsimonious/grammar.py | 1 | 19426 | """A convenience which constructs expression trees from an easy-to-read syntax
Use this unless you have a compelling reason not to; it performs some
optimizations that would be tedious to do when constructing an expression tree
by hand.
"""
from collections import OrderedDict
from inspect import isfunction, ismethod
from six import (text_type, itervalues, iteritems, python_2_unicode_compatible, PY2)
from parsimonious.exceptions import BadGrammar, UndefinedLabel
from parsimonious.expressions import (Literal, Regex, Sequence, OneOf,
Lookahead, Optional, ZeroOrMore, OneOrMore, Not, TokenMatcher,
expression)
from parsimonious.nodes import NodeVisitor
from parsimonious.utils import evaluate_string
@python_2_unicode_compatible
class Grammar(OrderedDict):
"""A collection of rules that describe a language
You can start parsing from the default rule by calling ``parse()``
directly on the ``Grammar`` object::
g = Grammar('''
polite_greeting = greeting ", my good " title
greeting = "Hi" / "Hello"
title = "madam" / "sir"
''')
g.parse('Hello, my good sir')
Or start parsing from any of the other rules; you can pull them out of the
grammar as if it were a dictionary::
g['title'].parse('sir')
You could also just construct a bunch of ``Expression`` objects yourself
and stitch them together into a language, but using a ``Grammar`` has some
important advantages:
* Languages are much easier to define in the nice syntax it provides.
* Circular references aren't a pain.
* It does all kinds of whizzy space- and time-saving optimizations, like
factoring up repeated subexpressions into a single object, which should
increase cache hit ratio. [Is this implemented yet?]
"""
def __init__(self, rules='', **more_rules):
"""Construct a grammar.
:arg rules: A string of production rules, one per line.
:arg default_rule: The name of the rule invoked when you call
:meth:`parse()` or :meth:`match()` on the grammar. Defaults to the
first rule. Falls back to None if there are no string-based rules
in this grammar.
:arg more_rules: Additional kwargs whose names are rule names and
values are Expressions or custom-coded callables which accomplish
things the built-in rule syntax cannot. These take precedence over
``rules`` in case of naming conflicts.
"""
decorated_custom_rules = {
k: (expression(v, k, self) if isfunction(v) or ismethod(v) else v)
for k, v in iteritems(more_rules)}
exprs, first = self._expressions_from_rules(rules, decorated_custom_rules)
super(Grammar, self).__init__(exprs.items())
self.default_rule = first # may be None
def default(self, rule_name):
"""Return a new Grammar whose :term:`default rule` is ``rule_name``."""
new = self._copy()
new.default_rule = new[rule_name]
return new
def _copy(self):
"""Return a shallow copy of myself.
Deep is unnecessary, since Expression trees are immutable. Subgrammars
recreate all the Expressions from scratch, and AbstractGrammars have
no Expressions.
"""
new = Grammar.__new__(Grammar)
super(Grammar, new).__init__(iteritems(self))
new.default_rule = self.default_rule
return new
def _expressions_from_rules(self, rules, custom_rules):
"""Return a 2-tuple: a dict of rule names pointing to their
expressions, and then the first rule.
It's a web of expressions, all referencing each other. Typically,
there's a single root to the web of references, and that root is the
starting symbol for parsing, but there's nothing saying you can't have
multiple roots.
:arg custom_rules: A map of rule names to custom-coded rules:
Expressions
"""
tree = rule_grammar.parse(rules)
return RuleVisitor(custom_rules).visit(tree)
def parse(self, text, pos=0):
"""Parse some text with the :term:`default rule`.
:arg pos: The index at which to start parsing
"""
self._check_default_rule()
return self.default_rule.parse(text, pos=pos)
def match(self, text, pos=0):
"""Parse some text with the :term:`default rule` but not necessarily
all the way to the end.
:arg pos: The index at which to start parsing
"""
self._check_default_rule()
return self.default_rule.match(text, pos=pos)
def _check_default_rule(self):
"""Raise RuntimeError if there is no default rule defined."""
if not self.default_rule:
raise RuntimeError("Can't call parse() on a Grammar that has no "
"default rule. Choose a specific rule instead, "
"like some_grammar['some_rule'].parse(...).")
def __str__(self):
"""Return a rule string that, when passed to the constructor, would
reconstitute the grammar."""
exprs = [self.default_rule] if self.default_rule else []
exprs.extend(expr for expr in itervalues(self) if
expr is not self.default_rule)
return '\n'.join(expr.as_rule() for expr in exprs)
def __repr__(self):
"""Return an expression that will reconstitute the grammar."""
codec = 'string_escape' if PY2 else 'unicode_escape'
return "Grammar('%s')" % str(self).encode(codec)
class TokenGrammar(Grammar):
"""A Grammar which takes a list of pre-lexed tokens instead of text
This is useful if you want to do the lexing yourself, as a separate pass:
for example, to implement indentation-based languages.
"""
def _expressions_from_rules(self, rules, custom_rules):
tree = rule_grammar.parse(rules)
return TokenRuleVisitor(custom_rules).visit(tree)
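# Illustrative sketch (added for clarity; not part of the original module).
# A TokenGrammar consumes a sequence of pre-lexed tokens and, as described in
# TokenRuleVisitor below, matches literals against each token's ``type`` attribute.
# The token class and grammar here are invented; any object exposing ``type`` should do.
def _demo_token_grammar():
    """Parse a pre-lexed token sequence (demonstration only)."""
    class Tok(object):
        def __init__(self, type):
            self.type = type
    tg = TokenGrammar('''
        statement = kw_if condition
        kw_if     = "if"
        condition = "name"
    ''')
    return tg.parse([Tok('if'), Tok('name')])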
class BootstrappingGrammar(Grammar):
"""The grammar used to recognize the textual rules that describe other
grammars
This grammar gets its start from some hard-coded Expressions and claws its
way from there to an expression tree that describes how to parse the
grammar description syntax.
"""
def _expressions_from_rules(self, rule_syntax, custom_rules):
"""Return the rules for parsing the grammar definition syntax.
Return a 2-tuple: a dict of rule names pointing to their expressions,
and then the top-level expression for the first rule.
"""
# Hard-code enough of the rules to parse the grammar that describes the
# grammar description language, to bootstrap:
comment = Regex(r'#[^\r\n]*', name='comment')
meaninglessness = OneOf(Regex(r'\s+'), comment, name='meaninglessness')
_ = ZeroOrMore(meaninglessness, name='_')
equals = Sequence(Literal('='), _, name='equals')
label = Sequence(Regex(r'[a-zA-Z_][a-zA-Z_0-9]*'), _, name='label')
reference = Sequence(label, Not(equals), name='reference')
quantifier = Sequence(Regex(r'[*+?]'), _, name='quantifier')
# This pattern supports empty literals. TODO: A problem?
spaceless_literal = Regex(r'u?r?"[^"\\]*(?:\\.[^"\\]*)*"',
ignore_case=True,
dot_all=True,
name='spaceless_literal')
literal = Sequence(spaceless_literal, _, name='literal')
regex = Sequence(Literal('~'),
literal,
Regex('[ilmsux]*', ignore_case=True),
_,
name='regex')
atom = OneOf(reference, literal, regex, name='atom')
quantified = Sequence(atom, quantifier, name='quantified')
term = OneOf(quantified, atom, name='term')
not_term = Sequence(Literal('!'), term, _, name='not_term')
term.members = (not_term,) + term.members
sequence = Sequence(term, OneOrMore(term), name='sequence')
or_term = Sequence(Literal('/'), _, term, name='or_term')
ored = Sequence(term, OneOrMore(or_term), name='ored')
expression = OneOf(ored, sequence, term, name='expression')
rule = Sequence(label, equals, expression, name='rule')
rules = Sequence(_, OneOrMore(rule), name='rules')
# Use those hard-coded rules to parse the (more extensive) rule syntax.
# (For example, unless I start using parentheses in the rule language
# definition itself, I should never have to hard-code expressions for
# those above.)
rule_tree = rules.parse(rule_syntax)
# Turn the parse tree into a map of expressions:
return RuleVisitor().visit(rule_tree)
# The grammar for parsing PEG grammar definitions:
# This is a nice, simple grammar. We may someday add to it, but it's a safe bet
# that the future will always be a superset of this.
rule_syntax = (r'''
# Ignored things (represented by _) are typically hung off the end of the
# leafmost kinds of nodes. Literals like "/" count as leaves.
rules = _ rule*
rule = label equals expression
equals = "=" _
literal = spaceless_literal _
# So you can't spell a regex like `~"..." ilm`:
spaceless_literal = ~"u?r?\"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\""is /
~"u?r?'[^'\\\\]*(?:\\\\.[^'\\\\]*)*'"is
expression = ored / sequence / term
or_term = "/" _ term
ored = term or_term+
sequence = term term+
not_term = "!" term _
lookahead_term = "&" term _
term = not_term / lookahead_term / quantified / atom
quantified = atom quantifier
atom = reference / literal / regex / parenthesized
regex = "~" spaceless_literal ~"[ilmsux]*"i _
parenthesized = "(" _ expression ")" _
quantifier = ~"[*+?]" _
reference = label !equals
# A subsequent equal sign is the only thing that distinguishes a label
# (which begins a new rule) from a reference (which is just a pointer to a
# rule defined somewhere else):
label = ~"[a-zA-Z_][a-zA-Z_0-9]*" _
# _ = ~r"\s*(?:#[^\r\n]*)?\s*"
_ = meaninglessness*
meaninglessness = ~r"\s+" / comment
comment = ~r"#[^\r\n]*"
''')
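# Illustrative sketch (added for clarity; not part of the original module): a small
# grammar written in the rule syntax defined above, exercising comments, quantifiers,
# parentheses, negative lookahead and regexes. The toy language is invented.
def _demo_rule_syntax():
    """Parse a comma-separated word list using the constructs documented above (demonstration only)."""
    g = Grammar(r'''
        # A line is a word followed by zero or more comma-separated words.
        line = word ("," ws word)*
        word = !"," ~r"[a-z]+"
        ws   = ~r"\s*"
    ''')
    return g.parse("alpha, beta, gamma")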
class LazyReference(text_type):
"""A lazy reference to a rule, which we resolve after grokking all the
rules"""
name = u''
# Just for debugging:
def _as_rhs(self):
return u'<LazyReference to %s>' % self
class RuleVisitor(NodeVisitor):
"""Turns a parse tree of a grammar definition into a map of ``Expression``
objects
This is the magic piece that breathes life into a parsed bunch of parse
rules, allowing them to go forth and parse other things.
"""
quantifier_classes = {'?': Optional, '*': ZeroOrMore, '+': OneOrMore}
visit_expression = visit_term = visit_atom = NodeVisitor.lift_child
def __init__(self, custom_rules=None):
"""Construct.
:arg custom_rules: A dict of {rule name: expression} holding custom
rules which will take precedence over the others
"""
self.custom_rules = custom_rules or {}
def visit_parenthesized(self, node, parenthesized):
"""Treat a parenthesized subexpression as just its contents.
Its position in the tree suffices to maintain its grouping semantics.
"""
left_paren, _, expression, right_paren, _ = parenthesized
return expression
def visit_quantifier(self, node, quantifier):
"""Turn a quantifier into just its symbol-matching node."""
symbol, _ = quantifier
return symbol
def visit_quantified(self, node, quantified):
atom, quantifier = quantified
return self.quantifier_classes[quantifier.text](atom)
def visit_lookahead_term(self, node, lookahead_term):
ampersand, term, _ = lookahead_term
return Lookahead(term)
def visit_not_term(self, node, not_term):
exclamation, term, _ = not_term
return Not(term)
def visit_rule(self, node, rule):
"""Assign a name to the Expression and return it."""
label, equals, expression = rule
expression.name = label # Assign a name to the expr.
return expression
def visit_sequence(self, node, sequence):
"""A parsed Sequence looks like [term node, OneOrMore node of
``another_term``s]. Flatten it out."""
term, other_terms = sequence
return Sequence(term, *other_terms)
def visit_ored(self, node, ored):
first_term, other_terms = ored
return OneOf(first_term, *other_terms)
def visit_or_term(self, node, or_term):
"""Return just the term from an ``or_term``.
We already know it's going to be ored, from the containing ``ored``.
"""
slash, _, term = or_term
return term
def visit_label(self, node, label):
"""Turn a label into a unicode string."""
name, _ = label
return name.text
def visit_reference(self, node, reference):
"""Stick a :class:`LazyReference` in the tree as a placeholder.
We resolve them all later.
"""
label, not_equals = reference
return LazyReference(label)
def visit_regex(self, node, regex):
"""Return a ``Regex`` expression."""
tilde, literal, flags, _ = regex
flags = flags.text.upper()
pattern = literal.literal # Pull the string back out of the Literal
# object.
return Regex(pattern, ignore_case='I' in flags,
locale='L' in flags,
multiline='M' in flags,
dot_all='S' in flags,
unicode='U' in flags,
verbose='X' in flags)
def visit_spaceless_literal(self, spaceless_literal, visited_children):
"""Turn a string literal into a ``Literal`` that recognizes it."""
return Literal(evaluate_string(spaceless_literal.text))
def visit_literal(self, node, literal):
"""Pick just the literal out of a literal-and-junk combo."""
spaceless_literal, _ = literal
return spaceless_literal
def generic_visit(self, node, visited_children):
"""Replace childbearing nodes with a list of their children; keep
others untouched.
For our case, if a node has children, only the children are important.
Otherwise, keep the node around for (for example) the flags of the
regex rule. Most of these kept-around nodes are subsequently thrown
away by the other visitor methods.
We can't simply hang the visited children off the original node; that
would be disastrous if the node occurred in more than one place in the
tree.
"""
return visited_children or node # should semantically be a tuple
def _resolve_refs(self, rule_map, expr, done):
"""Return an expression with all its lazy references recursively
resolved.
Resolve any lazy references in the expression ``expr``, recursing into
all subexpressions.
:arg done: The set of Expressions that have already been or are
currently being resolved, to ward off redundant work and prevent
infinite recursion for circular refs
"""
if isinstance(expr, LazyReference):
label = text_type(expr)
try:
reffed_expr = rule_map[label]
except KeyError:
raise UndefinedLabel(expr)
return self._resolve_refs(rule_map, reffed_expr, done)
else:
if getattr(expr, 'members', ()) and expr not in done:
# Prevents infinite recursion for circular refs. At worst, one
# of `expr.members` can refer back to `expr`, but it can't go
# any farther.
done.add(expr)
expr.members = [self._resolve_refs(rule_map, member, done)
for member in expr.members]
return expr
def visit_rules(self, node, rules_list):
"""Collate all the rules into a map. Return (map, default rule).
The default rule is the first one. Or, if you have more than one rule
of that name, it's the last-occurring rule of that name. (This lets you
override the default rule when you extend a grammar.) If there are no
string-based rules, the default rule is None, because the custom rules,
due to being kwarg-based, are unordered.
"""
_, rules = rules_list
# Map each rule's name to its Expression. Later rules of the same name
# override earlier ones. This lets us define rules multiple times and
# have the last declaration win, so you can extend grammars by
# concatenation.
rule_map = OrderedDict((expr.name, expr) for expr in rules)
# And custom rules override string-based rules. This is the least
# surprising choice when you compare the dict constructor:
# dict({'x': 5}, x=6).
rule_map.update(self.custom_rules)
# Resolve references. This tolerates forward references.
done = set()
rule_map = OrderedDict((expr.name, self._resolve_refs(rule_map, expr, done))
for expr in itervalues(rule_map))
# isinstance() is a temporary hack around the fact that * rules don't
# always get transformed into lists by NodeVisitor. We should fix that;
# it's surprising and requires writing lame branches like this.
return rule_map, (rule_map[rules[0].name]
if isinstance(rules, list) and rules else None)
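# Illustrative sketch (added for clarity; not part of the original module): the
# visit_rules docstring above notes that a later rule of the same name wins, so a
# grammar can be extended by concatenating rule text. The rules here are invented.
def _demo_grammar_extension():
    """Override a rule by concatenating new rule text onto an existing grammar (demonstration only)."""
    base = Grammar('greeting = "Hi"')
    extended = Grammar(str(base) + '\ngreeting = "Hello"')
    return extended.parse('Hello')   # the later "greeting" definition wins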
class TokenRuleVisitor(RuleVisitor):
"""A visitor which builds expression trees meant to work on sequences of
pre-lexed tokens rather than strings"""
def visit_spaceless_literal(self, spaceless_literal, visited_children):
"""Turn a string literal into a ``TokenMatcher`` that matches
``Token`` objects by their ``type`` attributes."""
return TokenMatcher(evaluate_string(spaceless_literal.text))
def visit_regex(self, node, regex):
tilde, literal, flags, _ = regex
raise BadGrammar('Regexes do not make sense in TokenGrammars, since '
'TokenGrammars operate on pre-lexed tokens rather '
'than characters.')
# Bootstrap to level 1...
rule_grammar = BootstrappingGrammar(rule_syntax)
# ...and then to level 2. This establishes that the node tree of our rule
# syntax is built by the same machinery that will build trees of our users'
# grammars. And the correctness of that tree is tested, indirectly, in
# test_grammar.
rule_grammar = Grammar(rule_syntax)
# TODO: Teach Expression trees how to spit out Python representations of
# themselves. Then we can just paste that in above, and we won't have to
# bootstrap on import. Though it'll be a little less DRY. [Ah, but this is not
# so clean, because it would have to output multiple statements to get multiple
# refs to a single expression hooked up.]
| mit | 2,163,358,077,710,063,600 | 38.564155 | 84 | 0.621435 | false |
Cloudzero/cloudzero-reactor-aws | test/unit/aws/test_unit_cloudtrail.py | 1 | 4413 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-present, CloudZero, Inc. All rights reserved.
# Licensed under the BSD-style license. See LICENSE file in the project root for full license information.
import pytest
from reactor.aws.cloudtrail import traverse_map, CloudTrailEvent
def test_traverse_map():
d = {'x': {'x': 7},
'y': 8,
't': [{'t': 10}, {'b': 11}],
'z': {'z': {'z': 9}}}
m = {'x': {'x': 'x_field'},
'y': 'y_field',
't': {'t': 't_field'},
'z': {'z': {'z': 'z_field'}}}
realish_data = {
'eventversion': '1.06',
'useridentity': {
'type': 'AWSService',
'invokedBy': 'lambda.amazonaws.com'
},
'eventtime': '2018-05-19T16:15:32Z',
'eventsource': 'lambda.amazonaws.com',
'eventname': 'Invoke',
'awsregion': 'us-east-1',
'sourceipaddress': 'lambda.amazonaws.com',
'useragent': 'lambda.amazonaws.com',
'requestparameters': {
'functionName': 'arn:aws:lambda:us-east-1:123456789012:function:function-name',
'contentType': None,
'logType': None
},
'responseelements': None,
'additionaleventdata': {
'functionVersion': 'arn:aws:lambda:us-east-1:1234567890123:function:function-name:$LATEST'
},
'requestid': '6a733515-cc52-412b-bb7d-70766f30f5d0',
'eventid': 'a250f917-32ef-4174-ab0e-49d9fc955243',
'readonly': False,
'resources': [
{
'accountId': '1234567890123',
'type': 'AWS::Lambda::Function',
'ARN': 'arn:aws:lambda:us-east-1:998146006915:function:function-name'
},
{
'accountId': '1234567890123',
'type': 'AWS::DynamoDB::Stream',
'ARN': 'arn:aws:dynamodb:us-east-1:123456789012:table/table_name/stream/2018-02-19T05:56:02.100'
}
],
'eventtype': 'AwsApiCall',
'managementevent': False,
'recipientaccountid': '1234567890123',
'sharedeventid': '8b9f0853-6518-47c8-a8da-b648fc27d528'
}
realish_map = {
'resources': 'res_field',
'requestparameters': {
'functionName': 'arn:aws:lambda:us-east-1:123456789012:function:function-name'
}
}
res = traverse_map(d, m)
assert res['x_field'] == 7
assert res['y_field'] == 8
assert res['z_field'] == 9
assert res['t_field'] == 10
m = {'q': 'q_field'} # mapping for field not present in obj
res = traverse_map(d, m)
assert res == {}
# specifying a mapping of wrong shape for field that does exist.
# traverse_map doesn't support mapping a nested obj to a cz field.
m = {'x': 'wrong_shape'}
with pytest.raises(AttributeError):
traverse_map(d, m)
res = traverse_map(realish_data, realish_map)
assert res['res_field'] == [{'accountId': '1234567890123', 'type': 'AWS::Lambda::Function',
'ARN': 'arn:aws:lambda:us-east-1:998146006915:function:function-name'},
{'accountId': '1234567890123', 'type': 'AWS::DynamoDB::Stream',
'ARN': 'arn:aws:dynamodb:us-east-1:123456789012:table/'
'table_name/stream/2018-02-19T05:56:02.100'}]
def test_CloudTrailEvent():
raw_event = {'eventName': 'name',
'eventType': 'type',
'userIdentity': {'type': 'identity_type'}}
cte = CloudTrailEvent(raw_event)
assert cte.identity_type == 'identity_type'
assert cte.name == 'name'
assert cte.type == 'type'
# path is <identity type>.<event name>
path = cte.path
peel_a_level = CloudTrailEvent.peel_path(path)
peel_two_levels = CloudTrailEvent.peel_path(peel_a_level)
peel_three_levels = CloudTrailEvent.peel_path(peel_two_levels)
assert peel_a_level == 'identity_type'
assert peel_two_levels == 'ROOT'
assert peel_three_levels is None
# path may sometimes be <event name> if there is no
# event identity type.
raw_event_no_id_type = {'eventName': 'name',
'eventType': 'type',
'userIdentity': {'no_field': 'named_type'}}
cte_no_id_type = CloudTrailEvent(raw_event_no_id_type)
assert cte_no_id_type.path == 'name'
| bsd-3-clause | 2,382,189,271,826,306,000 | 35.471074 | 112 | 0.554725 | false |