Dataset columns:

| column | dtype | range / classes |
|---|---|---|
| max_stars_repo_path | string | lengths 4-286 |
| max_stars_repo_name | string | lengths 5-119 |
| max_stars_count | int64 | 0-191k |
| id | string | lengths 1-7 |
| content | string | lengths 6-1.03M |
| content_cleaned | string | lengths 6-1.03M |
| language | string | 111 classes |
| language_score | float64 | 0.03-1 |
| comments | string | lengths 0-556k |
| edu_score | float64 | 0.32-5.03 |
| edu_int_score | int64 | 0-5 |
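As an illustration only, rows with this schema can be filtered on the quality columns. The file name and parquet format below are assumptions for the sketch, not something stated by the table above:

```python
# Hypothetical sketch: load rows with the schema above and keep higher-quality English files.
import pandas as pd

df = pd.read_parquet("train-00000-of-00001.parquet")  # assumed split file name
keep = df[(df["language"] == "en") & (df["edu_int_score"] >= 3)]
print(keep[["max_stars_repo_path", "max_stars_count", "edu_score"]].head())
```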
qlearn.py | vyom1911/Q_learning_game | max_stars_count: 1 | id: 6631651 | language: en | language_score: 0.809686 | edu_score: 2.5336 | edu_int_score: 3
```python
#!/usr/bin/env python
from __future__ import print_function

import argparse
import skimage as skimage
from skimage import transform, color, exposure
from skimage.transform import rotate
from skimage.viewer import ImageViewer
import sys
sys.path.append("game/")
import wrapped_flappy_bird as game
import random
import numpy as np
from collections import deque
import json
from keras import initializers
from keras.initializers import normal, identity
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adam
import tensorflow as tf

GAME = 'bird'            # the name of the game being played for log files
CONFIG = 'nothreshold'
ACTIONS = 2              # number of valid actions
GAMMA = 0.99             # decay rate of past observations
OBSERVATION = 3200.      # timesteps to observe before training
EXPLORE = 3000000.       # frames over which to anneal epsilon
FINAL_EPSILON = 0.0001   # final value of epsilon
INITIAL_EPSILON = 0.1    # starting value of epsilon
REPLAY_MEMORY = 50000    # number of previous transitions to remember
BATCH = 32               # size of minibatch
FRAME_PER_ACTION = 1
LEARNING_RATE = 1e-4

img_rows, img_cols = 80, 80
# Convert image into black and white
img_channels = 4  # We stack 4 frames


def buildmodel():
    print("Now we build the model")
    model = Sequential()
    model.add(Convolution2D(32, 8, 8, subsample=(4, 4), border_mode='same', input_shape=(img_rows, img_cols, img_channels)))  # 80*80*4
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 4, 4, subsample=(2, 2), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dense(2))

    adam = Adam(lr=LEARNING_RATE)
    model.compile(loss='mse', optimizer=adam)
    print("We finish building the model")
    return model


def trainNetwork(model, args):
    # open up a game state to communicate with emulator
    game_state = game.GameState()

    # store the previous observations in replay memory
    D = deque()

    # get the first state by doing nothing and preprocess the image to 80x80x4
    do_nothing = np.zeros(ACTIONS)
    do_nothing[0] = 1
    x_t, r_0, terminal = game_state.frame_step(do_nothing)

    x_t = skimage.color.rgb2gray(x_t)
    x_t = skimage.transform.resize(x_t, (80, 80))
    x_t = skimage.exposure.rescale_intensity(x_t, out_range=(0, 255))

    s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)
    # print(s_t.shape)

    # In Keras, need to reshape
    s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2])  # 1*80*80*4

    if args['mode'] == 'Run':
        OBSERVE = 999999999  # We keep observing, never train
        epsilon = FINAL_EPSILON
        print("Now we load weight")
        model.load_weights("model.h5")
        adam = Adam(lr=LEARNING_RATE)
        model.compile(loss='mse', optimizer=adam)
        print("Weight load successfully")
    else:  # We go to training mode
        OBSERVE = OBSERVATION
        epsilon = INITIAL_EPSILON

    t = 0
    while (True):
        loss = 0
        Q_sa = 0
        action_index = 0
        r_t = 0
        a_t = np.zeros([ACTIONS])

        # choose an action epsilon-greedily
        if t % FRAME_PER_ACTION == 0:
            if random.random() <= epsilon:
                print("----------Random Action----------")
                action_index = random.randrange(ACTIONS)
                a_t[action_index] = 1
            else:
                q = model.predict(s_t)  # input a stack of 4 images, get the prediction
                max_Q = np.argmax(q)
                action_index = max_Q
                a_t[max_Q] = 1

        # We reduce epsilon gradually
        if epsilon > FINAL_EPSILON and t > OBSERVE:
            epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE

        # run the selected action and observe the next state and reward
        x_t1_colored, r_t, terminal = game_state.frame_step(a_t)

        x_t1 = skimage.color.rgb2gray(x_t1_colored)
        x_t1 = skimage.transform.resize(x_t1, (80, 80))
        x_t1 = skimage.exposure.rescale_intensity(x_t1, out_range=(0, 255))

        x_t1 = x_t1.reshape(1, x_t1.shape[0], x_t1.shape[1], 1)  # 1x80x80x1
        s_t1 = np.append(x_t1, s_t[:, :, :, :3], axis=3)

        # store the transition in D
        D.append((s_t, action_index, r_t, s_t1, terminal))
        if len(D) > REPLAY_MEMORY:
            D.popleft()

        # only train if done observing
        if t > OBSERVE:
            # sample a minibatch to train on
            minibatch = random.sample(D, BATCH)

            inputs = np.zeros((BATCH, s_t.shape[1], s_t.shape[2], s_t.shape[3]))  # 32, 80, 80, 4
            print(inputs.shape)
            targets = np.zeros((inputs.shape[0], ACTIONS))  # 32, 2

            # Now we do the experience replay
            for i in range(0, len(minibatch)):
                state_t = minibatch[i][0]
                action_t = minibatch[i][1]  # this is the action index
                reward_t = minibatch[i][2]
                state_t1 = minibatch[i][3]
                terminal = minibatch[i][4]
                # if terminated, the target only equals the reward
                inputs[i:i + 1] = state_t  # I saved down s_t
                targets[i] = model.predict(state_t)  # predicted Q-value for each button/action
                Q_sa = model.predict(state_t1)

                if terminal:
                    targets[i, action_t] = reward_t
                else:
                    targets[i, action_t] = reward_t + GAMMA * np.max(Q_sa)

            # targets2 = normalize(targets)
            loss += model.train_on_batch(inputs, targets)

        s_t = s_t1
        t = t + 1

        # save progress every 1000 iterations
        if t % 1000 == 0:
            print("Now we save model")
            model.save_weights("model.h5", overwrite=True)
            with open("model.json", "w") as outfile:
                json.dump(model.to_json(), outfile)

        # print info
        state = ""
        if t <= OBSERVE:
            state = "observe"
        elif t > OBSERVE and t <= OBSERVE + EXPLORE:
            state = "explore"
        else:
            state = "train"

        print("TIMESTEP", t, "/ STATE", state,
              "/ EPSILON", epsilon, "/ ACTION", action_index, "/ REWARD", r_t,
              "/ Q_MAX ", np.max(Q_sa), "/ Loss ", loss)

    # (unreachable while the loop above runs forever)
    print("Episode finished!")
    print("************************")


def playGame(args):
    model = buildmodel()
    trainNetwork(model, args)


def main():
    parser = argparse.ArgumentParser(description='Description of your program')
    parser.add_argument('-m', '--mode', help='Train / Run', required=True)
    args = vars(parser.parse_args())
    playGame(args)


if __name__ == "__main__":
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    from keras import backend as K
    K.set_session(sess)
    main()
```
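The replay loop in trainNetwork fits the network toward the one-step Q-learning target r_t + GAMMA * max_a Q(s_{t+1}, a). A standalone numpy sketch of that target computation, with illustrative values that are not part of the original script:

```python
# Minimal illustration of the target used in trainNetwork's experience-replay loop.
import numpy as np

GAMMA = 0.99
q_pred = np.array([0.2, -0.1])   # stands in for model.predict(state_t) with ACTIONS = 2
q_next = np.array([0.5, 0.3])    # stands in for model.predict(state_t1)
reward, action, terminal = 1.0, 0, False

target = q_pred.copy()
target[action] = reward if terminal else reward + GAMMA * np.max(q_next)
print(target)  # approximately [1.495, -0.1]; only the taken action's Q-value is updated
```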
cdhweb/people/tests/conftest.py | bwhicks/cdh-web | max_stars_count: 1 | id: 6631652 | language: en | language_score: 0.927932 | edu_score: 2.206507 | edu_int_score: 2
```python
from datetime import date, timedelta

import pytest

from cdhweb.pages.models import RelatedLinkType
from cdhweb.pages.tests.conftest import to_streamfield_safe
from cdhweb.people.models import (
    PeopleLandingPage,
    Person,
    PersonRelatedLink,
    Position,
    Profile,
    Title,
)
from cdhweb.projects.models import Grant, GrantType, Membership, Project, Role


def create_person_with_position(
    position,
    start_date=None,
    end_date=None,
    **person_opts,
):
    """factory method to create person with position for fixtures"""
    position = Title.objects.get_or_create(title=position)[0]
    person = Person.objects.create(**person_opts)
    Position.objects.create(
        person=person, title=position, start_date=start_date, end_date=end_date
    )
    return person


def make_people_landing_page(homepage):
    """Create a test people landing page underneath the homepage."""
    landing = PeopleLandingPage(title="people", slug="people", tagline="cdh people")
    homepage.add_child(instance=landing)
    homepage.save()
    return landing


def make_staffer():
    """fixture to create a staff person with two staff positions"""
    staff_person = create_person_with_position(
        "DH Developer",
        start_date=date(2016, 3, 1),
        end_date=date(2018, 3, 1),
        first_name="Staffer",
        cdh_staff=True,
        pu_status="stf",
    )
    rse = Title.objects.get_or_create(title="Research Software Engineer")[0]
    # give the staffer a second position
    Position.objects.create(person=staff_person, title=rse, start_date=date(2018, 3, 2))
    return staff_person


def make_postdoc():
    """fixture to create a postdoc person"""
    return create_person_with_position(
        "Postdoctoral Fellow",
        start_date=date(2018, 3, 1),
        first_name="Postdoc",
        cdh_staff=True,
        pu_status="stf",
    )


def make_student():
    """fixture to create a student person record"""
    return create_person_with_position(
        "Undergraduate Assistant",
        start_date=date(2018, 3, 1),
        first_name="student",
        cdh_staff=True,
        pu_status="undergraduate",
    )


def make_grad_pi(projects_landing_page):
    """Create a grad student person with PI role on an associated project."""
    person = Person.objects.create(
        first_name="Tom", cdh_staff=False, pu_status="graduate"
    )
    project = Project(title="Chinese Exchange Poems")
    projects_landing_page.add_child(instance=project)
    projects_landing_page.save()
    project_director = Role.objects.get_or_create(title="Project Director")[0]
    Membership.objects.create(
        project=project,
        person=person,
        role=project_director,
        start_date=date(2015, 9, 1),
    )
    dataset_curation = GrantType.objects.get_or_create(grant_type="Dataset Curation")[0]
    Grant.objects.create(
        grant_type=dataset_curation,
        project=project,
        start_date=date(2015, 9, 1),
        end_date=date.today() + timedelta(days=30),
    )
    return person


def make_grad_pm(projects_landing_page):
    """Create a grad student person with PM role on an associated project."""
    person = Person.objects.create(
        first_name="Tom",
        cdh_staff=False,
        pu_status="graduate",
        email="<EMAIL>",
    )
    project = Project(title="Reconstructing the Past")
    projects_landing_page.add_child(instance=project)
    projects_landing_page.save()
    project_manager = Role.objects.get_or_create(title="Project Manager")[0]
    Membership.objects.create(
        project=project,
        person=person,
        role=project_manager,
        start_date=date(2015, 9, 1),
    )
    dataset_curation = GrantType.objects.get_or_create(grant_type="Dataset Curation")[0]
    Grant.objects.create(
        grant_type=dataset_curation,
        project=project,
        start_date=date(2015, 9, 1),
        end_date=date.today() + timedelta(days=30),
    )
    return person


def make_faculty_pi(projects_landing_page):
    """Create a faculty person with PI role on an associated project."""
    person = Person.objects.create(first_name="Josh", cdh_staff=False, pu_status="fac")
    project = Project(title="MEP")
    projects_landing_page.add_child(instance=project)
    projects_landing_page.save()
    project_director = Role.objects.get_or_create(title="Project Director")[0]
    dataset_curation = GrantType.objects.get_or_create(grant_type="Dataset Curation")[0]
    Grant.objects.create(
        grant_type=dataset_curation,
        project=project,
        start_date=date(2019, 9, 1),
        end_date=date.today() + timedelta(days=30),
    )
    Membership.objects.create(
        project=project,
        person=person,
        role=project_director,
        start_date=date(2016, 9, 1),
    )
    website = RelatedLinkType.objects.get_or_create(name="Website")[0]
    PersonRelatedLink.objects.create(person=person, type=website, url="example.com")
    return person


def make_staff_pi(projects_landing_page):
    """Create a staff (PUL) person with PI role on an associated project."""
    person = Person.objects.create(
        first_name="Thomas", cdh_staff=False, pu_status="stf"
    )
    project = Project(title="SVP")
    projects_landing_page.add_child(instance=project)
    projects_landing_page.save()
    project_director = Role.objects.get_or_create(title="Project Director")[0]
    dataset_curation = GrantType.objects.get_or_create(grant_type="Dataset Curation")[0]
    Grant.objects.create(
        grant_type=dataset_curation,
        project=project,
        start_date=date(2020, 9, 1),
        end_date=date.today() + timedelta(days=30),
    )
    Membership.objects.create(
        project=project,
        person=person,
        role=project_director,
        start_date=date(2016, 9, 1),
    )
    return person


def make_faculty_exec():
    """Create a faculty person with executive committee position."""
    return create_person_with_position(
        "Executive Committee Member",
        start_date=date(2018, 3, 1),
        first_name="Anna",
        cdh_staff=False,
        pu_status="fac",
    )


def make_staff_exec():
    """Create a staff (OIT) person who sits with the executive committee."""
    return create_person_with_position(
        "Sits with Executive Committee",
        start_date=date(2010, 3, 1),
        first_name="Jay",
        cdh_staff=False,
        pu_status="stf",
    )


def make_people(projects_landing_page):
    """Create a variety of people and associated projects for testing."""
    return {
        "staffer": make_staffer(),
        "postdoc": make_postdoc(),
        "student": make_student(),
        "grad_pi": make_grad_pi(projects_landing_page),
        "grad_pm": make_grad_pm(projects_landing_page),
        "faculty_pi": make_faculty_pi(projects_landing_page),
        "staff_pi": make_staff_pi(projects_landing_page),
        "faculty_exec": make_faculty_exec(),
        "staff_exec": make_staff_exec(),
    }


def make_staffer_profile(people_landing_page, staffer):
    """Create a profile page for a given staff person."""
    profile = Profile(
        person=staffer,
        title="Staffer",
        education="Princeton University",
        body=to_streamfield_safe(
            "<p>I'm a member of the CDH staff. I do digital humanities.</p>"
        ),
    )
    people_landing_page.add_child(instance=profile)
    people_landing_page.save()
    return profile


@pytest.fixture
def people_landing_page(db, homepage):
    return make_people_landing_page(homepage)


@pytest.fixture
def staffer(db):
    return make_staffer()


@pytest.fixture
def postdoc(db):
    return make_postdoc()


@pytest.fixture
def student(db):
    return make_student()


@pytest.fixture
def grad_pi(db, projects_landing_page):
    return make_grad_pi(projects_landing_page)


@pytest.fixture
def grad_pm(db, projects_landing_page):
    return make_grad_pm(projects_landing_page)


@pytest.fixture
def faculty_pi(db, projects_landing_page):
    return make_faculty_pi(projects_landing_page)


@pytest.fixture
def staff_pi(db, projects_landing_page):
    return make_staff_pi(projects_landing_page)


@pytest.fixture
def faculty_exec(db):
    return make_faculty_exec()


@pytest.fixture
def staff_exec(db):
    return make_staff_exec()


@pytest.fixture
def staffer_profile(db, people_landing_page, staffer):
    return make_staffer_profile(people_landing_page, staffer)
```
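These factory functions and fixtures are pulled into tests by parameter name. A hypothetical test module using them might look like this; the test names and assertions are illustrative, not taken from the repository:

```python
# Hypothetical usage sketch for the fixtures above.
from cdhweb.people.models import Position


def test_staffer_has_two_positions(staffer):
    # make_staffer() creates one ended DH Developer position plus a current RSE position
    assert Position.objects.filter(person=staffer).count() == 2


def test_people_landing_page_slug(people_landing_page):
    assert people_landing_page.slug == "people"
```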
hackerrank-python/np-min-and-max.py | fmelihh/competitive-programming-solutions | max_stars_count: 2 | id: 6631653 | language: en | language_score: 0.524992 | edu_score: 3.553104 | edu_int_score: 4

```python
# https://www.hackerrank.com/challenges//problem

import numpy

if __name__ == '__main__':
    N, M = map(int, input().split())
    arr = list()
    for _ in range(N):
        arr.append(list(map(int, input().split())))

    numpy_arr = numpy.array(arr)
    min_elements = numpy.min(arr, axis = 1)
    print(max(min_elements))
```
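The solution takes per-row minima (axis 1) and then the maximum of those minima. A small worked example with made-up input:

```python
import numpy

arr = numpy.array([[2, 5], [3, 7], [1, 3], [4, 0]])
print(numpy.min(arr, axis=1))        # [2 3 1 0]  - minimum of each row
print(max(numpy.min(arr, axis=1)))   # 3          - maximum of the row minima
```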
cwe_relation_cve/cwe_mitre_scrapper.py | pawlaczyk/sarenka_tools | max_stars_count: 3 | id: 6631654 | language: pl | language_score: 0.980958 | edu_score: 2.956787 | edu_int_score: 3

```python
from typing import List, Dict

from bs4 import BeautifulSoup
import requests
import re


class CWEMitreScraper:
    cwe_mitre_url = "https://cwe.mitre.org/data/definitions/"

    def __init__(self, id_cwe: str):
        if "-" in id_cwe:
            id_cwe = id_cwe.split("-")[1]  # handles the "CWE-79" form
        self.id_cwe = id_cwe
        self.cwe_url = self.generate_definition_url()
        source = requests.get(self.cwe_url).text
        self.soup = BeautifulSoup(source, 'lxml')

    def generate_definition_url(self) -> str:
        return self.cwe_mitre_url + self.id_cwe + ".html"

    def get_title(self):
        """Return the title of the weakness."""
        try:
            title = self.soup.find("h2").string
            title = title.split(":")[1]
        except AttributeError:
            title = self.soup.find("h2")
        try:
            return title.strip()
        except Exception:
            print("get_title failed for: ", self.id_cwe)
            return None

    def get_description(self) -> str:
        try:
            description = self.soup.find(
                "div", {"id": "oc_" + self.id_cwe + "_Description"})
            return description.string
        except Exception as ex:
            print("get_description failed for: ", self.id_cwe)
            return None

    def get_likelihood(self) -> str:
        """Likelihood that exploits exist and that the weakness is actually exploited."""
        likehood = self.soup.find(
            "div", {"id": "oc_" + self.id_cwe + "_Likelihood_Of_Exploit"})
        if likehood:
            return likehood.string
        return None

    def get_technical_impact(self) -> List[str]:
        """Common consequences of exploiting the weakness."""
        result = []
        try:
            div = self.soup.find(
                "div", {"id": "oc_" + self.id_cwe + "_Common_Consequences"})
            table = div.find("table")
            tr = table.findAll("tr")
            for i in tr[1:]:  # skip the first row, it holds no data
                row = i.find("p", {"class": "smaller"})
                # only one match expected - re.findall returns a list
                impact = re.findall("<i>(.*?)</i>", str(row))[0]
                impact = impact.split(";")
                impact = [i.strip() for i in impact]
                result.extend(impact)
        except Exception as ex:
            print("get_technical_impact failed for: ", self.id_cwe)
        # without duplicates - switching to a dict would be more efficient
        return list(set(result))

    def get_caused_by(self):
        """
        Stage during which the vulnerability is introduced, e.g. during implementation.
        """
        try:
            div_main = self.soup.find(
                "div", {"id": "oc_" + self.id_cwe + "_Modes_Of_Introduction"})
            table = div_main.find("table")
            tr = table.findAll("tr")
            field = tr[1].text  # e.g.: Architecture and Design
            all_td = tr[-1].findAll("td")
            process = all_td[0].text  # e.g.: Implementation
            # e.g.: This weakness is caused during implementation of an architectural security tactic.
            description = all_td[1].text
            description = description.split(":")[-1].strip()
            return {
                "field": field,
                "process": process,
                "description": description
            }
        # when the table is missing
        except Exception:
            print("get_caused_by failed for: ", self.id_cwe)
            return {
                "field": None,
                "process": None,
                "description": None
            }

    def get_cve_examples(self) -> List[Dict]:
        """
        Example vulnerabilities in concrete software products for this type of weakness.
        """
        result = []
        try:
            div_main = self.soup.find(
                "div", {"id": "oc_" + self.id_cwe + "_Observed_Examples"})
            table = div_main.find("table", {"class": "Detail"})
            tr_list = table.findAll("tr")
            for tr in tr_list[1:]:  # the first row holds no data
                id_CVE = None
                description = None
                mitre_url = None
                if tr.find("div", {"class": "indent"}):
                    description = tr.find("div", {"class": "indent"}).text
                if tr.find("a"):
                    id_CVE = tr.find("a").text
                    mitre_url = tr.find("a")["href"]
                result.append({
                    "id_CVE": id_CVE,
                    "description": description,
                    "mitre_url": mitre_url,  # e.g. https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2006-3568
                })
        except Exception as ex:
            print("get_cve_examples failed for: ", self.id_cwe)
            result.append({
                "id_CVE": None,
                "description": None,
                "mitre_url": None,
            })
        return result

    def get_data(self) -> Dict:
        """
        Return all data extracted while scraping.
        """
        source = requests.get(self.cwe_url).text
        soup = BeautifulSoup(source, 'lxml')
        result = {
            "cwe_id": "CWE-" + self.id_cwe,
            "title": self.get_title(),
            "description": self.get_description(),
            "likehood": self.get_likelihood(),
            "technical_impact": self.get_technical_impact(),
            "caused_by": self.get_caused_by(),
            "cve_examples": self.get_cve_examples()
        }
        return result
```
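A minimal usage sketch for the class above; it requires network access and the printed values depend on what cwe.mitre.org returns at request time:

```python
# Usage sketch for CWEMitreScraper.
scraper = CWEMitreScraper("CWE-79")   # "79" works too; the constructor strips the prefix
data = scraper.get_data()
print(data["cwe_id"], "-", data["title"])
for cve in data["cve_examples"]:
    print(cve["id_CVE"], cve["mitre_url"])
```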
testsuite/test_cint4c1e.py | Minyan910/libcint | max_stars_count: 0 | id: 6631655 | language: en | language_score: 0.49638 | edu_score: 1.957984 | edu_int_score: 2
```python
#!/usr/bin/env python
# $Id$
# -*- coding: utf-8
'''
test libcint
'''

__author__ = "<NAME> <<EMAIL>>"

import sys
import os
import ctypes
import numpy

_cint = numpy.ctypeslib.load_library('libcint', '.')

PTR_LIGHT_SPEED = 0
PTR_COMMON_ORIG = 1
PTR_SHIELDING_ORIG = 4
PTR_RINV_ORIG = 4
PTR_RINV_ZETA = 7
PTR_ENV_START = 20

CHARGE_OF = 0
PTR_COORD = 1
NUC_MOD_OF = 2
PTR_ZETA = 3
RAD_GRIDS = 4
ANG_GRIDS = 5
ATM_SLOTS = 6

ATOM_OF = 0
ANG_OF = 1
NPRIM_OF = 2
NCTR_OF = 3
KAPPA_OF = 4
PTR_EXP = 5
PTR_COEFF = 6
BAS_SLOTS = 8

natm = 4
nbas = 0
atm = numpy.zeros((natm,ATM_SLOTS), dtype=numpy.int32)
bas = numpy.zeros((1000,BAS_SLOTS), dtype=numpy.int32)
env = numpy.zeros(10000)

off = PTR_ENV_START
for i in range(natm):
    atm[i, CHARGE_OF] = (i+1)*2
    atm[i, PTR_COORD] = off
    env[off+0] = .2 * (i+1)
    env[off+1] = .3 + (i+1) * .5
    env[off+2] = .1 - (i+1) * .5
    off += 3
off0 = off

# basis with kappa > 0
nh = 0

bas[nh,ATOM_OF ] = 0
bas[nh,ANG_OF  ] = 1
bas[nh,KAPPA_OF] = 1
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP] = off
env[off+0] = 1
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1
off += 2
nh += 1

bas[nh,ATOM_OF ] = 1
bas[nh,ANG_OF  ] = 2
bas[nh,KAPPA_OF] = 2
bas[nh,NPRIM_OF] = 2
bas[nh,NCTR_OF ] = 2
bas[nh,PTR_EXP] = off
env[off+0] = 5
env[off+1] = 3
bas[nh,PTR_COEFF] = off + 2
env[off+2] = 1
env[off+3] = 2
env[off+4] = 4
env[off+5] = 1
off += 6
nh += 1

bas[nh,ATOM_OF ] = 2
bas[nh,ANG_OF  ] = 3
bas[nh,KAPPA_OF] = 3
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP ] = off
env[off+0] = 1
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1
off += 2
nh += 1

bas[nh,ATOM_OF ] = 3
bas[nh,ANG_OF  ] = 4
bas[nh,KAPPA_OF] = 4
bas[nh,NPRIM_OF] = 1
bas[nh,NCTR_OF ] = 1
bas[nh,PTR_EXP ] = off
env[off+0] = .5
bas[nh,PTR_COEFF] = off + 1
env[off+1] = 1.
off = off + 2
nh += 1

nbas = nh

# basis with kappa < 0
n = off - off0
for i in range(n):
    env[off+i] = env[off0+i]
for i in range(nh):
    bas[i+nh,ATOM_OF ] = bas[i,ATOM_OF ]
    bas[i+nh,ANG_OF  ] = bas[i,ANG_OF  ] - 1
    bas[i+nh,KAPPA_OF] =-bas[i,KAPPA_OF]
    bas[i+nh,NPRIM_OF] = bas[i,NPRIM_OF]
    bas[i+nh,NCTR_OF ] = bas[i,NCTR_OF ]
    bas[i+nh,PTR_EXP ] = bas[i,PTR_EXP ] + n
    bas[i+nh,PTR_COEFF]= bas[i,PTR_COEFF] + n
    env[bas[i+nh,PTR_COEFF]] /= 2 * env[bas[i,PTR_EXP]]
env[bas[5,PTR_COEFF]+0] = env[bas[1,PTR_COEFF]+0] / (2 * env[bas[1,PTR_EXP]+0])
env[bas[5,PTR_COEFF]+1] = env[bas[1,PTR_COEFF]+1] / (2 * env[bas[1,PTR_EXP]+1])
env[bas[5,PTR_COEFF]+2] = env[bas[1,PTR_COEFF]+2] / (2 * env[bas[1,PTR_EXP]+0])
env[bas[5,PTR_COEFF]+3] = env[bas[1,PTR_COEFF]+3] / (2 * env[bas[1,PTR_EXP]+1])

natm = ctypes.c_int(natm)
nbas = ctypes.c_int(nbas)
c_atm = atm.ctypes.data_as(ctypes.c_void_p)
c_bas = bas.ctypes.data_as(ctypes.c_void_p)
c_env = env.ctypes.data_as(ctypes.c_void_p)

opt = ctypes.POINTER(ctypes.c_void_p)()
_cint.CINTlen_spinor.restype = ctypes.c_int

from pyscf import gto
mol = gto.M()
mol._atm = atm[:natm.value]
mol._bas = bas[:nbas.value]
mol._env = env
coords = mol.atom_coords()
ao = mol.eval_gto('GTOval_sph', coords)


def test_int2c1e_sph():
    fnpp1 = _cint.cint1e_ipiprinv_sph
    fnp1p = _cint.cint1e_iprinvip_sph
    nullptr = ctypes.POINTER(ctypes.c_void_p)()

    def by_pp(shls, shape):
        buf = numpy.empty(shape+(9,), order='F')
        fnpp1(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
              c_atm, natm, c_bas, nbas, c_env, nullptr)
        ref = buf[:,:,0] + buf[:,:,4] + buf[:,:,8]
        fnp1p(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
              c_atm, natm, c_bas, nbas, c_env, nullptr)
        ref += (buf[:,:,0] + buf[:,:,4] + buf[:,:,8])*2
        shls = (shls[1], shls[0])
        shape = (shape[1], shape[0]) + (9,)
        buf = numpy.empty(shape, order='F')
        fnpp1(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
              c_atm, natm, c_bas, nbas, c_env, nullptr)
        ref += (buf[:,:,0] + buf[:,:,4] + buf[:,:,8]).transpose(1,0)
        return ref * (-.25/numpy.pi)

    #intor = _cint.cint4c1e_sph
    ao_loc = mol.ao_loc_nr()
    for nucid in range(mol.natm):
        mol.set_rinv_orig(coords[nucid])
        for j in range(nbas.value):
            j0 = ao_loc[j]
            j1 = ao_loc[j+1]
            for i in range(j+1):
                di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
                dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]
                shls = (i, j)
                i0 = ao_loc[i]
                i1 = ao_loc[i+1]
                buf = numpy.einsum('i,j->ij', ao[nucid,i0:i1], ao[nucid,j0:j1])
                ref = by_pp(shls, (di,dj))
                dd = abs(ref - buf).sum()
                if dd > 1e-8:
                    print "* FAIL: cint2c1e", " shell:", i, j, "err:", dd
                    return
    print 'cint1e_ipiprinv_sph cint1e_iprinvip_sph pass'


def test_int4c1e_sph():
    fnpp1 = _cint.cint2e_ipip1_sph
    fnp1p = _cint.cint2e_ipvip1_sph
    nullptr = ctypes.POINTER(ctypes.c_void_p)()

    def by_pp(shls, shape):
        buf = numpy.empty(shape+(9,), order='F')
        fnpp1(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
              c_atm, natm, c_bas, nbas, c_env, nullptr)
        ref = buf[:,:,:,:,0] + buf[:,:,:,:,4] + buf[:,:,:,:,8]
        fnp1p(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
              c_atm, natm, c_bas, nbas, c_env, nullptr)
        ref += (buf[:,:,:,:,0] + buf[:,:,:,:,4] + buf[:,:,:,:,8])*2
        shls = (shls[1], shls[0]) + shls[2:]
        shape = (shape[1], shape[0]) + shape[2:] + (9,)
        buf = numpy.empty(shape, order='F')
        fnpp1(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
              c_atm, natm, c_bas, nbas, c_env, nullptr)
        ref += (buf[:,:,:,:,0] + buf[:,:,:,:,4] + buf[:,:,:,:,8]).transpose(1,0,2,3)
        return ref * (-.25/numpy.pi)

    intor = _cint.cint4c1e_sph
    for l in range(nbas.value):
        for k in range(l+1):
            for j in range(nbas.value):
                for i in range(j+1):
                    di = (bas[i,ANG_OF] * 2 + 1) * bas[i,NCTR_OF]
                    dj = (bas[j,ANG_OF] * 2 + 1) * bas[j,NCTR_OF]
                    dk = (bas[k,ANG_OF] * 2 + 1) * bas[k,NCTR_OF]
                    dl = (bas[l,ANG_OF] * 2 + 1) * bas[l,NCTR_OF]
                    shls = (i, j, k, l)
                    buf = numpy.empty((di,dj,dk,dl), order='F')
                    intor(buf.ctypes.data_as(ctypes.c_void_p), (ctypes.c_int*4)(*shls),
                          c_atm, natm, c_bas, nbas, c_env, nullptr)
                    ref = by_pp(shls, (di,dj,dk,dl))
                    dd = abs(ref - buf).max()
                    if dd > 1e-6:
                        print "* FAIL: cint4c1e", " shell:", i, j, k, l, "err:", dd
                        return
    print 'cint4c1e_sph pass'


test_int2c1e_sph()
test_int4c1e_sph()
```
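The test above calls libcint directly through ctypes. For comparison only, pyscf wraps the same integral kernels behind Mole.intor; the following is an illustrative sketch of that higher-level route, not part of this test suite, and the integral name spelling may differ across pyscf versions:

```python
# Illustrative sketch: evaluate spherical one-electron overlap integrals via pyscf's wrapper.
from pyscf import gto

mol2 = gto.M(atom="H 0 0 0; H 0 0 0.74", basis="sto-3g")
ovlp = mol2.intor("cint1e_ovlp_sph")  # same cint kernels, called through pyscf
print(ovlp.shape)
```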
polyglotjsonnlp/__init__.py | dcavar/Polyglot-JSON-NLP | max_stars_count: 4 | id: 6631656

```python
#!/usr/bin/env python3

"""
(C) 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>

Wrappers for Polyglot to JSON-NLP output format.

Licensed under the Apache License 2.0, see the file LICENSE for more details.
Brought to you by the NLP-Lab.org (https://nlp-lab.org/)!
"""

import functools
from collections import OrderedDict
from typing import Dict, Tuple

import polyglot
import pyjsonnlp
from polyglot.text import Text
from pyjsonnlp.pipeline import Pipeline

name = "polyglotjsonnlp"

__cache = {}
__version__ = "0.2.6"


def cache_it(func):
    """A decorator to cache function response based on params. Add it to top of function as @cache_it."""
    global __cache

    @functools.wraps(func)
    def cached(*args):
        f_name = func.__name__
        s = ''.join(map(str, args))
        # initialize the per-function cache on first use
        if f_name not in __cache:
            __cache[f_name] = {}
        if s not in __cache[f_name]:
            __cache[f_name][s] = func(*args)
        return __cache[f_name][s]
    return cached


class PolyglotPipeline(Pipeline):
    @staticmethod
    def get_polyglot_sentences(text, neighbors, d, doc):
        """
        Process a text using polyglot, returning language, named entities, pos tags, morphology, and optionally synonyms

        :param text: The text to process
        :param neighbors: Whether or not to include neighbors
        """
        token_id = 1
        token_lookup: Dict[Tuple[int, int], int] = {}  # map (sent_id, polyglot token index) to our token index
        for sent_num, sent in enumerate(doc.sentences):
            current_sent = {
                'id': str(sent_num),
                'tokenFrom': token_id,
                'tokenTo': token_id + len(sent),  # begin inclusive, end exclusive
                'tokens': []
            }
            #d['sentences'] = current_sent
            #print(current_sent)
            d['id'] = sent_num
            d['text'] = str(sent)
            d['sentences'][current_sent['id']] = current_sent

            entities = {}
            for ent in sent.entities:
                for i in range(ent.start, ent.end):
                    entities[i] = ent.tag
            tags = dict((i, tag[1]) for i, tag in enumerate(sent.pos_tags))

            for token_idx, token in enumerate(sent.words):
                token_lookup[(sent_num, token_idx)] = token_id
                t = {
                    'id': token_id,
                    'text': token,
                    'upos': tags[token_idx],
                    'lang': token.language,
                    'morphemes': list(token.morphemes),
                    'labels': [{
                        'type': 'sentiment',
                        'label': str(token.polarity)
                    }],
                    'features': {
                        'Overt': True
                    }
                }

                # match wordnet format
                if neighbors:
                    try:
                        s = {'neighbors': [w for w in token.neighbors]}
                        if len(s['neighbors']) > 0:
                            t['synsets'] = [s]
                    except KeyError:
                        pass  # OOV words, e.g. contractions, will throw errors

                # named entities
                if token_idx in entities:
                    t['entity'] = entities[token_idx]  # todo map to common entity types? e.g. No I-LOC, etc.
                    # check if this is the first or an internal token in an entity
                    t['entity_iob'] = 'B' if token_idx-1 not in entities or entities[token_idx] != entities[token_idx-1] else 'I'
                else:
                    t['entity_iob'] = 'O'

                current_sent['tokens'].append(token_id)
                token_id += 1
                d['tokenList'].append(t)
                #d['tokenList'][t['id']] = t

            # multi-word expressions
            expression_id = 0
            for ent in sent.entities:
                if ent.end - ent.start > 1:
                    d['expressions'].append({
                        'id': expression_id,
                        'type': entities[ent.start],
                        'tokens': [token_lookup[(sent_num, t)] for t in range(ent.start, ent.end)]
                    })
                    expression_id += 1

    @staticmethod
    def get_nlp_json(text, neighbors) -> OrderedDict:
        """Process the Polyglot output into JSON"""
        j: OrderedDict = pyjsonnlp.get_base()
        j['DC.source'] = 'polyglot {}'.format(polyglot.__version__)
        d: OrderedDict = pyjsonnlp.get_base_document(1)
        #j['documents'] = get_base_document(text)
        #d = j.get('documents')[len(j.get('documents'))-1]
        #print(d['id'])
        #j['documents'][d['id']] = d
        j['documents'].append(d)
        d['meta']['DC.source'] = 'polyglot {}'.format(polyglot.__version__)
        doc = Text(text)
        d['meta']['DC.language'] = doc.language.code
        PolyglotPipeline.get_polyglot_sentences(text, neighbors, d, doc)
        return pyjsonnlp.remove_empty_fields(j)

    @staticmethod
    def process(text: str, neighbors=False, coreferences=False, constituents=False, dependencies=False, expressions=False, **kwargs):
        """Process the text into JSON-NLP

        :param **kwargs:
        """
        return PolyglotPipeline.get_nlp_json(text, neighbors)


if __name__ == "__main__":
    test_text = "The Mueller Report is a very long report. We spent a long time analyzing it. Trump wishes we didn't, but that didn't stop the intrepid NlpLab."
    print(PolyglotPipeline.process(test_text, coreferences=True, constituents=False))
```
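The __main__ block prints the whole JSON-NLP dictionary. Based on the fields assigned in get_polyglot_sentences, the token list can be walked like this; a sketch only, since remove_empty_fields may drop keys that were left empty and the polyglot models must already be downloaded:

```python
# Sketch: walk the structure produced by PolyglotPipeline.process().
result = PolyglotPipeline.process("Berlin is the capital of Germany.")
for token in result["documents"][0]["tokenList"]:
    print(token["id"], token["text"], token["upos"], token.get("entity", "-"))
```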
print(PolyglotPipeline.process(test_text, coreferences=True, constituents=False))
| en | 0.702827 | #!/usr/bin/env python3 (C) 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> Wrappers for Polyglot to JSON-NLP output format. Licensed under the Apache License 2.0, see the file LICENSE for more details. Brought to you by the NLP-Lab.org (https://nlp-lab.org/)! A decorator to cache function response based on params. Add it to top of function as @cache_it. Process a text using polyglot, returning language, named entities, pos tags, morphology, and optionally synonyms :param text: The text to process :param neighbors: Whether or not to include neighbors # map (sent_id, polyglot token index) to our token index # begin inclusive, end exclusive #d['sentences'] = current_sent #print(current_sent) # match wordnet format # OOV words, e.g. contractions, will throw errors # named entities # todo map to common entity types? e.g. No I-LOC, etc. # check if this is the first or an internal token in an entity #d['tokenList'][t['id']] = t # multi-word expressions Process the Polyglot output into JSON #j['documents'] = get_base_document(text) #d = j.get('documents')[len(j.get('documents'))-1] #print(d['id']) #j['documents'][d['id']] = d Process the text into JSON-NLP :param **kwargs: | 2.729683 | 3 |
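A note on the cache_it decorator in the file above: as written it reads __cache[f_name] before that key is ever created, so the first cached call would raise a KeyError unless the cache is seeded elsewhere. Below is a minimal, self-contained sketch of the same memoization idea, not the polyglotjsonnlp implementation, that creates the per-function store lazily.

import functools

_cache = {}

def cache_it(func):
    """Memoize a function on the string form of its positional args."""
    @functools.wraps(func)
    def cached(*args):
        store = _cache.setdefault(func.__name__, {})   # create the per-function store lazily
        key = ''.join(map(str, args))
        if key not in store:
            store[key] = func(*args)
        return store[key]
    return cached

@cache_it
def analyse(text):
    print("computing", text)
    return text.upper()

analyse("hello")   # prints once
analyse("hello")   # second call is served from the cache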
online_recommend/user_portrait/user_tohive.py | hfhfn/db_recommend | 0 | 6631657 | from user_portrait.merge_action import get_merge_action
from user_portrait.pre_user import merge_user_data, get_pre_table, pre_user_data
from user_portrait.user_profile import get_action_topic_sort, get_action_weight, get_action_topic_weight, \
get_user_profile, normal_action_weight
from utils.default import user_pre_db, user_portrait_db, u_spark, uf_topK
from utils.save_tohive import RetToHive
class SaveUserProfile(object):
pre_db = user_pre_db
portrait_db = user_portrait_db
spark_app = u_spark
topK = uf_topK
def save_pre_userdata(self, table_type, table_num, start_num=0):
"""
保存用户点击数据
3873, 15328, 69706, 48092, 54080, 73413, 92161, 115959, 164695, 119800, 131214, 133817, 156526
156044, 168976, 209320, 247980, 275296, 271935, 318472, 369976, 394286
保存用户收藏数据
50, 357, 1748, 666, 893, 1247, 1307, 1650, 2095, 1544, 1691, 1835, 1940
2153, 2093, 3477, 3027, 4424, 3232, 5411, 4346, 4410
保存用户播放数据
7 -> 2886, 8-> 82663, 9-> 104271, 10-> 139787, 11-> 204070, 12-> 276867, 13-> 346466, 14-> 477115
15-> 580029, 16-> 733859, 17-> 878207, 18-> 964412, 19-> 1293973, 20-> 1989563, 21-> 2385531
:return:
"""
# table_type = 'click'
# table_type = 'top'
# table_type = 'play'
i = start_num
# tmp = []
while i < table_num: # 修改 i 初始值和while循环的结束值来控制循环表的数量
pre_table = get_pre_table(self.spark_app, table_type, i)
try:
ret = pre_user_data(self.spark_app, table_type, pre_table)
# count = ret.count()
# if count == 0:
# print("=" * 50 + table_type + '第 {} 个表处理后没有数据'.format(i) + "=" * 50)
# i += 1
# continue
table = 'user_{}_{}'.format(table_type, i)
try:
RetToHive(self.spark_app, ret, self.pre_db, table)
except:
print("=" * 50 + table_type + '第 {} 个表处理后没有数据'.format(i) + "=" * 50)
except:
print("此数据表处理出现错误,直接跳过")
i += 1
def save_merge_userdata(self, table_type, table_num, start_num=0):
"""
保存 点击数据总表 1月10号以前3590949 8356704
保存 收藏数据总表 1月10号以前49596 114538
保存 播放数据总表 1月10号以前6192026 16995825
:return:
"""
# table_type = 'click'
# table_type = 'top'
# table_type = 'play'
merge_ret = merge_user_data(self.spark_app, table_type, table_num, start_num)
merge_table = 'merge_{}'.format(table_type)
RetToHive(self.spark_app, merge_ret, self.pre_db, merge_table)
import gc
del merge_ret
gc.collect()
def save_merge_action(self, start_time=0):
"""
保存 所有行为数据表 25467028
:return:
"""
merge_action_ret = get_merge_action(self.pre_db, start_time)
merge_action_table = 'merge_action'
RetToHive(self.spark_app, merge_action_ret, self.pre_db, merge_action_table)
import gc
del merge_action_ret
gc.collect()
def save_action_weight(self):
"""
保存 用户对电影的行为权重 3397006
:return:
"""
action_weight_ret = get_action_weight(self.pre_db)
action_weight_table = 'action_weight'
RetToHive(self.spark_app, action_weight_ret, self.portrait_db, action_weight_table)
import gc
del action_weight_ret
gc.collect()
def save_action_weight_normal(self):
"""
保存 归一化用户对电影的行为权重
:return:
"""
action_weight_ret = normal_action_weight(self.portrait_db)
action_weight_table = 'action_weight_normal'
RetToHive(self.spark_app, action_weight_ret, self.portrait_db, action_weight_table)
import gc
del action_weight_ret
gc.collect()
def save_action_topic_weight(self):
"""
保存 行为+主题词+权重数据表 79722719 统计有用户行为的电影数量: 旧数据(18501)
:return:
"""
action_topic_ret = get_action_topic_weight(self.portrait_db)
action_topic_table = 'action_topic_weight'
RetToHive(self.spark_app, action_topic_ret, self.portrait_db, action_topic_table)
import gc
del action_topic_ret
gc.collect()
def save_action_topic_sort(self):
"""
保存 行为+主题词权重排序数据表 58960371
用户电影类别画像词:旧数据 5640599 统计有用户行为的电影数量: 旧数据 18501
用户平均画像词数量: 旧数据 5640599/18501 = 305
看过电影类别的用户数量: 旧数据 44206
:return:
"""
action_topic_sort_ret = get_action_topic_sort(self.portrait_db)
action_topic_sort_table = 'action_topic_sort'
RetToHive(self.spark_app, action_topic_sort_ret, self.portrait_db, action_topic_sort_table)
import gc
del action_topic_sort_ret
gc.collect()
def save_user_profile(self):
"""
用户画像 topK: 11361313
:return:
"""
user_profile_ret = get_user_profile(self.portrait_db, self.topK)
user_profile_table = 'user_profile'
RetToHive(self.spark_app, user_profile_ret, self.portrait_db, user_profile_table)
import gc
del user_profile_ret
gc.collect()
if __name__ == '__main__':
pass
| from user_portrait.merge_action import get_merge_action
from user_portrait.pre_user import merge_user_data, get_pre_table, pre_user_data
from user_portrait.user_profile import get_action_topic_sort, get_action_weight, get_action_topic_weight, \
get_user_profile, normal_action_weight
from utils.default import user_pre_db, user_portrait_db, u_spark, uf_topK
from utils.save_tohive import RetToHive
class SaveUserProfile(object):
pre_db = user_pre_db
portrait_db = user_portrait_db
spark_app = u_spark
topK = uf_topK
def save_pre_userdata(self, table_type, table_num, start_num=0):
"""
保存用户点击数据
3873, 15328, 69706, 48092, 54080, 73413, 92161, 115959, 164695, 119800, 131214, 133817, 156526
156044, 168976, 209320, 247980, 275296, 271935, 318472, 369976, 394286
保存用户收藏数据
50, 357, 1748, 666, 893, 1247, 1307, 1650, 2095, 1544, 1691, 1835, 1940
2153, 2093, 3477, 3027, 4424, 3232, 5411, 4346, 4410
保存用户播放数据
7 -> 2886, 8-> 82663, 9-> 104271, 10-> 139787, 11-> 204070, 12-> 276867, 13-> 346466, 14-> 477115
15-> 580029, 16-> 733859, 17-> 878207, 18-> 964412, 19-> 1293973, 20-> 1989563, 21-> 2385531
:return:
"""
# table_type = 'click'
# table_type = 'top'
# table_type = 'play'
i = start_num
# tmp = []
while i < table_num: # 修改 i 初始值和while循环的结束值来控制循环表的数量
pre_table = get_pre_table(self.spark_app, table_type, i)
try:
ret = pre_user_data(self.spark_app, table_type, pre_table)
# count = ret.count()
# if count == 0:
# print("=" * 50 + table_type + '第 {} 个表处理后没有数据'.format(i) + "=" * 50)
# i += 1
# continue
table = 'user_{}_{}'.format(table_type, i)
try:
RetToHive(self.spark_app, ret, self.pre_db, table)
except:
print("=" * 50 + table_type + '第 {} 个表处理后没有数据'.format(i) + "=" * 50)
except:
print("此数据表处理出现错误,直接跳过")
i += 1
def save_merge_userdata(self, table_type, table_num, start_num=0):
"""
保存 点击数据总表 1月10号以前3590949 8356704
保存 收藏数据总表 1月10号以前49596 114538
保存 播放数据总表 1月10号以前6192026 16995825
:return:
"""
# table_type = 'click'
# table_type = 'top'
# table_type = 'play'
merge_ret = merge_user_data(self.spark_app, table_type, table_num, start_num)
merge_table = 'merge_{}'.format(table_type)
RetToHive(self.spark_app, merge_ret, self.pre_db, merge_table)
import gc
del merge_ret
gc.collect()
def save_merge_action(self, start_time=0):
"""
保存 所有行为数据表 25467028
:return:
"""
merge_action_ret = get_merge_action(self.pre_db, start_time)
merge_action_table = 'merge_action'
RetToHive(self.spark_app, merge_action_ret, self.pre_db, merge_action_table)
import gc
del merge_action_ret
gc.collect()
def save_action_weight(self):
"""
保存 用户对电影的行为权重 3397006
:return:
"""
action_weight_ret = get_action_weight(self.pre_db)
action_weight_table = 'action_weight'
RetToHive(self.spark_app, action_weight_ret, self.portrait_db, action_weight_table)
import gc
del action_weight_ret
gc.collect()
def save_action_weight_normal(self):
"""
保存 归一化用户对电影的行为权重
:return:
"""
action_weight_ret = normal_action_weight(self.portrait_db)
action_weight_table = 'action_weight_normal'
RetToHive(self.spark_app, action_weight_ret, self.portrait_db, action_weight_table)
import gc
del action_weight_ret
gc.collect()
def save_action_topic_weight(self):
"""
保存 行为+主题词+权重数据表 79722719 统计有用户行为的电影数量: 旧数据(18501)
:return:
"""
action_topic_ret = get_action_topic_weight(self.portrait_db)
action_topic_table = 'action_topic_weight'
RetToHive(self.spark_app, action_topic_ret, self.portrait_db, action_topic_table)
import gc
del action_topic_ret
gc.collect()
def save_action_topic_sort(self):
"""
保存 行为+主题词权重排序数据表 58960371
用户电影类别画像词:旧数据 5640599 统计有用户行为的电影数量: 旧数据 18501
用户平均画像词数量: 旧数据 5640599/18501 = 305
看过电影类别的用户数量: 旧数据 44206
:return:
"""
action_topic_sort_ret = get_action_topic_sort(self.portrait_db)
action_topic_sort_table = 'action_topic_sort'
RetToHive(self.spark_app, action_topic_sort_ret, self.portrait_db, action_topic_sort_table)
import gc
del action_topic_sort_ret
gc.collect()
def save_user_profile(self):
"""
用户画像 topK: 11361313
:return:
"""
user_profile_ret = get_user_profile(self.portrait_db, self.topK)
user_profile_table = 'user_profile'
RetToHive(self.spark_app, user_profile_ret, self.portrait_db, user_profile_table)
import gc
del user_profile_ret
gc.collect()
if __name__ == '__main__':
pass
| zh | 0.586154 | 保存用户点击数据 3873, 15328, 69706, 48092, 54080, 73413, 92161, 115959, 164695, 119800, 131214, 133817, 156526 156044, 168976, 209320, 247980, 275296, 271935, 318472, 369976, 394286 保存用户收藏数据 50, 357, 1748, 666, 893, 1247, 1307, 1650, 2095, 1544, 1691, 1835, 1940 2153, 2093, 3477, 3027, 4424, 3232, 5411, 4346, 4410 保存用户播放数据 7 -> 2886, 8-> 82663, 9-> 104271, 10-> 139787, 11-> 204070, 12-> 276867, 13-> 346466, 14-> 477115 15-> 580029, 16-> 733859, 17-> 878207, 18-> 964412, 19-> 1293973, 20-> 1989563, 21-> 2385531 :return: # table_type = 'click' # table_type = 'top' # table_type = 'play' # tmp = [] # 修改 i 初始值和while循环的结束值来控制循环表的数量 # count = ret.count() # if count == 0: # print("=" * 50 + table_type + '第 {} 个表处理后没有数据'.format(i) + "=" * 50) # i += 1 # continue 保存 点击数据总表 1月10号以前3590949 8356704 保存 收藏数据总表 1月10号以前49596 114538 保存 播放数据总表 1月10号以前6192026 16995825 :return: # table_type = 'click' # table_type = 'top' # table_type = 'play' 保存 所有行为数据表 25467028 :return: 保存 用户对电影的行为权重 3397006 :return: 保存 归一化用户对电影的行为权重 :return: 保存 行为+主题词+权重数据表 79722719 统计有用户行为的电影数量: 旧数据(18501) :return: 保存 行为+主题词权重排序数据表 58960371 用户电影类别画像词:旧数据 5640599 统计有用户行为的电影数量: 旧数据 18501 用户平均画像词数量: 旧数据 5640599/18501 = 305 看过电影类别的用户数量: 旧数据 44206 :return: 用户画像 topK: 11361313 :return: | 1.984988 | 2 |
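The SaveUserProfile class above chains Spark jobs that score user behaviour (click/top/play), normalise the weights and persist each stage to Hive via RetToHive. The snippet below is a self-contained PySpark sketch of the core weighting-and-normalisation step only; the per-action weights, column names and target table name are illustrative assumptions, not values taken from this project.

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.appName("action_weight_sketch").getOrCreate()

# toy behaviour log: one row per (user, movie, action)
actions = spark.createDataFrame(
    [(1, 10, "click"), (1, 10, "play"), (1, 11, "click"), (2, 10, "top")],
    ["user_id", "movie_id", "action"])

# assumed per-action weights -- the real values belong to the project's config
weights = spark.createDataFrame(
    [("click", 1.0), ("top", 3.0), ("play", 5.0)], ["action", "w"])

# one weight per (user, movie), then min-max normalise within each user
scored = (actions.join(weights, "action")
          .groupBy("user_id", "movie_id")
          .agg(F.sum("w").alias("weight")))
bounds = scored.groupBy("user_id").agg(F.min("weight").alias("lo"),
                                       F.max("weight").alias("hi"))
normal = (scored.join(bounds, "user_id")
          .withColumn("weight_normal",
                      F.when(F.col("hi") == F.col("lo"), F.lit(1.0))
                       .otherwise((F.col("weight") - F.col("lo"))
                                  / (F.col("hi") - F.col("lo")))))
normal.show()
# the class above persists each stage with RetToHive(...); a plain-Spark equivalent
# would be normal.write.saveAsTable("user_portrait.action_weight_normal")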
rebuild.py | vazrupe/fake-og-server | 0 | 6631658 | <filename>rebuild.py
import re
from urllib.request import urlopen
from urllib.parse import urljoin
from bs4 import BeautifulSoup
open_graph_rgx = re.compile('^og:(?P<meta>.+)')
html_template = '''
<html>
<head>
{og_tags}
<title>{title}</title>
<meta http-equiv="refresh" content="0; {target_url}" />
</head>
</html>
'''
open_graph_template = '<meta property="og:{metadata}" content="{content}" />'
def rebuild_page(source_url, target_url):
source_open_graph_data = ogp_get(source_url)
if source_open_graph_data is None:
return None
og_meta_tags = []
for metadata, content in source_open_graph_data['og'].items():
og_tag = open_graph_template.format(
metadata=metadata,
content=content
)
og_meta_tags.append(og_tag)
og_tags_str = '\n '.join(og_meta_tags)
page_title = source_open_graph_data.get('page_title', '')
return html_template.format(
title=page_title,
og_tags=og_tags_str,
target_url=target_url
)
def ogp_get(url, checked_url=None):
result = {
'og': {}
}
page_html = safe_get_url(url)
if page_html is None:
return None
soup = BeautifulSoup(page_html, 'lxml')
if soup.title:
result['page_title'] = soup.title.string
ogps = soup.find_all('meta', property=open_graph_rgx)
for ogp in ogps:
content = ogp.get('content')
ogp_type = ogp.get('property')
if content and ogp_type:
match = open_graph_rgx.match(ogp_type)
if match:
meta = match.group('meta')
result['og'][meta] = content
frames = soup.find_all('frame')
iframes = soup.find_all('iframe')
checked_url = set([url]) if checked_url is None else checked_url
in_pages = frames + iframes
for in_page in in_pages:
in_page_uri = in_page.get('src')
if in_page_uri is None:
continue
in_page_url = urljoin(url, in_page_uri)
if in_page_url in checked_url:
continue
checked_url.add(in_page_url)
sub_result = ogp_get(in_page_url, checked_url)
if sub_result:
not_set_page_title = 'page_title' not in result
if not_set_page_title and 'page_title' in sub_result:
result['page_title'] = sub_result['page_title']
update_og = {
k: v
for k, v in sub_result['og'].items()
if k not in result['og'].keys()
}
result['og'].update(update_og)
return result
def safe_get_url(url):
try:
url = urlopen(url)
return url.read()
except:
pass
return None
if __name__ == '__main__':
rebuilding_page = rebuild_page('SOURCE_URL', 'TARGET_URL')
print(rebuilding_page)
| <filename>rebuild.py
import re
from urllib.request import urlopen
from urllib.parse import urljoin
from bs4 import BeautifulSoup
open_graph_rgx = re.compile('^og:(?P<meta>.+)')
html_template = '''
<html>
<head>
{og_tags}
<title>{title}</title>
<meta http-equiv="refresh" content="0; {target_url}" />
</head>
</html>
'''
open_graph_template = '<meta property="og:{metadata}" content="{content}" />'
def rebuild_page(source_url, target_url):
source_open_graph_data = ogp_get(source_url)
if source_open_graph_data is None:
return None
og_meta_tags = []
for metadata, content in source_open_graph_data['og'].items():
og_tag = open_graph_template.format(
metadata=metadata,
content=content
)
og_meta_tags.append(og_tag)
og_tags_str = '\n '.join(og_meta_tags)
page_title = source_open_graph_data.get('page_title', '')
return html_template.format(
title=page_title,
og_tags=og_tags_str,
target_url=target_url
)
def ogp_get(url, checked_url=None):
result = {
'og': {}
}
page_html = safe_get_url(url)
if page_html is None:
return None
soup = BeautifulSoup(page_html, 'lxml')
if soup.title:
result['page_title'] = soup.title.string
ogps = soup.find_all('meta', property=open_graph_rgx)
for ogp in ogps:
content = ogp.get('content')
ogp_type = ogp.get('property')
if content and ogp_type:
match = open_graph_rgx.match(ogp_type)
if match:
meta = match.group('meta')
result['og'][meta] = content
frames = soup.find_all('frame')
iframes = soup.find_all('iframe')
checked_url = set([url]) if checked_url is None else checked_url
in_pages = frames + iframes
for in_page in in_pages:
in_page_uri = in_page.get('src')
if in_page_uri is None:
continue
in_page_url = urljoin(url, in_page_uri)
if in_page_url in checked_url:
continue
checked_url.add(in_page_url)
sub_result = ogp_get(in_page_url, checked_url)
if sub_result:
not_set_page_title = 'page_title' not in result
if not_set_page_title and 'page_title' in sub_result:
result['page_title'] = sub_result['page_title']
update_og = {
k: v
for k, v in sub_result['og'].items()
if k not in result['og'].keys()
}
result['og'].update(update_og)
return result
def safe_get_url(url):
try:
url = urlopen(url)
return url.read()
except:
pass
return None
if __name__ == '__main__':
rebuilding_page = rebuild_page('SOURCE_URL', 'TARGET_URL')
print(rebuilding_page)
| en | 0.14399 | <html> <head> {og_tags} <title>{title}</title> <meta http-equiv="refresh" content="0; {target_url}" /> </head> </html> | 2.806633 | 3 |
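rebuild.py above scrapes Open Graph metadata (recursing into frames and iframes) and emits a redirect page that carries those tags. The extraction step can be exercised without any network access; the snippet below is a reduced sketch using the same regex-on-property trick against a made-up HTML document.

import re
from bs4 import BeautifulSoup

og_rgx = re.compile(r'^og:(?P<meta>.+)')

# made-up page standing in for the remote source_url
html = '''<html><head>
  <title>Example</title>
  <meta property="og:title" content="Example title" />
  <meta property="og:image" content="https://example.com/cover.png" />
</head><body></body></html>'''

soup = BeautifulSoup(html, "html.parser")   # the script above uses the lxml parser
og = {}
for tag in soup.find_all("meta", property=og_rgx):
    prop, content = tag.get("property"), tag.get("content")
    if prop and content:
        og[og_rgx.match(prop).group("meta")] = content

print(og)   # {'title': 'Example title', 'image': 'https://example.com/cover.png'}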
credoscript/models/groove.py | tlb-lab/credoscript | 0 | 6631659 | from sqlalchemy.orm import backref, relationship
from credoscript import Base, schema
from credoscript.mixins import PathMixin
class Groove(Base, PathMixin):
"""
Class representing a Groove entity from CREDO. A groove is a binary interaction
    between a polypeptide and an oligonucleotide chain (DNA/RNA/Hybrid).
Attributes
----------
Mapped Attributes
-----------------
"""
__tablename__ = '%s.grooves' % schema['credo']
ChainProt = relationship("Chain",
primaryjoin="Chain.chain_id==Groove.chain_prot_id",
foreign_keys="[Chain.chain_id]", uselist=False, innerjoin=True)
ChainNuc = relationship("Chain",
primaryjoin="Chain.chain_id==Groove.chain_nuc_id",
foreign_keys="[Chain.chain_id]", uselist=False, innerjoin=True)
Peptides = relationship("Peptide",
secondary=Base.metadata.tables['%s.groove_residue_pairs' % schema['credo']],
primaryjoin="Groove.groove_id==GrooveResiduePair.groove_id",
secondaryjoin="GrooveResiduePair.residue_prot_id==Peptide.residue_id",
foreign_keys="[GrooveResiduePair.groove_id, Peptide.residue_id]",
uselist=True, innerjoin=True, lazy='dynamic')
def __repr__(self):
return '<Groove({self.path})>'.format(self=self)
@property
def Contacts(self):
"""
Returns all the Contacts that are formed between the two Chains of this
Interface.
Parameters
----------
*expressions : BinaryExpressions, optional
SQLAlchemy BinaryExpressions that will be used to filter the query.
Queried Entities
----------------
Contact, AtomBgn (Atom), AtomEnd (Atom), ResidueBgn (Residue),
ResidueEnd (Residue), Interface
Returns
-------
contacts : list
Contacts that are formed between the two Chains of this Interface.
Examples
--------
"""
adaptor = ContactAdaptor(dynamic=True)
return adaptor.fetch_all_by_groove_id(self.groove_id, self.biomolecule_id)
class GrooveResiduePair(Base):
"""
"""
__tablename__ = '%s.groove_residue_pairs' % schema['credo']
Groove = relationship("Groove",
primaryjoin="GrooveResiduePair.groove_id==Groove.groove_id",
foreign_keys="[Groove.groove_id]", uselist=False,
innerjoin=True,
backref=backref('GrooveResiduePairs', uselist=True,
lazy='dynamic', innerjoin=True))
from ..adaptors.contactadaptor import ContactAdaptor
| from sqlalchemy.orm import backref, relationship
from credoscript import Base, schema
from credoscript.mixins import PathMixin
class Groove(Base, PathMixin):
"""
Class representing a Groove entity from CREDO. A groove is a binary interaction
    between a polypeptide and an oligonucleotide chain (DNA/RNA/Hybrid).
Attributes
----------
Mapped Attributes
-----------------
"""
__tablename__ = '%s.grooves' % schema['credo']
ChainProt = relationship("Chain",
primaryjoin="Chain.chain_id==Groove.chain_prot_id",
foreign_keys="[Chain.chain_id]", uselist=False, innerjoin=True)
ChainNuc = relationship("Chain",
primaryjoin="Chain.chain_id==Groove.chain_nuc_id",
foreign_keys="[Chain.chain_id]", uselist=False, innerjoin=True)
Peptides = relationship("Peptide",
secondary=Base.metadata.tables['%s.groove_residue_pairs' % schema['credo']],
primaryjoin="Groove.groove_id==GrooveResiduePair.groove_id",
secondaryjoin="GrooveResiduePair.residue_prot_id==Peptide.residue_id",
foreign_keys="[GrooveResiduePair.groove_id, Peptide.residue_id]",
uselist=True, innerjoin=True, lazy='dynamic')
def __repr__(self):
return '<Groove({self.path})>'.format(self=self)
@property
def Contacts(self):
"""
Returns all the Contacts that are formed between the two Chains of this
Interface.
Parameters
----------
*expressions : BinaryExpressions, optional
SQLAlchemy BinaryExpressions that will be used to filter the query.
Queried Entities
----------------
Contact, AtomBgn (Atom), AtomEnd (Atom), ResidueBgn (Residue),
ResidueEnd (Residue), Interface
Returns
-------
contacts : list
Contacts that are formed between the two Chains of this Interface.
Examples
--------
"""
adaptor = ContactAdaptor(dynamic=True)
return adaptor.fetch_all_by_groove_id(self.groove_id, self.biomolecule_id)
class GrooveResiduePair(Base):
"""
"""
__tablename__ = '%s.groove_residue_pairs' % schema['credo']
Groove = relationship("Groove",
primaryjoin="GrooveResiduePair.groove_id==Groove.groove_id",
foreign_keys="[Groove.groove_id]", uselist=False,
innerjoin=True,
backref=backref('GrooveResiduePairs', uselist=True,
lazy='dynamic', innerjoin=True))
from ..adaptors.contactadaptor import ContactAdaptor
| en | 0.743052 | Class representing a Groove entity from CREDO. A groove is a binary interaction between a polypeptide and a oligonucleotide chain (DNA/RNA/Hybrid). Attributes ---------- Mapped Attributes ----------------- Returns all the Contacts that are formed between the two Chains of this Interface. Parameters ---------- *expressions : BinaryExpressions, optional SQLAlchemy BinaryExpressions that will be used to filter the query. Queried Entities ---------------- Contact, AtomBgn (Atom), AtomEnd (Atom), ResidueBgn (Residue), ResidueEnd (Residue), Interface Returns ------- contacts : list Contacts that are formed between the two Chains of this Interface. Examples -------- | 2.662284 | 3 |
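The Groove model above wires its relationships with explicit primaryjoin and foreign_keys strings rather than relying on ForeignKey introspection. Below is a stripped-down, self-contained SQLAlchemy sketch of that idiom on toy tables with in-memory SQLite; it is not the credoscript schema.

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, relationship, Session

Base = declarative_base()

class Chain(Base):
    __tablename__ = "chains"
    chain_id = Column(Integer, primary_key=True)
    name = Column(String)

class Groove(Base):
    __tablename__ = "grooves"
    groove_id = Column(Integer, primary_key=True)
    chain_prot_id = Column(Integer)

    # same idiom as the CREDO model: explicit primaryjoin plus foreign_keys,
    # used here because the join column carries no ForeignKey constraint
    ChainProt = relationship(
        "Chain",
        primaryjoin="Chain.chain_id==Groove.chain_prot_id",
        foreign_keys="[Chain.chain_id]",
        uselist=False, viewonly=True)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([Chain(chain_id=1, name="protein chain"),
                     Groove(groove_id=7, chain_prot_id=1)])
    session.commit()
    print(session.query(Groove).one().ChainProt.name)   # -> protein chain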
api/endpoints/projects/languageProject.py | mshen63/DevUp | 2 | 6631660 | <filename>api/endpoints/projects/languageProject.py
from flask import Blueprint, jsonify, request
from models.models import db, ProjectModel, RelProjectLanguage, LanguageModel
languageProject_api = Blueprint("languageProject_api", __name__)
@languageProject_api.route("/addProjectLanguage", methods=("POST",))
def addProjectLanguage():
body = request.get_json()
language = str(body["language"])
projectId = str(body["projectId"])
error = None
if not language or not projectId:
error = "Missing Data"
if LanguageModel.query.filter_by(name=language).first() is None:
error = f"Language {language} does not exist"
if ProjectModel.query.filter_by(id=projectId).first() is None:
error = f"Project with id {projectId} does not exist"
if (
RelProjectLanguage.query.filter_by(
language=language, projectId=projectId
).first()
is not None
):
error = f"Language {language} already is in the project with id {projectId}"
if error is None:
add_language = RelProjectLanguage(language, projectId)
db.session.add(add_language)
db.session.commit()
message = (
f"Language {language} added to the project with id {projectId} successfully"
)
return jsonify({"status": "ok", "message": message}), 200
else:
return jsonify({"status": "bad", "error": error}), 400
@languageProject_api.route("/getProjectLanguages", methods=("POST",))
def getProjectLanguages():
body = request.get_json()
projectId = str(body["projectId"])
error = None
if not projectId:
error = "Missing Data"
if error is None:
response = RelProjectLanguage.query.filter_by(projectId=projectId).all()
languages = []
for item in response:
languages.append({"language": item.language})
return jsonify({"languages": languages}), 200
else:
return jsonify({"status": "bad", "error": error}), 400
@languageProject_api.route("/deleteProjectLanguage", methods=("DELETE",))
def deleteProjectLanguage():
body = request.get_json()
projectId = str(body["projectId"])
language = str(body["language"])
error = None
if not projectId:
error = "Missing Data"
if (
RelProjectLanguage.query.filter_by(
language=language, projectId=projectId
).first()
is None
):
error = f"Language not in project with id {projectId}"
if error is None:
RelProjectLanguage.query.filter_by(
language=language, projectId=projectId
).delete()
db.session.commit()
message = f"Language {language} removed from the project with id {projectId}"
return jsonify({"status": "ok", "message": message}), 200
else:
return jsonify({"status": "bad", "error": error}), 400
| <filename>api/endpoints/projects/languageProject.py
from flask import Blueprint, jsonify, request
from models.models import db, ProjectModel, RelProjectLanguage, LanguageModel
languageProject_api = Blueprint("languageProject_api", __name__)
@languageProject_api.route("/addProjectLanguage", methods=("POST",))
def addProjectLanguage():
body = request.get_json()
language = str(body["language"])
projectId = str(body["projectId"])
error = None
if not language or not projectId:
error = "Missing Data"
if LanguageModel.query.filter_by(name=language).first() is None:
error = f"Language {language} does not exist"
if ProjectModel.query.filter_by(id=projectId).first() is None:
error = f"Project with id {projectId} does not exist"
if (
RelProjectLanguage.query.filter_by(
language=language, projectId=projectId
).first()
is not None
):
error = f"Language {language} already is in the project with id {projectId}"
if error is None:
add_language = RelProjectLanguage(language, projectId)
db.session.add(add_language)
db.session.commit()
message = (
f"Language {language} added to the project with id {projectId} successfully"
)
return jsonify({"status": "ok", "message": message}), 200
else:
return jsonify({"status": "bad", "error": error}), 400
@languageProject_api.route("/getProjectLanguages", methods=("POST",))
def getProjectLanguages():
body = request.get_json()
projectId = str(body["projectId"])
error = None
if not projectId:
error = "Missing Data"
if error is None:
response = RelProjectLanguage.query.filter_by(projectId=projectId).all()
languages = []
for item in response:
languages.append({"language": item.language})
return jsonify({"languages": languages}), 200
else:
return jsonify({"status": "bad", "error": error}), 400
@languageProject_api.route("/deleteProjectLanguage", methods=("DELETE",))
def deleteProjectLanguage():
body = request.get_json()
projectId = str(body["projectId"])
language = str(body["language"])
error = None
if not projectId:
error = "Missing Data"
if (
RelProjectLanguage.query.filter_by(
language=language, projectId=projectId
).first()
is None
):
error = f"Language not in project with id {projectId}"
if error is None:
RelProjectLanguage.query.filter_by(
language=language, projectId=projectId
).delete()
db.session.commit()
message = f"Language {language} removed from the project with id {projectId}"
return jsonify({"status": "ok", "message": message}), 200
else:
return jsonify({"status": "bad", "error": error}), 400
| none | 1 | 2.88957 | 3 |
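The blueprint above exposes three JSON endpoints for attaching languages to projects. A client-side sketch follows; the http://localhost:5000 base URL and the absence of a blueprint url_prefix are assumptions about how the app is mounted, not facts from this row.

import requests

BASE = "http://localhost:5000"   # assumed dev server address

# attach a language to a project
r = requests.post(f"{BASE}/addProjectLanguage",
                  json={"language": "Python", "projectId": "42"})
print(r.status_code, r.json())

# list the project's languages (note: this route is registered for POST, not GET)
r = requests.post(f"{BASE}/getProjectLanguages", json={"projectId": "42"})
print(r.json())        # e.g. {"languages": [{"language": "Python"}]}

# detach it again
r = requests.delete(f"{BASE}/deleteProjectLanguage",
                    json={"language": "Python", "projectId": "42"})
print(r.json())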
asset_name_versioning.py | uom-daris/mflux-python-utilities | 0 | 6631661 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 10:00:41 2017
@author: jwinton
"""
import mfclient
import mf_connect
from xml.sax.saxutils import escape
"""
A python module that renames mediaflux assets and copies the name metadata to a versioned metadata chunk
"""
def set_mf_name(namespace):
# Create a mediaflux connection
cxn = mf_connect.connect()
w = mfclient.XmlStringWriter('args')
w.add("where","namespace>="+namespace+" and mf-name hasno value"
"")
w.add("size","infinity")
# mfcommand = "asset.query :where namespace>="+namespace+" and (xpath(asset/name) contians"+assetContains+")"
# print w.doc_text()
r = cxn.execute("asset.query",w.doc_text())
# print r
for a in r.values("id"):
# print a
nameq = mfclient.XmlStringWriter('args')
nameq.add("where","id="+a)
nameq.add("action","get-name")
# assetname = ""
name = cxn.execute("asset.query", nameq.doc_text())
if isinstance(name.value('name'), unicode):
print "skip " + name.value('name')
else:
assetname = name.value("name")
assetname = escape(assetname)
print name.value("name")
nameset = mfclient.XmlStringWriter('args')
nameset.add("id",a)
nameset.push("meta")
nameset.push("mf-name")
# nameset.add("name",name.value("name"))
nameset.add("name", assetname)
# print nameset.doc_text()
cxn.execute("asset.set",nameset.doc_text())
ns = "/projects/proj-MELU-1128.4.29"
# ns = "/projects/proj-demonstration-1128.4.15/audio"
set_mf_name(ns) | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 10:00:41 2017
@author: jwinton
"""
import mfclient
import mf_connect
from xml.sax.saxutils import escape
"""
A python module that renames mediaflux assets and copies the name metadata to a versioned metadata chunk
"""
def set_mf_name(namespace):
# Create a mediaflux connection
cxn = mf_connect.connect()
w = mfclient.XmlStringWriter('args')
w.add("where","namespace>="+namespace+" and mf-name hasno value"
"")
w.add("size","infinity")
# mfcommand = "asset.query :where namespace>="+namespace+" and (xpath(asset/name) contians"+assetContains+")"
# print w.doc_text()
r = cxn.execute("asset.query",w.doc_text())
# print r
for a in r.values("id"):
# print a
nameq = mfclient.XmlStringWriter('args')
nameq.add("where","id="+a)
nameq.add("action","get-name")
# assetname = ""
name = cxn.execute("asset.query", nameq.doc_text())
if isinstance(name.value('name'), unicode):
print "skip " + name.value('name')
else:
assetname = name.value("name")
assetname = escape(assetname)
print name.value("name")
nameset = mfclient.XmlStringWriter('args')
nameset.add("id",a)
nameset.push("meta")
nameset.push("mf-name")
# nameset.add("name",name.value("name"))
nameset.add("name", assetname)
# print nameset.doc_text()
cxn.execute("asset.set",nameset.doc_text())
ns = "/projects/proj-MELU-1128.4.29"
# ns = "/projects/proj-demonstration-1128.4.15/audio"
set_mf_name(ns) | en | 0.514374 | #!/usr/bin/env python2 # -*- coding: utf-8 -*- Created on Fri Jan 27 10:00:41 2017 @author: jwinton A python module that renames mediaflux assets and copies the name metadata to a versioned metadata chunk # Create a mediaflux connection # mfcommand = "asset.query :where namespace>="+namespace+" and (xpath(asset/name) contians"+assetContains+")" # print w.doc_text() # print r # print a # assetname = "" # nameset.add("name",name.value("name")) # print nameset.doc_text() # ns = "/projects/proj-demonstration-1128.4.15/audio" | 2.135174 | 2 |
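The script above runs asset names through xml.sax.saxutils.escape before embedding them in the Mediaflux service document, which is what lets names containing &, < or > round-trip safely. A tiny standalone illustration of that helper:

from xml.sax.saxutils import escape

# asset names may contain characters that would break the XML service document
raw_name = 'Field notes <2017> & "samples"'
print(escape(raw_name))
# -> Field notes &lt;2017&gt; &amp; "samples"

# quotes need escaping too if the value ends up inside an XML attribute
print(escape(raw_name, {'"': '&quot;'}))
# -> Field notes &lt;2017&gt; &amp; &quot;samples&quot;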
isi_sdk_8_2_0/isi_sdk_8_2_0/models/cloud_settings_settings_cloud_policy_defaults.py | mohitjain97/isilon_sdk_python | 24 | 6631662 | <reponame>mohitjain97/isilon_sdk_python
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_0.models.cloud_settings_settings_cloud_policy_defaults_cache import CloudSettingsSettingsCloudPolicyDefaultsCache # noqa: F401,E501
class CloudSettingsSettingsCloudPolicyDefaults(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'archive_snapshot_files': 'bool',
'cache': 'CloudSettingsSettingsCloudPolicyDefaultsCache',
'compression': 'bool',
'data_retention': 'int',
'encryption': 'bool',
'full_backup_retention': 'int',
'incremental_backup_retention': 'int',
'writeback_frequency': 'int'
}
attribute_map = {
'archive_snapshot_files': 'archive_snapshot_files',
'cache': 'cache',
'compression': 'compression',
'data_retention': 'data_retention',
'encryption': 'encryption',
'full_backup_retention': 'full_backup_retention',
'incremental_backup_retention': 'incremental_backup_retention',
'writeback_frequency': 'writeback_frequency'
}
def __init__(self, archive_snapshot_files=None, cache=None, compression=None, data_retention=None, encryption=None, full_backup_retention=None, incremental_backup_retention=None, writeback_frequency=None): # noqa: E501
"""CloudSettingsSettingsCloudPolicyDefaults - a model defined in Swagger""" # noqa: E501
self._archive_snapshot_files = None
self._cache = None
self._compression = None
self._data_retention = None
self._encryption = None
self._full_backup_retention = None
self._incremental_backup_retention = None
self._writeback_frequency = None
self.discriminator = None
if archive_snapshot_files is not None:
self.archive_snapshot_files = archive_snapshot_files
if cache is not None:
self.cache = cache
if compression is not None:
self.compression = compression
if data_retention is not None:
self.data_retention = data_retention
if encryption is not None:
self.encryption = encryption
if full_backup_retention is not None:
self.full_backup_retention = full_backup_retention
if incremental_backup_retention is not None:
self.incremental_backup_retention = incremental_backup_retention
if writeback_frequency is not None:
self.writeback_frequency = writeback_frequency
@property
def archive_snapshot_files(self):
"""Gets the archive_snapshot_files of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
Specifies if files with snapshots should be archived. # noqa: E501
:return: The archive_snapshot_files of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: bool
"""
return self._archive_snapshot_files
@archive_snapshot_files.setter
def archive_snapshot_files(self, archive_snapshot_files):
"""Sets the archive_snapshot_files of this CloudSettingsSettingsCloudPolicyDefaults.
Specifies if files with snapshots should be archived. # noqa: E501
:param archive_snapshot_files: The archive_snapshot_files of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: bool
"""
self._archive_snapshot_files = archive_snapshot_files
@property
def cache(self):
"""Gets the cache of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
Specifies default cloudpool cache settings for new filepool policies. # noqa: E501
:return: The cache of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: CloudSettingsSettingsCloudPolicyDefaultsCache
"""
return self._cache
@cache.setter
def cache(self, cache):
"""Sets the cache of this CloudSettingsSettingsCloudPolicyDefaults.
Specifies default cloudpool cache settings for new filepool policies. # noqa: E501
:param cache: The cache of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: CloudSettingsSettingsCloudPolicyDefaultsCache
"""
self._cache = cache
@property
def compression(self):
"""Gets the compression of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
Specifies if files should be compressed. # noqa: E501
:return: The compression of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: bool
"""
return self._compression
@compression.setter
def compression(self, compression):
"""Sets the compression of this CloudSettingsSettingsCloudPolicyDefaults.
Specifies if files should be compressed. # noqa: E501
:param compression: The compression of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: bool
"""
self._compression = compression
@property
def data_retention(self):
"""Gets the data_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
Specifies the minimum amount of time archived data will be retained in the cloud after deletion. # noqa: E501
:return: The data_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: int
"""
return self._data_retention
@data_retention.setter
def data_retention(self, data_retention):
"""Sets the data_retention of this CloudSettingsSettingsCloudPolicyDefaults.
Specifies the minimum amount of time archived data will be retained in the cloud after deletion. # noqa: E501
:param data_retention: The data_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: int
"""
self._data_retention = data_retention
@property
def encryption(self):
"""Gets the encryption of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
Specifies if files should be encrypted. # noqa: E501
:return: The encryption of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: bool
"""
return self._encryption
@encryption.setter
def encryption(self, encryption):
"""Sets the encryption of this CloudSettingsSettingsCloudPolicyDefaults.
Specifies if files should be encrypted. # noqa: E501
:param encryption: The encryption of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: bool
"""
self._encryption = encryption
@property
def full_backup_retention(self):
"""Gets the full_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
(Used with NDMP backups only. Not applicable to SyncIQ.) The minimum amount of time cloud files will be retained after the creation of a full NDMP backup. # noqa: E501
:return: The full_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: int
"""
return self._full_backup_retention
@full_backup_retention.setter
def full_backup_retention(self, full_backup_retention):
"""Sets the full_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults.
(Used with NDMP backups only. Not applicable to SyncIQ.) The minimum amount of time cloud files will be retained after the creation of a full NDMP backup. # noqa: E501
:param full_backup_retention: The full_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: int
"""
self._full_backup_retention = full_backup_retention
@property
def incremental_backup_retention(self):
"""Gets the incremental_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
(Used with SyncIQ and NDMP backups.) The minimum amount of time cloud files will be retained after the creation of a SyncIQ backup or an incremental NDMP backup. # noqa: E501
:return: The incremental_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: int
"""
return self._incremental_backup_retention
@incremental_backup_retention.setter
def incremental_backup_retention(self, incremental_backup_retention):
"""Sets the incremental_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults.
(Used with SyncIQ and NDMP backups.) The minimum amount of time cloud files will be retained after the creation of a SyncIQ backup or an incremental NDMP backup. # noqa: E501
:param incremental_backup_retention: The incremental_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: int
"""
self._incremental_backup_retention = incremental_backup_retention
@property
def writeback_frequency(self):
"""Gets the writeback_frequency of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
The minimum amount of time to wait before updating cloud data with local changes. # noqa: E501
:return: The writeback_frequency of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: int
"""
return self._writeback_frequency
@writeback_frequency.setter
def writeback_frequency(self, writeback_frequency):
"""Sets the writeback_frequency of this CloudSettingsSettingsCloudPolicyDefaults.
The minimum amount of time to wait before updating cloud data with local changes. # noqa: E501
:param writeback_frequency: The writeback_frequency of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: int
"""
self._writeback_frequency = writeback_frequency
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CloudSettingsSettingsCloudPolicyDefaults):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_0.models.cloud_settings_settings_cloud_policy_defaults_cache import CloudSettingsSettingsCloudPolicyDefaultsCache # noqa: F401,E501
class CloudSettingsSettingsCloudPolicyDefaults(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'archive_snapshot_files': 'bool',
'cache': 'CloudSettingsSettingsCloudPolicyDefaultsCache',
'compression': 'bool',
'data_retention': 'int',
'encryption': 'bool',
'full_backup_retention': 'int',
'incremental_backup_retention': 'int',
'writeback_frequency': 'int'
}
attribute_map = {
'archive_snapshot_files': 'archive_snapshot_files',
'cache': 'cache',
'compression': 'compression',
'data_retention': 'data_retention',
'encryption': 'encryption',
'full_backup_retention': 'full_backup_retention',
'incremental_backup_retention': 'incremental_backup_retention',
'writeback_frequency': 'writeback_frequency'
}
def __init__(self, archive_snapshot_files=None, cache=None, compression=None, data_retention=None, encryption=None, full_backup_retention=None, incremental_backup_retention=None, writeback_frequency=None): # noqa: E501
"""CloudSettingsSettingsCloudPolicyDefaults - a model defined in Swagger""" # noqa: E501
self._archive_snapshot_files = None
self._cache = None
self._compression = None
self._data_retention = None
self._encryption = None
self._full_backup_retention = None
self._incremental_backup_retention = None
self._writeback_frequency = None
self.discriminator = None
if archive_snapshot_files is not None:
self.archive_snapshot_files = archive_snapshot_files
if cache is not None:
self.cache = cache
if compression is not None:
self.compression = compression
if data_retention is not None:
self.data_retention = data_retention
if encryption is not None:
self.encryption = encryption
if full_backup_retention is not None:
self.full_backup_retention = full_backup_retention
if incremental_backup_retention is not None:
self.incremental_backup_retention = incremental_backup_retention
if writeback_frequency is not None:
self.writeback_frequency = writeback_frequency
@property
def archive_snapshot_files(self):
"""Gets the archive_snapshot_files of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
Specifies if files with snapshots should be archived. # noqa: E501
:return: The archive_snapshot_files of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: bool
"""
return self._archive_snapshot_files
@archive_snapshot_files.setter
def archive_snapshot_files(self, archive_snapshot_files):
"""Sets the archive_snapshot_files of this CloudSettingsSettingsCloudPolicyDefaults.
Specifies if files with snapshots should be archived. # noqa: E501
:param archive_snapshot_files: The archive_snapshot_files of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: bool
"""
self._archive_snapshot_files = archive_snapshot_files
@property
def cache(self):
"""Gets the cache of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
Specifies default cloudpool cache settings for new filepool policies. # noqa: E501
:return: The cache of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: CloudSettingsSettingsCloudPolicyDefaultsCache
"""
return self._cache
@cache.setter
def cache(self, cache):
"""Sets the cache of this CloudSettingsSettingsCloudPolicyDefaults.
Specifies default cloudpool cache settings for new filepool policies. # noqa: E501
:param cache: The cache of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: CloudSettingsSettingsCloudPolicyDefaultsCache
"""
self._cache = cache
@property
def compression(self):
"""Gets the compression of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
Specifies if files should be compressed. # noqa: E501
:return: The compression of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: bool
"""
return self._compression
@compression.setter
def compression(self, compression):
"""Sets the compression of this CloudSettingsSettingsCloudPolicyDefaults.
Specifies if files should be compressed. # noqa: E501
:param compression: The compression of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: bool
"""
self._compression = compression
@property
def data_retention(self):
"""Gets the data_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
Specifies the minimum amount of time archived data will be retained in the cloud after deletion. # noqa: E501
:return: The data_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: int
"""
return self._data_retention
@data_retention.setter
def data_retention(self, data_retention):
"""Sets the data_retention of this CloudSettingsSettingsCloudPolicyDefaults.
Specifies the minimum amount of time archived data will be retained in the cloud after deletion. # noqa: E501
:param data_retention: The data_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: int
"""
self._data_retention = data_retention
@property
def encryption(self):
"""Gets the encryption of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
Specifies if files should be encrypted. # noqa: E501
:return: The encryption of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: bool
"""
return self._encryption
@encryption.setter
def encryption(self, encryption):
"""Sets the encryption of this CloudSettingsSettingsCloudPolicyDefaults.
Specifies if files should be encrypted. # noqa: E501
:param encryption: The encryption of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: bool
"""
self._encryption = encryption
@property
def full_backup_retention(self):
"""Gets the full_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
(Used with NDMP backups only. Not applicable to SyncIQ.) The minimum amount of time cloud files will be retained after the creation of a full NDMP backup. # noqa: E501
:return: The full_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: int
"""
return self._full_backup_retention
@full_backup_retention.setter
def full_backup_retention(self, full_backup_retention):
"""Sets the full_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults.
(Used with NDMP backups only. Not applicable to SyncIQ.) The minimum amount of time cloud files will be retained after the creation of a full NDMP backup. # noqa: E501
:param full_backup_retention: The full_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: int
"""
self._full_backup_retention = full_backup_retention
@property
def incremental_backup_retention(self):
"""Gets the incremental_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
(Used with SyncIQ and NDMP backups.) The minimum amount of time cloud files will be retained after the creation of a SyncIQ backup or an incremental NDMP backup. # noqa: E501
:return: The incremental_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: int
"""
return self._incremental_backup_retention
@incremental_backup_retention.setter
def incremental_backup_retention(self, incremental_backup_retention):
"""Sets the incremental_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults.
(Used with SyncIQ and NDMP backups.) The minimum amount of time cloud files will be retained after the creation of a SyncIQ backup or an incremental NDMP backup. # noqa: E501
:param incremental_backup_retention: The incremental_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: int
"""
self._incremental_backup_retention = incremental_backup_retention
@property
def writeback_frequency(self):
"""Gets the writeback_frequency of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
The minimum amount of time to wait before updating cloud data with local changes. # noqa: E501
:return: The writeback_frequency of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:rtype: int
"""
return self._writeback_frequency
@writeback_frequency.setter
def writeback_frequency(self, writeback_frequency):
"""Sets the writeback_frequency of this CloudSettingsSettingsCloudPolicyDefaults.
The minimum amount of time to wait before updating cloud data with local changes. # noqa: E501
:param writeback_frequency: The writeback_frequency of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501
:type: int
"""
self._writeback_frequency = writeback_frequency
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CloudSettingsSettingsCloudPolicyDefaults):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | en | 0.608972 | # coding: utf-8 Isilon SDK Isilon SDK - Language bindings for the OneFS API # noqa: E501 OpenAPI spec version: 7 Contact: <EMAIL> Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: F401 # noqa: F401,E501 NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. # noqa: E501 CloudSettingsSettingsCloudPolicyDefaults - a model defined in Swagger # noqa: E501 Gets the archive_snapshot_files of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 Specifies if files with snapshots should be archived. # noqa: E501 :return: The archive_snapshot_files of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :rtype: bool Sets the archive_snapshot_files of this CloudSettingsSettingsCloudPolicyDefaults. Specifies if files with snapshots should be archived. # noqa: E501 :param archive_snapshot_files: The archive_snapshot_files of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :type: bool Gets the cache of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 Specifies default cloudpool cache settings for new filepool policies. # noqa: E501 :return: The cache of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :rtype: CloudSettingsSettingsCloudPolicyDefaultsCache Sets the cache of this CloudSettingsSettingsCloudPolicyDefaults. Specifies default cloudpool cache settings for new filepool policies. # noqa: E501 :param cache: The cache of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :type: CloudSettingsSettingsCloudPolicyDefaultsCache Gets the compression of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 Specifies if files should be compressed. # noqa: E501 :return: The compression of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :rtype: bool Sets the compression of this CloudSettingsSettingsCloudPolicyDefaults. Specifies if files should be compressed. # noqa: E501 :param compression: The compression of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :type: bool Gets the data_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 Specifies the minimum amount of time archived data will be retained in the cloud after deletion. # noqa: E501 :return: The data_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :rtype: int Sets the data_retention of this CloudSettingsSettingsCloudPolicyDefaults. Specifies the minimum amount of time archived data will be retained in the cloud after deletion. # noqa: E501 :param data_retention: The data_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :type: int Gets the encryption of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 Specifies if files should be encrypted. # noqa: E501 :return: The encryption of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :rtype: bool Sets the encryption of this CloudSettingsSettingsCloudPolicyDefaults. Specifies if files should be encrypted. # noqa: E501 :param encryption: The encryption of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :type: bool Gets the full_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 (Used with NDMP backups only. Not applicable to SyncIQ.) 
The minimum amount of time cloud files will be retained after the creation of a full NDMP backup. # noqa: E501 :return: The full_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :rtype: int Sets the full_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. (Used with NDMP backups only. Not applicable to SyncIQ.) The minimum amount of time cloud files will be retained after the creation of a full NDMP backup. # noqa: E501 :param full_backup_retention: The full_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :type: int Gets the incremental_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 (Used with SyncIQ and NDMP backups.) The minimum amount of time cloud files will be retained after the creation of a SyncIQ backup or an incremental NDMP backup. # noqa: E501 :return: The incremental_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :rtype: int Sets the incremental_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. (Used with SyncIQ and NDMP backups.) The minimum amount of time cloud files will be retained after the creation of a SyncIQ backup or an incremental NDMP backup. # noqa: E501 :param incremental_backup_retention: The incremental_backup_retention of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :type: int Gets the writeback_frequency of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 The minimum amount of time to wait before updating cloud data with local changes. # noqa: E501 :return: The writeback_frequency of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :rtype: int Sets the writeback_frequency of this CloudSettingsSettingsCloudPolicyDefaults. The minimum amount of time to wait before updating cloud data with local changes. # noqa: E501 :param writeback_frequency: The writeback_frequency of this CloudSettingsSettingsCloudPolicyDefaults. # noqa: E501 :type: int Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 1.307963 | 1 |
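The generated model above is plain data-holder code: keyword construction, per-field properties and a reflective to_dict(). Assuming the isi_sdk_8_2_0 package is installed, a minimal usage sketch looks like this; the retention and writeback values are arbitrary illustrations, not recommended settings.

from isi_sdk_8_2_0.models.cloud_settings_settings_cloud_policy_defaults import (
    CloudSettingsSettingsCloudPolicyDefaults,
)

defaults = CloudSettingsSettingsCloudPolicyDefaults(
    compression=True,
    encryption=True,
    data_retention=7 * 24 * 3600,      # seconds -- purely illustrative values
    writeback_frequency=9 * 3600,
)

print(defaults.to_dict())
# equality is a plain __dict__ comparison, so a partially-filled model differs:
print(defaults == CloudSettingsSettingsCloudPolicyDefaults(compression=True))   # False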
app/run.py | EFerriss/playful | 1 | 6631663 | <filename>app/run.py
#!/usr/bin/python
from playful import app
app.run(debug = True) | <filename>app/run.py
#!/usr/bin/python
from playful import app
app.run(debug = True) | en | 0.310673 | #!/user/bin/python | 1.264139 | 1 |
tests/targets/nester.py | Laufire/ec | 2 | 6631664 | <reponame>Laufire/ec<gh_stars>1-10
"""
nester
======
Used to test nested modules.
"""
from ec.ec import member, task
import simple
member(simple) # add the imported member to the script
@task
def task1():
print 'task1'
| """
nester
======
Used to test nested modules.
"""
from ec.ec import member, task
import simple
member(simple) # add the imported member to the script
@task
def task1():
print 'task1' | en | 0.737827 | nester ====== Used to test nested modules. # add the imported member to the script | 2.656942 | 3 |
MachineLearning/week2/ransac_demo.py | fengjixuchui/EmbeddedSystem | 228 | 6631665 | <reponame>fengjixuchui/EmbeddedSystem
# Ransac many noisy data
import numpy as np
import matplotlib.pyplot as plt
import random
SIZE = 50
ERROR = 50
x = np.linspace(0, 10, SIZE)
y = 3 * x + 10
random_x = [x[i] + random.uniform(-0.6, 0.2) for i in range(SIZE)]
random_y = [y[i] + random.uniform(-0.6, 0.2) for i in range(SIZE)]
# add some error points
for i in range(ERROR):
random_x.append(random.uniform(0, 20))
random_y.append(random.uniform(10, 40))
RANDOM_X = np.array(random_x)
RANDOM_Y = np.array(random_y)
# fig = plt.figure()
# ax1 = fig.add_subplot(1, 1, 1)
# ax1.scatter(RANDOM_X, RANDOM_Y)
# plt.show()
# use OLS to fit the model
# from sklearn.linear_model import LinearRegression
#
# data_X = RANDOM_X.reshape(-1, 1)
# data_Y = RANDOM_Y.reshape(-1, 1)
#
# reg = LinearRegression(fit_intercept=True)
# reg.fit(data_X, data_Y)
# slope = reg.coef_
# intercept = reg.intercept_
# PREDICT_Y = reg.predict(data_X)
#
# fig = plt.figure()
# ax1 = fig.add_subplot(1, 1, 1)
# ax1.scatter(RANDOM_X, RANDOM_Y)
# ax1.plot(RANDOM_X, PREDICT_Y, c='red')
# plt.show()
## RANSAC
# 1. To fit a line model we need two points, since two points uniquely determine a line equation. So first randomly pick two points
# 2. From these two points, compute the model equation y = ax + b that they define
# 3. Plug every data point into this model and compute its error
# 4. Find all points whose error is within the tolerance threshold (the inliers)
# 5. Repeat the previous 4 steps; after enough iterations, pick the model with the most supporting points as the solution
iterations = 100
tolerent_sigma = 1
thresh_size = 0.5
best_slope = -1
best_intercept = 0
pretotal = 0
plt.ion()
plt.figure()
for i in range(iterations):
# 1. Randomly pick two sample points in each iteration
sample_index = random.sample(range(SIZE + ERROR), 2)
x_1 = RANDOM_X[sample_index[0]]
x_2 = RANDOM_X[sample_index[1]]
y_1 = RANDOM_Y[sample_index[0]]
y_2 = RANDOM_Y[sample_index[1]]
# 2. Compute the line parameters from the randomly chosen points
slope = (y_2 - y_1) / (x_2 - x_1)
intercept = y_1 - slope * x_1
# 3. Count the number of inliers that meet the error tolerance
total_inliers = 0
for index in range(SIZE + ERROR):
PREDICT_Y = slope * RANDOM_X[index] + intercept
if abs(PREDICT_Y - RANDOM_Y[index]) < tolerent_sigma:
total_inliers += 1
if total_inliers > pretotal:
pretotal = total_inliers
best_slope = slope
best_intercept = intercept
# 4. Stop iterating once the number of inliers exceeds the threshold
if total_inliers > (SIZE + ERROR) * thresh_size:
break
plt.title(f"RANSAC in Linear Regression: Iter {i + 1}, Inliers {pretotal}")
plt.scatter(RANDOM_X, RANDOM_Y)
Y = best_slope * RANDOM_X + best_intercept
plt.plot(RANDOM_X, Y,'black')
plt.pause(0.2)
plt.clf()
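# --- Added illustration (not in the original demo): vectorized inlier count ---
# A sketch of the step-3 inlier count done with numpy broadcasting instead of the Python loop,
# using the names best_slope, best_intercept, RANDOM_X, RANDOM_Y and tolerent_sigma defined above.
residuals = np.abs(best_slope * RANDOM_X + best_intercept - RANDOM_Y)
vectorized_inlier_count = int(np.sum(residuals < tolerent_sigma))
print(f"vectorized inlier count for the best model: {vectorized_inlier_count}")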
| # Ransac many noisy data
import numpy as np
import matplotlib.pyplot as plt
import random
SIZE = 50
ERROR = 50
x = np.linspace(0, 10, SIZE)
y = 3 * x + 10
random_x = [x[i] + random.uniform(-0.6, 0.2) for i in range(SIZE)]
random_y = [y[i] + random.uniform(-0.6, 0.2) for i in range(SIZE)]
# add some error points
for i in range(ERROR):
random_x.append(random.uniform(0, 20))
random_y.append(random.uniform(10, 40))
RANDOM_X = np.array(random_x)
RANDOM_Y = np.array(random_y)
# fig = plt.figure()
# ax1 = fig.add_subplot(1, 1, 1)
# ax1.scatter(RANDOM_X, RANDOM_Y)
# plt.show()
# use OLS to fit the model
# from sklearn.linear_model import LinearRegression
#
# data_X = RANDOM_X.reshape(-1, 1)
# data_Y = RANDOM_Y.reshape(-1, 1)
#
# reg = LinearRegression(fit_intercept=True)
# reg.fit(data_X, data_Y)
# slope = reg.coef_
# intercept = reg.intercept_
# PREDICT_Y = reg.predict(data_X)
#
# fig = plt.figure()
# ax1 = fig.add_subplot(1, 1, 1)
# ax1.scatter(RANDOM_X, RANDOM_Y)
# ax1.plot(RANDOM_X, PREDICT_Y, c='red')
# plt.show()
## RANSAC
# 1. To fit a line model we need two points, since two points uniquely determine a line equation. So first randomly pick two points
# 2. From these two points, compute the model equation y = ax + b that they define
# 3. Plug every data point into this model and compute its error
# 4. Find all points whose error is within the tolerance threshold (the inliers)
# 5. Repeat the previous 4 steps; after enough iterations, pick the model with the most supporting points as the solution
iterations = 100
tolerent_sigma = 1
thresh_size = 0.5
best_slope = -1
best_intercept = 0
pretotal = 0
plt.ion()
plt.figure()
for i in range(iterations):
# 1. Randomly pick two sample points in each iteration
sample_index = random.sample(range(SIZE + ERROR), 2)
x_1 = RANDOM_X[sample_index[0]]
x_2 = RANDOM_X[sample_index[1]]
y_1 = RANDOM_Y[sample_index[0]]
y_2 = RANDOM_Y[sample_index[1]]
# 2. Compute the line parameters from the randomly chosen points
slope = (y_2 - y_1) / (x_2 - x_1)
intercept = y_1 - slope * x_1
# 3. Count the number of inliers that meet the error tolerance
total_inliers = 0
for index in range(SIZE + ERROR):
PREDICT_Y = slope * RANDOM_X[index] + intercept
if abs(PREDICT_Y - RANDOM_Y[index]) < tolerent_sigma:
total_inliers += 1
if total_inliers > pretotal:
pretotal = total_inliers
best_slope = slope
best_intercept = intercept
# 4. Stop iterating once the number of inliers exceeds the threshold
if total_inliers > (SIZE + ERROR) * thresh_size:
break
plt.title(f"RANSAC in Linear Regression: Iter {i + 1}, Inliers {pretotal}")
plt.scatter(RANDOM_X, RANDOM_Y)
Y = best_slope * RANDOM_X + best_intercept
plt.plot(RANDOM_X, Y,'black')
plt.pause(0.2)
plt.clf() | zh | 0.307043 | # Ransac many noisy data # add some error points # fig = plt.figure() # ax1 = fig.add_subplot(1, 1, 1) # ax1.scatter(RANDOM_X, RANDOM_Y) # plt.show() # use OLS to fit the model # from sklearn.linear_model import LinearRegression # # data_X = RANDOM_X.reshape(-1, 1) # data_Y = RANDOM_Y.reshape(-1, 1) # # reg = LinearRegression(fit_intercept=True) # reg.fit(data_X, data_Y) # slope = reg.coef_ # intercept = reg.intercept_ # PREDICT_Y = reg.predict(data_X) # # fig = plt.figure() # ax1 = fig.add_subplot(1, 1, 1) # ax1.scatter(RANDOM_X, RANDOM_Y) # ax1.plot(RANDOM_X, PREDICT_Y, c='red') # plt.show() ## RANSAC # 1. 要得到一个直线模型,需要两个点唯一确定一个直线方程。所以第一步随机选择两个点 # 2. 通过这两个点,可以计算出这两个点所表示的模型方程 y=ax+b # 3. 将所有的数据点套到这个模型中计算误差 # 4. 找到所有满足误差阈值的点 # 5. 重复前 4 步迭代过程,直到达到一定迭代次数后,选出那个被支持的最多的模型,作为问题的解 # 1. 每次迭代随机选取两个样本点 # 2. 根据随机选取的点来计算参数值 # 3. 计算负荷要求的内点个数 # 4. 如果内点个数大于阈值则停止迭代 | 3.599449 | 4 |
Lib/site-packages/asn1crypto/cms.py | ldepaula3/TextAnalyticsApp | 0 | 6631666 | <filename>Lib/site-packages/asn1crypto/cms.py
# coding: utf-8
"""
ASN.1 type classes for cryptographic message syntax (CMS). Structures are also
compatible with PKCS#7. Exports the following items:
- AuthenticatedData()
- AuthEnvelopedData()
- CompressedData()
- ContentInfo()
- DigestedData()
- EncryptedData()
- EnvelopedData()
- SignedAndEnvelopedData()
- SignedData()
Other type classes are defined that help compose the types listed above.
Most CMS structures in the wild are formatted as ContentInfo encapsulating one of the other types.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
try:
import zlib
except (ImportError):
zlib = None
from .algos import (
_ForceNullParameters,
DigestAlgorithm,
EncryptionAlgorithm,
HmacAlgorithm,
KdfAlgorithm,
SignedDigestAlgorithm,
)
from .core import (
Any,
BitString,
Choice,
Enumerated,
GeneralizedTime,
Integer,
ObjectIdentifier,
OctetBitString,
OctetString,
ParsableOctetString,
Sequence,
SequenceOf,
SetOf,
UTCTime,
UTF8String,
)
from .crl import CertificateList
from .keys import PublicKeyInfo
from .ocsp import OCSPResponse
from .x509 import Attributes, Certificate, Extensions, GeneralName, GeneralNames, Name
# These structures are taken from
# ftp://ftp.rsasecurity.com/pub/pkcs/ascii/pkcs-6.asc
class ExtendedCertificateInfo(Sequence):
_fields = [
('version', Integer),
('certificate', Certificate),
('attributes', Attributes),
]
class ExtendedCertificate(Sequence):
_fields = [
('extended_certificate_info', ExtendedCertificateInfo),
('signature_algorithm', SignedDigestAlgorithm),
('signature', OctetBitString),
]
# These structures are taken from https://tools.ietf.org/html/rfc5652,
# https://tools.ietf.org/html/rfc5083, http://tools.ietf.org/html/rfc2315,
# https://tools.ietf.org/html/rfc5940, https://tools.ietf.org/html/rfc3274,
# https://tools.ietf.org/html/rfc3281
class CMSVersion(Integer):
_map = {
0: 'v0',
1: 'v1',
2: 'v2',
3: 'v3',
4: 'v4',
5: 'v5',
}
class CMSAttributeType(ObjectIdentifier):
_map = {
'1.2.840.113549.1.9.3': 'content_type',
'1.2.840.113549.1.9.4': 'message_digest',
'1.2.840.113549.1.9.5': 'signing_time',
'1.2.840.113549.1.9.6': 'counter_signature',
# https://tools.ietf.org/html/rfc3161#page-20
'1.2.840.113549.1.9.16.2.14': 'signature_time_stamp_token',
# https://tools.ietf.org/html/rfc6211#page-5
'1.2.840.113549.1.9.52': 'cms_algorithm_protection',
}
class Time(Choice):
_alternatives = [
('utc_time', UTCTime),
('generalized_time', GeneralizedTime),
]
class ContentType(ObjectIdentifier):
_map = {
'1.2.840.113549.1.7.1': 'data',
'1.2.840.113549.1.7.2': 'signed_data',
'1.2.840.113549.1.7.3': 'enveloped_data',
'1.2.840.113549.1.7.4': 'signed_and_enveloped_data',
'1.2.840.113549.1.7.5': 'digested_data',
'1.2.840.113549.1.7.6': 'encrypted_data',
'1.2.840.113549.1.9.16.1.2': 'authenticated_data',
'1.2.840.113549.1.9.16.1.9': 'compressed_data',
'1.2.840.113549.1.9.16.1.23': 'authenticated_enveloped_data',
}
class CMSAlgorithmProtection(Sequence):
_fields = [
('digest_algorithm', DigestAlgorithm),
('signature_algorithm', SignedDigestAlgorithm, {'implicit': 1, 'optional': True}),
('mac_algorithm', HmacAlgorithm, {'implicit': 2, 'optional': True}),
]
class SetOfContentType(SetOf):
_child_spec = ContentType
class SetOfOctetString(SetOf):
_child_spec = OctetString
class SetOfTime(SetOf):
_child_spec = Time
class SetOfAny(SetOf):
_child_spec = Any
class SetOfCMSAlgorithmProtection(SetOf):
_child_spec = CMSAlgorithmProtection
class CMSAttribute(Sequence):
_fields = [
('type', CMSAttributeType),
('values', None),
]
_oid_specs = {}
def _values_spec(self):
return self._oid_specs.get(self['type'].native, SetOfAny)
_spec_callbacks = {
'values': _values_spec
}
class CMSAttributes(SetOf):
_child_spec = CMSAttribute
class IssuerSerial(Sequence):
_fields = [
('issuer', GeneralNames),
('serial', Integer),
('issuer_uid', OctetBitString, {'optional': True}),
]
class AttCertVersion(Integer):
_map = {
0: 'v1',
1: 'v2',
}
class AttCertSubject(Choice):
_alternatives = [
('base_certificate_id', IssuerSerial, {'explicit': 0}),
('subject_name', GeneralNames, {'explicit': 1}),
]
class AttCertValidityPeriod(Sequence):
_fields = [
('not_before_time', GeneralizedTime),
('not_after_time', GeneralizedTime),
]
class AttributeCertificateInfoV1(Sequence):
_fields = [
('version', AttCertVersion, {'default': 'v1'}),
('subject', AttCertSubject),
('issuer', GeneralNames),
('signature', SignedDigestAlgorithm),
('serial_number', Integer),
('att_cert_validity_period', AttCertValidityPeriod),
('attributes', Attributes),
('issuer_unique_id', OctetBitString, {'optional': True}),
('extensions', Extensions, {'optional': True}),
]
class AttributeCertificateV1(Sequence):
_fields = [
('ac_info', AttributeCertificateInfoV1),
('signature_algorithm', SignedDigestAlgorithm),
('signature', OctetBitString),
]
class DigestedObjectType(Enumerated):
_map = {
0: 'public_key',
1: 'public_key_cert',
2: 'other_objy_types',
}
class ObjectDigestInfo(Sequence):
_fields = [
('digested_object_type', DigestedObjectType),
('other_object_type_id', ObjectIdentifier, {'optional': True}),
('digest_algorithm', DigestAlgorithm),
('object_digest', OctetBitString),
]
class Holder(Sequence):
_fields = [
('base_certificate_id', IssuerSerial, {'implicit': 0, 'optional': True}),
('entity_name', GeneralNames, {'implicit': 1, 'optional': True}),
('object_digest_info', ObjectDigestInfo, {'implicit': 2, 'optional': True}),
]
class V2Form(Sequence):
_fields = [
('issuer_name', GeneralNames, {'optional': True}),
('base_certificate_id', IssuerSerial, {'explicit': 0, 'optional': True}),
('object_digest_info', ObjectDigestInfo, {'explicit': 1, 'optional': True}),
]
class AttCertIssuer(Choice):
_alternatives = [
('v1_form', GeneralNames),
('v2_form', V2Form, {'explicit': 0}),
]
class IetfAttrValue(Choice):
_alternatives = [
('octets', OctetString),
('oid', ObjectIdentifier),
('string', UTF8String),
]
class IetfAttrValues(SequenceOf):
_child_spec = IetfAttrValue
class IetfAttrSyntax(Sequence):
_fields = [
('policy_authority', GeneralNames, {'implicit': 0, 'optional': True}),
('values', IetfAttrValues),
]
class SetOfIetfAttrSyntax(SetOf):
_child_spec = IetfAttrSyntax
class SvceAuthInfo(Sequence):
_fields = [
('service', GeneralName),
('ident', GeneralName),
('auth_info', OctetString, {'optional': True}),
]
class SetOfSvceAuthInfo(SetOf):
_child_spec = SvceAuthInfo
class RoleSyntax(Sequence):
_fields = [
('role_authority', GeneralNames, {'implicit': 0, 'optional': True}),
('role_name', GeneralName, {'implicit': 1}),
]
class SetOfRoleSyntax(SetOf):
_child_spec = RoleSyntax
class ClassList(BitString):
_map = {
0: 'unmarked',
1: 'unclassified',
2: 'restricted',
3: 'confidential',
4: 'secret',
5: 'top_secret',
}
class SecurityCategory(Sequence):
_fields = [
('type', ObjectIdentifier, {'implicit': 0}),
('value', Any, {'implicit': 1}),
]
class SetOfSecurityCategory(SetOf):
_child_spec = SecurityCategory
class Clearance(Sequence):
_fields = [
('policy_id', ObjectIdentifier, {'implicit': 0}),
('class_list', ClassList, {'implicit': 1, 'default': 'unclassified'}),
('security_categories', SetOfSecurityCategory, {'implicit': 2, 'optional': True}),
]
class SetOfClearance(SetOf):
_child_spec = Clearance
class BigTime(Sequence):
_fields = [
('major', Integer),
('fractional_seconds', Integer),
('sign', Integer, {'optional': True}),
]
class LeapData(Sequence):
_fields = [
('leap_time', BigTime),
('action', Integer),
]
class SetOfLeapData(SetOf):
_child_spec = LeapData
class TimingMetrics(Sequence):
_fields = [
('ntp_time', BigTime),
('offset', BigTime),
('delay', BigTime),
('expiration', BigTime),
('leap_event', SetOfLeapData, {'optional': True}),
]
class SetOfTimingMetrics(SetOf):
_child_spec = TimingMetrics
class TimingPolicy(Sequence):
_fields = [
('policy_id', SequenceOf, {'spec': ObjectIdentifier}),
('max_offset', BigTime, {'explicit': 0, 'optional': True}),
('max_delay', BigTime, {'explicit': 1, 'optional': True}),
]
class SetOfTimingPolicy(SetOf):
_child_spec = TimingPolicy
class AttCertAttributeType(ObjectIdentifier):
_map = {
'1.3.6.1.5.5.7.10.1': 'authentication_info',
'1.3.6.1.5.5.7.10.2': 'access_identity',
'1.3.6.1.5.5.7.10.3': 'charging_identity',
'1.3.6.1.5.5.7.10.4': 'group',
'2.5.4.72': 'role',
'2.5.4.55': 'clearance',
'1.3.6.1.4.1.601.10.4.1': 'timing_metrics',
'1.3.6.1.4.1.601.10.4.2': 'timing_policy',
}
class AttCertAttribute(Sequence):
_fields = [
('type', AttCertAttributeType),
('values', None),
]
_oid_specs = {
'authentication_info': SetOfSvceAuthInfo,
'access_identity': SetOfSvceAuthInfo,
'charging_identity': SetOfIetfAttrSyntax,
'group': SetOfIetfAttrSyntax,
'role': SetOfRoleSyntax,
'clearance': SetOfClearance,
'timing_metrics': SetOfTimingMetrics,
'timing_policy': SetOfTimingPolicy,
}
def _values_spec(self):
return self._oid_specs.get(self['type'].native, SetOfAny)
_spec_callbacks = {
'values': _values_spec
}
class AttCertAttributes(SequenceOf):
_child_spec = AttCertAttribute
class AttributeCertificateInfoV2(Sequence):
_fields = [
('version', AttCertVersion),
('holder', Holder),
('issuer', AttCertIssuer),
('signature', SignedDigestAlgorithm),
('serial_number', Integer),
('att_cert_validity_period', AttCertValidityPeriod),
('attributes', AttCertAttributes),
('issuer_unique_id', OctetBitString, {'optional': True}),
('extensions', Extensions, {'optional': True}),
]
class AttributeCertificateV2(Sequence):
# Handle the situation where a V2 cert is encoded as V1
_bad_tag = 1
_fields = [
('ac_info', AttributeCertificateInfoV2),
('signature_algorithm', SignedDigestAlgorithm),
('signature', OctetBitString),
]
class OtherCertificateFormat(Sequence):
_fields = [
('other_cert_format', ObjectIdentifier),
('other_cert', Any),
]
class CertificateChoices(Choice):
_alternatives = [
('certificate', Certificate),
('extended_certificate', ExtendedCertificate, {'implicit': 0}),
('v1_attr_cert', AttributeCertificateV1, {'implicit': 1}),
('v2_attr_cert', AttributeCertificateV2, {'implicit': 2}),
('other', OtherCertificateFormat, {'implicit': 3}),
]
def validate(self, class_, tag, contents):
"""
Ensures that the class and tag specified exist as an alternative. This
custom version fixes parsing broken encodings where a V2 attribute
certificate is encoded as a V1
:param class_:
The integer class_ from the encoded value header
:param tag:
The integer tag from the encoded value header
:param contents:
A byte string of the contents of the value - used when the object
is explicitly tagged
:raises:
ValueError - when value is not a valid alternative
"""
super(CertificateChoices, self).validate(class_, tag, contents)
if self._choice == 2:
if AttCertVersion.load(Sequence.load(contents)[0].dump()).native == 'v2':
self._choice = 3
class CertificateSet(SetOf):
_child_spec = CertificateChoices
class ContentInfo(Sequence):
_fields = [
('content_type', ContentType),
('content', Any, {'explicit': 0, 'optional': True}),
]
_oid_pair = ('content_type', 'content')
_oid_specs = {}
class SetOfContentInfo(SetOf):
_child_spec = ContentInfo
class EncapsulatedContentInfo(Sequence):
_fields = [
('content_type', ContentType),
('content', ParsableOctetString, {'explicit': 0, 'optional': True}),
]
_oid_pair = ('content_type', 'content')
_oid_specs = {}
class IssuerAndSerialNumber(Sequence):
_fields = [
('issuer', Name),
('serial_number', Integer),
]
class SignerIdentifier(Choice):
_alternatives = [
('issuer_and_serial_number', IssuerAndSerialNumber),
('subject_key_identifier', OctetString, {'implicit': 0}),
]
class DigestAlgorithms(SetOf):
_child_spec = DigestAlgorithm
class CertificateRevocationLists(SetOf):
_child_spec = CertificateList
class SCVPReqRes(Sequence):
_fields = [
('request', ContentInfo, {'explicit': 0, 'optional': True}),
('response', ContentInfo),
]
class OtherRevInfoFormatId(ObjectIdentifier):
_map = {
'1.3.6.1.5.5.7.16.2': 'ocsp_response',
'1.3.6.1.5.5.7.16.4': 'scvp',
}
class OtherRevocationInfoFormat(Sequence):
_fields = [
('other_rev_info_format', OtherRevInfoFormatId),
('other_rev_info', Any),
]
_oid_pair = ('other_rev_info_format', 'other_rev_info')
_oid_specs = {
'ocsp_response': OCSPResponse,
'scvp': SCVPReqRes,
}
class RevocationInfoChoice(Choice):
_alternatives = [
('crl', CertificateList),
('other', OtherRevocationInfoFormat, {'implicit': 1}),
]
class RevocationInfoChoices(SetOf):
_child_spec = RevocationInfoChoice
class SignerInfo(Sequence):
_fields = [
('version', CMSVersion),
('sid', SignerIdentifier),
('digest_algorithm', DigestAlgorithm),
('signed_attrs', CMSAttributes, {'implicit': 0, 'optional': True}),
('signature_algorithm', SignedDigestAlgorithm),
('signature', OctetString),
('unsigned_attrs', CMSAttributes, {'implicit': 1, 'optional': True}),
]
class SignerInfos(SetOf):
_child_spec = SignerInfo
class SignedData(Sequence):
_fields = [
('version', CMSVersion),
('digest_algorithms', DigestAlgorithms),
('encap_content_info', None),
('certificates', CertificateSet, {'implicit': 0, 'optional': True}),
('crls', RevocationInfoChoices, {'implicit': 1, 'optional': True}),
('signer_infos', SignerInfos),
]
def _encap_content_info_spec(self):
# If the encap_content_info is version v1, then this could be a PKCS#7
# structure, or a CMS structure. CMS wraps the encoded value in an
# Octet String tag.
# If the version is greater than 1, it is definite CMS
if self['version'].native != 'v1':
return EncapsulatedContentInfo
# Otherwise, the ContentInfo spec from PKCS#7 will be compatible with
# CMS v1 (which only allows Data, an Octet String) and PKCS#7, which
# allows Any
return ContentInfo
_spec_callbacks = {
'encap_content_info': _encap_content_info_spec
}
class OriginatorInfo(Sequence):
_fields = [
('certs', CertificateSet, {'implicit': 0, 'optional': True}),
('crls', RevocationInfoChoices, {'implicit': 1, 'optional': True}),
]
class RecipientIdentifier(Choice):
_alternatives = [
('issuer_and_serial_number', IssuerAndSerialNumber),
('subject_key_identifier', OctetString, {'implicit': 0}),
]
class KeyEncryptionAlgorithmId(ObjectIdentifier):
_map = {
'1.2.840.113549.1.1.1': 'rsa',
'2.16.840.1.101.3.4.1.5': 'aes128_wrap',
'2.16.840.1.101.3.4.1.8': 'aes128_wrap_pad',
'2.16.840.1.101.3.4.1.25': 'aes192_wrap',
'2.16.840.1.101.3.4.1.28': 'aes192_wrap_pad',
'2.16.840.1.101.3.4.1.45': 'aes256_wrap',
'2.16.840.1.101.3.4.1.48': 'aes256_wrap_pad',
}
class KeyEncryptionAlgorithm(_ForceNullParameters, Sequence):
_fields = [
('algorithm', KeyEncryptionAlgorithmId),
('parameters', Any, {'optional': True}),
]
class KeyTransRecipientInfo(Sequence):
_fields = [
('version', CMSVersion),
('rid', RecipientIdentifier),
('key_encryption_algorithm', KeyEncryptionAlgorithm),
('encrypted_key', OctetString),
]
class OriginatorIdentifierOrKey(Choice):
_alternatives = [
('issuer_and_serial_number', IssuerAndSerialNumber),
('subject_key_identifier', OctetString, {'implicit': 0}),
('originator_key', PublicKeyInfo, {'implicit': 1}),
]
class OtherKeyAttribute(Sequence):
_fields = [
('key_attr_id', ObjectIdentifier),
('key_attr', Any),
]
class RecipientKeyIdentifier(Sequence):
_fields = [
('subject_key_identifier', OctetString),
('date', GeneralizedTime, {'optional': True}),
('other', OtherKeyAttribute, {'optional': True}),
]
class KeyAgreementRecipientIdentifier(Choice):
_alternatives = [
('issuer_and_serial_number', IssuerAndSerialNumber),
('r_key_id', RecipientKeyIdentifier, {'implicit': 0}),
]
class RecipientEncryptedKey(Sequence):
_fields = [
('rid', KeyAgreementRecipientIdentifier),
('encrypted_key', OctetString),
]
class RecipientEncryptedKeys(SequenceOf):
_child_spec = RecipientEncryptedKey
class KeyAgreeRecipientInfo(Sequence):
_fields = [
('version', CMSVersion),
('originator', OriginatorIdentifierOrKey, {'explicit': 0}),
('ukm', OctetString, {'explicit': 1, 'optional': True}),
('key_encryption_algorithm', KeyEncryptionAlgorithm),
('recipient_encrypted_keys', RecipientEncryptedKeys),
]
class KEKIdentifier(Sequence):
_fields = [
('key_identifier', OctetString),
('date', GeneralizedTime, {'optional': True}),
('other', OtherKeyAttribute, {'optional': True}),
]
class KEKRecipientInfo(Sequence):
_fields = [
('version', CMSVersion),
('kekid', KEKIdentifier),
('key_encryption_algorithm', KeyEncryptionAlgorithm),
('encrypted_key', OctetString),
]
class PasswordRecipientInfo(Sequence):
_fields = [
('version', CMSVersion),
('key_derivation_algorithm', KdfAlgorithm, {'implicit': 0, 'optional': True}),
('key_encryption_algorithm', KeyEncryptionAlgorithm),
('encrypted_key', OctetString),
]
class OtherRecipientInfo(Sequence):
_fields = [
('ori_type', ObjectIdentifier),
('ori_value', Any),
]
class RecipientInfo(Choice):
_alternatives = [
('ktri', KeyTransRecipientInfo),
('kari', KeyAgreeRecipientInfo, {'implicit': 1}),
('kekri', KEKRecipientInfo, {'implicit': 2}),
('pwri', PasswordRecipientInfo, {'implicit': 3}),
('ori', OtherRecipientInfo, {'implicit': 4}),
]
class RecipientInfos(SetOf):
_child_spec = RecipientInfo
class EncryptedContentInfo(Sequence):
_fields = [
('content_type', ContentType),
('content_encryption_algorithm', EncryptionAlgorithm),
('encrypted_content', OctetString, {'implicit': 0, 'optional': True}),
]
class EnvelopedData(Sequence):
_fields = [
('version', CMSVersion),
('originator_info', OriginatorInfo, {'implicit': 0, 'optional': True}),
('recipient_infos', RecipientInfos),
('encrypted_content_info', EncryptedContentInfo),
('unprotected_attrs', CMSAttributes, {'implicit': 1, 'optional': True}),
]
class SignedAndEnvelopedData(Sequence):
_fields = [
('version', CMSVersion),
('recipient_infos', RecipientInfos),
('digest_algorithms', DigestAlgorithms),
('encrypted_content_info', EncryptedContentInfo),
('certificates', CertificateSet, {'implicit': 0, 'optional': True}),
('crls', CertificateRevocationLists, {'implicit': 1, 'optional': True}),
('signer_infos', SignerInfos),
]
class DigestedData(Sequence):
_fields = [
('version', CMSVersion),
('digest_algorithm', DigestAlgorithm),
('encap_content_info', None),
('digest', OctetString),
]
def _encap_content_info_spec(self):
# If the encap_content_info is version v1, then this could be a PKCS#7
# structure, or a CMS structure. CMS wraps the encoded value in an
# Octet String tag.
# If the version is greater than 1, it is definite CMS
if self['version'].native != 'v1':
return EncapsulatedContentInfo
# Otherwise, the ContentInfo spec from PKCS#7 will be compatible with
# CMS v1 (which only allows Data, an Octet String) and PKCS#7, which
# allows Any
return ContentInfo
_spec_callbacks = {
'encap_content_info': _encap_content_info_spec
}
class EncryptedData(Sequence):
_fields = [
('version', CMSVersion),
('encrypted_content_info', EncryptedContentInfo),
('unprotected_attrs', CMSAttributes, {'implicit': 1, 'optional': True}),
]
class AuthenticatedData(Sequence):
_fields = [
('version', CMSVersion),
('originator_info', OriginatorInfo, {'implicit': 0, 'optional': True}),
('recipient_infos', RecipientInfos),
('mac_algorithm', HmacAlgorithm),
('digest_algorithm', DigestAlgorithm, {'implicit': 1, 'optional': True}),
# This does not require the _spec_callbacks approach of SignedData and
# DigestedData since AuthenticatedData was not part of PKCS#7
('encap_content_info', EncapsulatedContentInfo),
('auth_attrs', CMSAttributes, {'implicit': 2, 'optional': True}),
('mac', OctetString),
('unauth_attrs', CMSAttributes, {'implicit': 3, 'optional': True}),
]
class AuthEnvelopedData(Sequence):
_fields = [
('version', CMSVersion),
('originator_info', OriginatorInfo, {'implicit': 0, 'optional': True}),
('recipient_infos', RecipientInfos),
('auth_encrypted_content_info', EncryptedContentInfo),
('auth_attrs', CMSAttributes, {'implicit': 1, 'optional': True}),
('mac', OctetString),
('unauth_attrs', CMSAttributes, {'implicit': 2, 'optional': True}),
]
class CompressionAlgorithmId(ObjectIdentifier):
_map = {
'1.2.840.113549.1.9.16.3.8': 'zlib',
}
class CompressionAlgorithm(Sequence):
_fields = [
('algorithm', CompressionAlgorithmId),
('parameters', Any, {'optional': True}),
]
class CompressedData(Sequence):
_fields = [
('version', CMSVersion),
('compression_algorithm', CompressionAlgorithm),
('encap_content_info', EncapsulatedContentInfo),
]
_decompressed = None
@property
def decompressed(self):
if self._decompressed is None:
if zlib is None:
raise SystemError('The zlib module is not available')
self._decompressed = zlib.decompress(self['encap_content_info']['content'].native)
return self._decompressed
ContentInfo._oid_specs = {
'data': OctetString,
'signed_data': SignedData,
'enveloped_data': EnvelopedData,
'signed_and_enveloped_data': SignedAndEnvelopedData,
'digested_data': DigestedData,
'encrypted_data': EncryptedData,
'authenticated_data': AuthenticatedData,
'compressed_data': CompressedData,
'authenticated_enveloped_data': AuthEnvelopedData,
}
EncapsulatedContentInfo._oid_specs = {
'signed_data': SignedData,
'enveloped_data': EnvelopedData,
'signed_and_enveloped_data': SignedAndEnvelopedData,
'digested_data': DigestedData,
'encrypted_data': EncryptedData,
'authenticated_data': AuthenticatedData,
'compressed_data': CompressedData,
'authenticated_enveloped_data': AuthEnvelopedData,
}
CMSAttribute._oid_specs = {
'content_type': SetOfContentType,
'message_digest': SetOfOctetString,
'signing_time': SetOfTime,
'counter_signature': SignerInfos,
'signature_time_stamp_token': SetOfContentInfo,
'cms_algorithm_protection': SetOfCMSAlgorithmProtection,
}
| <filename>Lib/site-packages/asn1crypto/cms.py
# coding: utf-8
"""
ASN.1 type classes for cryptographic message syntax (CMS). Structures are also
compatible with PKCS#7. Exports the following items:
- AuthenticatedData()
- AuthEnvelopedData()
- CompressedData()
- ContentInfo()
- DigestedData()
- EncryptedData()
- EnvelopedData()
- SignedAndEnvelopedData()
- SignedData()
Other type classes are defined that help compose the types listed above.
Most CMS structures in the wild are formatted as ContentInfo encapsulating one of the other types.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
try:
import zlib
except (ImportError):
zlib = None
from .algos import (
_ForceNullParameters,
DigestAlgorithm,
EncryptionAlgorithm,
HmacAlgorithm,
KdfAlgorithm,
SignedDigestAlgorithm,
)
from .core import (
Any,
BitString,
Choice,
Enumerated,
GeneralizedTime,
Integer,
ObjectIdentifier,
OctetBitString,
OctetString,
ParsableOctetString,
Sequence,
SequenceOf,
SetOf,
UTCTime,
UTF8String,
)
from .crl import CertificateList
from .keys import PublicKeyInfo
from .ocsp import OCSPResponse
from .x509 import Attributes, Certificate, Extensions, GeneralName, GeneralNames, Name
# These structures are taken from
# ftp://ftp.rsasecurity.com/pub/pkcs/ascii/pkcs-6.asc
class ExtendedCertificateInfo(Sequence):
_fields = [
('version', Integer),
('certificate', Certificate),
('attributes', Attributes),
]
class ExtendedCertificate(Sequence):
_fields = [
('extended_certificate_info', ExtendedCertificateInfo),
('signature_algorithm', SignedDigestAlgorithm),
('signature', OctetBitString),
]
# These structures are taken from https://tools.ietf.org/html/rfc5652,
# https://tools.ietf.org/html/rfc5083, http://tools.ietf.org/html/rfc2315,
# https://tools.ietf.org/html/rfc5940, https://tools.ietf.org/html/rfc3274,
# https://tools.ietf.org/html/rfc3281
class CMSVersion(Integer):
_map = {
0: 'v0',
1: 'v1',
2: 'v2',
3: 'v3',
4: 'v4',
5: 'v5',
}
class CMSAttributeType(ObjectIdentifier):
_map = {
'1.2.840.113549.1.9.3': 'content_type',
'1.2.840.113549.1.9.4': 'message_digest',
'1.2.840.113549.1.9.5': 'signing_time',
'1.2.840.113549.1.9.6': 'counter_signature',
# https://tools.ietf.org/html/rfc3161#page-20
'1.2.840.113549.1.9.16.2.14': 'signature_time_stamp_token',
# https://tools.ietf.org/html/rfc6211#page-5
'1.2.840.113549.1.9.52': 'cms_algorithm_protection',
}
class Time(Choice):
_alternatives = [
('utc_time', UTCTime),
('generalized_time', GeneralizedTime),
]
class ContentType(ObjectIdentifier):
_map = {
'1.2.840.113549.1.7.1': 'data',
'1.2.840.113549.1.7.2': 'signed_data',
'1.2.840.113549.1.7.3': 'enveloped_data',
'1.2.840.113549.1.7.4': 'signed_and_enveloped_data',
'1.2.840.113549.1.7.5': 'digested_data',
'1.2.840.113549.1.7.6': 'encrypted_data',
'1.2.840.113549.1.9.16.1.2': 'authenticated_data',
'1.2.840.113549.1.9.16.1.9': 'compressed_data',
'1.2.840.113549.1.9.16.1.23': 'authenticated_enveloped_data',
}
class CMSAlgorithmProtection(Sequence):
_fields = [
('digest_algorithm', DigestAlgorithm),
('signature_algorithm', SignedDigestAlgorithm, {'implicit': 1, 'optional': True}),
('mac_algorithm', HmacAlgorithm, {'implicit': 2, 'optional': True}),
]
class SetOfContentType(SetOf):
_child_spec = ContentType
class SetOfOctetString(SetOf):
_child_spec = OctetString
class SetOfTime(SetOf):
_child_spec = Time
class SetOfAny(SetOf):
_child_spec = Any
class SetOfCMSAlgorithmProtection(SetOf):
_child_spec = CMSAlgorithmProtection
class CMSAttribute(Sequence):
_fields = [
('type', CMSAttributeType),
('values', None),
]
_oid_specs = {}
def _values_spec(self):
return self._oid_specs.get(self['type'].native, SetOfAny)
_spec_callbacks = {
'values': _values_spec
}
class CMSAttributes(SetOf):
_child_spec = CMSAttribute
class IssuerSerial(Sequence):
_fields = [
('issuer', GeneralNames),
('serial', Integer),
('issuer_uid', OctetBitString, {'optional': True}),
]
class AttCertVersion(Integer):
_map = {
0: 'v1',
1: 'v2',
}
class AttCertSubject(Choice):
_alternatives = [
('base_certificate_id', IssuerSerial, {'explicit': 0}),
('subject_name', GeneralNames, {'explicit': 1}),
]
class AttCertValidityPeriod(Sequence):
_fields = [
('not_before_time', GeneralizedTime),
('not_after_time', GeneralizedTime),
]
class AttributeCertificateInfoV1(Sequence):
_fields = [
('version', AttCertVersion, {'default': 'v1'}),
('subject', AttCertSubject),
('issuer', GeneralNames),
('signature', SignedDigestAlgorithm),
('serial_number', Integer),
('att_cert_validity_period', AttCertValidityPeriod),
('attributes', Attributes),
('issuer_unique_id', OctetBitString, {'optional': True}),
('extensions', Extensions, {'optional': True}),
]
class AttributeCertificateV1(Sequence):
_fields = [
('ac_info', AttributeCertificateInfoV1),
('signature_algorithm', SignedDigestAlgorithm),
('signature', OctetBitString),
]
class DigestedObjectType(Enumerated):
_map = {
0: 'public_key',
1: 'public_key_cert',
2: 'other_objy_types',
}
class ObjectDigestInfo(Sequence):
_fields = [
('digested_object_type', DigestedObjectType),
('other_object_type_id', ObjectIdentifier, {'optional': True}),
('digest_algorithm', DigestAlgorithm),
('object_digest', OctetBitString),
]
class Holder(Sequence):
_fields = [
('base_certificate_id', IssuerSerial, {'implicit': 0, 'optional': True}),
('entity_name', GeneralNames, {'implicit': 1, 'optional': True}),
('object_digest_info', ObjectDigestInfo, {'implicit': 2, 'optional': True}),
]
class V2Form(Sequence):
_fields = [
('issuer_name', GeneralNames, {'optional': True}),
('base_certificate_id', IssuerSerial, {'explicit': 0, 'optional': True}),
('object_digest_info', ObjectDigestInfo, {'explicit': 1, 'optional': True}),
]
class AttCertIssuer(Choice):
_alternatives = [
('v1_form', GeneralNames),
('v2_form', V2Form, {'explicit': 0}),
]
class IetfAttrValue(Choice):
_alternatives = [
('octets', OctetString),
('oid', ObjectIdentifier),
('string', UTF8String),
]
class IetfAttrValues(SequenceOf):
_child_spec = IetfAttrValue
class IetfAttrSyntax(Sequence):
_fields = [
('policy_authority', GeneralNames, {'implicit': 0, 'optional': True}),
('values', IetfAttrValues),
]
class SetOfIetfAttrSyntax(SetOf):
_child_spec = IetfAttrSyntax
class SvceAuthInfo(Sequence):
_fields = [
('service', GeneralName),
('ident', GeneralName),
('auth_info', OctetString, {'optional': True}),
]
class SetOfSvceAuthInfo(SetOf):
_child_spec = SvceAuthInfo
class RoleSyntax(Sequence):
_fields = [
('role_authority', GeneralNames, {'implicit': 0, 'optional': True}),
('role_name', GeneralName, {'implicit': 1}),
]
class SetOfRoleSyntax(SetOf):
_child_spec = RoleSyntax
class ClassList(BitString):
_map = {
0: 'unmarked',
1: 'unclassified',
2: 'restricted',
3: 'confidential',
4: 'secret',
5: 'top_secret',
}
class SecurityCategory(Sequence):
_fields = [
('type', ObjectIdentifier, {'implicit': 0}),
('value', Any, {'implicit': 1}),
]
class SetOfSecurityCategory(SetOf):
_child_spec = SecurityCategory
class Clearance(Sequence):
_fields = [
('policy_id', ObjectIdentifier, {'implicit': 0}),
('class_list', ClassList, {'implicit': 1, 'default': 'unclassified'}),
('security_categories', SetOfSecurityCategory, {'implicit': 2, 'optional': True}),
]
class SetOfClearance(SetOf):
_child_spec = Clearance
class BigTime(Sequence):
_fields = [
('major', Integer),
('fractional_seconds', Integer),
('sign', Integer, {'optional': True}),
]
class LeapData(Sequence):
_fields = [
('leap_time', BigTime),
('action', Integer),
]
class SetOfLeapData(SetOf):
_child_spec = LeapData
class TimingMetrics(Sequence):
_fields = [
('ntp_time', BigTime),
('offset', BigTime),
('delay', BigTime),
('expiration', BigTime),
('leap_event', SetOfLeapData, {'optional': True}),
]
class SetOfTimingMetrics(SetOf):
_child_spec = TimingMetrics
class TimingPolicy(Sequence):
_fields = [
('policy_id', SequenceOf, {'spec': ObjectIdentifier}),
('max_offset', BigTime, {'explicit': 0, 'optional': True}),
('max_delay', BigTime, {'explicit': 1, 'optional': True}),
]
class SetOfTimingPolicy(SetOf):
_child_spec = TimingPolicy
class AttCertAttributeType(ObjectIdentifier):
_map = {
'1.3.6.1.5.5.7.10.1': 'authentication_info',
'1.3.6.1.5.5.7.10.2': 'access_identity',
'1.3.6.1.5.5.7.10.3': 'charging_identity',
'1.3.6.1.5.5.7.10.4': 'group',
'2.5.4.72': 'role',
'2.5.4.55': 'clearance',
'1.3.6.1.4.1.601.10.4.1': 'timing_metrics',
'1.3.6.1.4.1.601.10.4.2': 'timing_policy',
}
class AttCertAttribute(Sequence):
_fields = [
('type', AttCertAttributeType),
('values', None),
]
_oid_specs = {
'authentication_info': SetOfSvceAuthInfo,
'access_identity': SetOfSvceAuthInfo,
'charging_identity': SetOfIetfAttrSyntax,
'group': SetOfIetfAttrSyntax,
'role': SetOfRoleSyntax,
'clearance': SetOfClearance,
'timing_metrics': SetOfTimingMetrics,
'timing_policy': SetOfTimingPolicy,
}
def _values_spec(self):
return self._oid_specs.get(self['type'].native, SetOfAny)
_spec_callbacks = {
'values': _values_spec
}
class AttCertAttributes(SequenceOf):
_child_spec = AttCertAttribute
class AttributeCertificateInfoV2(Sequence):
_fields = [
('version', AttCertVersion),
('holder', Holder),
('issuer', AttCertIssuer),
('signature', SignedDigestAlgorithm),
('serial_number', Integer),
('att_cert_validity_period', AttCertValidityPeriod),
('attributes', AttCertAttributes),
('issuer_unique_id', OctetBitString, {'optional': True}),
('extensions', Extensions, {'optional': True}),
]
class AttributeCertificateV2(Sequence):
# Handle the situation where a V2 cert is encoded as V1
_bad_tag = 1
_fields = [
('ac_info', AttributeCertificateInfoV2),
('signature_algorithm', SignedDigestAlgorithm),
('signature', OctetBitString),
]
class OtherCertificateFormat(Sequence):
_fields = [
('other_cert_format', ObjectIdentifier),
('other_cert', Any),
]
class CertificateChoices(Choice):
_alternatives = [
('certificate', Certificate),
('extended_certificate', ExtendedCertificate, {'implicit': 0}),
('v1_attr_cert', AttributeCertificateV1, {'implicit': 1}),
('v2_attr_cert', AttributeCertificateV2, {'implicit': 2}),
('other', OtherCertificateFormat, {'implicit': 3}),
]
def validate(self, class_, tag, contents):
"""
Ensures that the class and tag specified exist as an alternative. This
custom version fixes parsing broken encodings where a V2 attribute
certificate is encoded as a V1
:param class_:
The integer class_ from the encoded value header
:param tag:
The integer tag from the encoded value header
:param contents:
A byte string of the contents of the value - used when the object
is explicitly tagged
:raises:
ValueError - when value is not a valid alternative
"""
super(CertificateChoices, self).validate(class_, tag, contents)
if self._choice == 2:
if AttCertVersion.load(Sequence.load(contents)[0].dump()).native == 'v2':
self._choice = 3
class CertificateSet(SetOf):
_child_spec = CertificateChoices
class ContentInfo(Sequence):
_fields = [
('content_type', ContentType),
('content', Any, {'explicit': 0, 'optional': True}),
]
_oid_pair = ('content_type', 'content')
_oid_specs = {}
class SetOfContentInfo(SetOf):
_child_spec = ContentInfo
class EncapsulatedContentInfo(Sequence):
_fields = [
('content_type', ContentType),
('content', ParsableOctetString, {'explicit': 0, 'optional': True}),
]
_oid_pair = ('content_type', 'content')
_oid_specs = {}
class IssuerAndSerialNumber(Sequence):
_fields = [
('issuer', Name),
('serial_number', Integer),
]
class SignerIdentifier(Choice):
_alternatives = [
('issuer_and_serial_number', IssuerAndSerialNumber),
('subject_key_identifier', OctetString, {'implicit': 0}),
]
class DigestAlgorithms(SetOf):
_child_spec = DigestAlgorithm
class CertificateRevocationLists(SetOf):
_child_spec = CertificateList
class SCVPReqRes(Sequence):
_fields = [
('request', ContentInfo, {'explicit': 0, 'optional': True}),
('response', ContentInfo),
]
class OtherRevInfoFormatId(ObjectIdentifier):
_map = {
'1.3.6.1.5.5.7.16.2': 'ocsp_response',
'1.3.6.1.5.5.7.16.4': 'scvp',
}
class OtherRevocationInfoFormat(Sequence):
_fields = [
('other_rev_info_format', OtherRevInfoFormatId),
('other_rev_info', Any),
]
_oid_pair = ('other_rev_info_format', 'other_rev_info')
_oid_specs = {
'ocsp_response': OCSPResponse,
'scvp': SCVPReqRes,
}
class RevocationInfoChoice(Choice):
_alternatives = [
('crl', CertificateList),
('other', OtherRevocationInfoFormat, {'implicit': 1}),
]
class RevocationInfoChoices(SetOf):
_child_spec = RevocationInfoChoice
class SignerInfo(Sequence):
_fields = [
('version', CMSVersion),
('sid', SignerIdentifier),
('digest_algorithm', DigestAlgorithm),
('signed_attrs', CMSAttributes, {'implicit': 0, 'optional': True}),
('signature_algorithm', SignedDigestAlgorithm),
('signature', OctetString),
('unsigned_attrs', CMSAttributes, {'implicit': 1, 'optional': True}),
]
class SignerInfos(SetOf):
_child_spec = SignerInfo
class SignedData(Sequence):
_fields = [
('version', CMSVersion),
('digest_algorithms', DigestAlgorithms),
('encap_content_info', None),
('certificates', CertificateSet, {'implicit': 0, 'optional': True}),
('crls', RevocationInfoChoices, {'implicit': 1, 'optional': True}),
('signer_infos', SignerInfos),
]
def _encap_content_info_spec(self):
# If the encap_content_info is version v1, then this could be a PKCS#7
# structure, or a CMS structure. CMS wraps the encoded value in an
# Octet String tag.
# If the version is greater than 1, it is definite CMS
if self['version'].native != 'v1':
return EncapsulatedContentInfo
# Otherwise, the ContentInfo spec from PKCS#7 will be compatible with
# CMS v1 (which only allows Data, an Octet String) and PKCS#7, which
# allows Any
return ContentInfo
_spec_callbacks = {
'encap_content_info': _encap_content_info_spec
}
class OriginatorInfo(Sequence):
_fields = [
('certs', CertificateSet, {'implicit': 0, 'optional': True}),
('crls', RevocationInfoChoices, {'implicit': 1, 'optional': True}),
]
class RecipientIdentifier(Choice):
_alternatives = [
('issuer_and_serial_number', IssuerAndSerialNumber),
('subject_key_identifier', OctetString, {'implicit': 0}),
]
class KeyEncryptionAlgorithmId(ObjectIdentifier):
_map = {
'1.2.840.113549.1.1.1': 'rsa',
'2.16.840.1.101.3.4.1.5': 'aes128_wrap',
'2.16.840.1.101.3.4.1.8': 'aes128_wrap_pad',
'2.16.840.1.101.3.4.1.25': 'aes192_wrap',
'2.16.840.1.101.3.4.1.28': 'aes192_wrap_pad',
'2.16.840.1.101.3.4.1.45': 'aes256_wrap',
'2.16.840.1.101.3.4.1.48': 'aes256_wrap_pad',
}
class KeyEncryptionAlgorithm(_ForceNullParameters, Sequence):
_fields = [
('algorithm', KeyEncryptionAlgorithmId),
('parameters', Any, {'optional': True}),
]
class KeyTransRecipientInfo(Sequence):
_fields = [
('version', CMSVersion),
('rid', RecipientIdentifier),
('key_encryption_algorithm', KeyEncryptionAlgorithm),
('encrypted_key', OctetString),
]
class OriginatorIdentifierOrKey(Choice):
_alternatives = [
('issuer_and_serial_number', IssuerAndSerialNumber),
('subject_key_identifier', OctetString, {'implicit': 0}),
('originator_key', PublicKeyInfo, {'implicit': 1}),
]
class OtherKeyAttribute(Sequence):
_fields = [
('key_attr_id', ObjectIdentifier),
('key_attr', Any),
]
class RecipientKeyIdentifier(Sequence):
_fields = [
('subject_key_identifier', OctetString),
('date', GeneralizedTime, {'optional': True}),
('other', OtherKeyAttribute, {'optional': True}),
]
class KeyAgreementRecipientIdentifier(Choice):
_alternatives = [
('issuer_and_serial_number', IssuerAndSerialNumber),
('r_key_id', RecipientKeyIdentifier, {'implicit': 0}),
]
class RecipientEncryptedKey(Sequence):
_fields = [
('rid', KeyAgreementRecipientIdentifier),
('encrypted_key', OctetString),
]
class RecipientEncryptedKeys(SequenceOf):
_child_spec = RecipientEncryptedKey
class KeyAgreeRecipientInfo(Sequence):
_fields = [
('version', CMSVersion),
('originator', OriginatorIdentifierOrKey, {'explicit': 0}),
('ukm', OctetString, {'explicit': 1, 'optional': True}),
('key_encryption_algorithm', KeyEncryptionAlgorithm),
('recipient_encrypted_keys', RecipientEncryptedKeys),
]
class KEKIdentifier(Sequence):
_fields = [
('key_identifier', OctetString),
('date', GeneralizedTime, {'optional': True}),
('other', OtherKeyAttribute, {'optional': True}),
]
class KEKRecipientInfo(Sequence):
_fields = [
('version', CMSVersion),
('kekid', KEKIdentifier),
('key_encryption_algorithm', KeyEncryptionAlgorithm),
('encrypted_key', OctetString),
]
class PasswordRecipientInfo(Sequence):
_fields = [
('version', CMSVersion),
('key_derivation_algorithm', KdfAlgorithm, {'implicit': 0, 'optional': True}),
('key_encryption_algorithm', KeyEncryptionAlgorithm),
('encrypted_key', OctetString),
]
class OtherRecipientInfo(Sequence):
_fields = [
('ori_type', ObjectIdentifier),
('ori_value', Any),
]
class RecipientInfo(Choice):
_alternatives = [
('ktri', KeyTransRecipientInfo),
('kari', KeyAgreeRecipientInfo, {'implicit': 1}),
('kekri', KEKRecipientInfo, {'implicit': 2}),
('pwri', PasswordRecipientInfo, {'implicit': 3}),
('ori', OtherRecipientInfo, {'implicit': 4}),
]
class RecipientInfos(SetOf):
_child_spec = RecipientInfo
class EncryptedContentInfo(Sequence):
_fields = [
('content_type', ContentType),
('content_encryption_algorithm', EncryptionAlgorithm),
('encrypted_content', OctetString, {'implicit': 0, 'optional': True}),
]
class EnvelopedData(Sequence):
_fields = [
('version', CMSVersion),
('originator_info', OriginatorInfo, {'implicit': 0, 'optional': True}),
('recipient_infos', RecipientInfos),
('encrypted_content_info', EncryptedContentInfo),
('unprotected_attrs', CMSAttributes, {'implicit': 1, 'optional': True}),
]
class SignedAndEnvelopedData(Sequence):
_fields = [
('version', CMSVersion),
('recipient_infos', RecipientInfos),
('digest_algorithms', DigestAlgorithms),
('encrypted_content_info', EncryptedContentInfo),
('certificates', CertificateSet, {'implicit': 0, 'optional': True}),
('crls', CertificateRevocationLists, {'implicit': 1, 'optional': True}),
('signer_infos', SignerInfos),
]
class DigestedData(Sequence):
_fields = [
('version', CMSVersion),
('digest_algorithm', DigestAlgorithm),
('encap_content_info', None),
('digest', OctetString),
]
def _encap_content_info_spec(self):
# If the encap_content_info is version v1, then this could be a PKCS#7
# structure, or a CMS structure. CMS wraps the encoded value in an
# Octet String tag.
# If the version is greater than 1, it is definite CMS
if self['version'].native != 'v1':
return EncapsulatedContentInfo
# Otherwise, the ContentInfo spec from PKCS#7 will be compatible with
# CMS v1 (which only allows Data, an Octet String) and PKCS#7, which
# allows Any
return ContentInfo
_spec_callbacks = {
'encap_content_info': _encap_content_info_spec
}
class EncryptedData(Sequence):
_fields = [
('version', CMSVersion),
('encrypted_content_info', EncryptedContentInfo),
('unprotected_attrs', CMSAttributes, {'implicit': 1, 'optional': True}),
]
class AuthenticatedData(Sequence):
_fields = [
('version', CMSVersion),
('originator_info', OriginatorInfo, {'implicit': 0, 'optional': True}),
('recipient_infos', RecipientInfos),
('mac_algorithm', HmacAlgorithm),
('digest_algorithm', DigestAlgorithm, {'implicit': 1, 'optional': True}),
# This does not require the _spec_callbacks approach of SignedData and
# DigestedData since AuthenticatedData was not part of PKCS#7
('encap_content_info', EncapsulatedContentInfo),
('auth_attrs', CMSAttributes, {'implicit': 2, 'optional': True}),
('mac', OctetString),
('unauth_attrs', CMSAttributes, {'implicit': 3, 'optional': True}),
]
class AuthEnvelopedData(Sequence):
_fields = [
('version', CMSVersion),
('originator_info', OriginatorInfo, {'implicit': 0, 'optional': True}),
('recipient_infos', RecipientInfos),
('auth_encrypted_content_info', EncryptedContentInfo),
('auth_attrs', CMSAttributes, {'implicit': 1, 'optional': True}),
('mac', OctetString),
('unauth_attrs', CMSAttributes, {'implicit': 2, 'optional': True}),
]
class CompressionAlgorithmId(ObjectIdentifier):
_map = {
'1.2.840.113549.1.9.16.3.8': 'zlib',
}
class CompressionAlgorithm(Sequence):
_fields = [
('algorithm', CompressionAlgorithmId),
('parameters', Any, {'optional': True}),
]
class CompressedData(Sequence):
_fields = [
('version', CMSVersion),
('compression_algorithm', CompressionAlgorithm),
('encap_content_info', EncapsulatedContentInfo),
]
_decompressed = None
@property
def decompressed(self):
if self._decompressed is None:
if zlib is None:
raise SystemError('The zlib module is not available')
self._decompressed = zlib.decompress(self['encap_content_info']['content'].native)
return self._decompressed
ContentInfo._oid_specs = {
'data': OctetString,
'signed_data': SignedData,
'enveloped_data': EnvelopedData,
'signed_and_enveloped_data': SignedAndEnvelopedData,
'digested_data': DigestedData,
'encrypted_data': EncryptedData,
'authenticated_data': AuthenticatedData,
'compressed_data': CompressedData,
'authenticated_enveloped_data': AuthEnvelopedData,
}
EncapsulatedContentInfo._oid_specs = {
'signed_data': SignedData,
'enveloped_data': EnvelopedData,
'signed_and_enveloped_data': SignedAndEnvelopedData,
'digested_data': DigestedData,
'encrypted_data': EncryptedData,
'authenticated_data': AuthenticatedData,
'compressed_data': CompressedData,
'authenticated_enveloped_data': AuthEnvelopedData,
}
CMSAttribute._oid_specs = {
'content_type': SetOfContentType,
'message_digest': SetOfOctetString,
'signing_time': SetOfTime,
'counter_signature': SignerInfos,
'signature_time_stamp_token': SetOfContentInfo,
'cms_algorithm_protection': SetOfCMSAlgorithmProtection,
}
| en | 0.688955 | # coding: utf-8 ASN.1 type classes for cryptographic message syntax (CMS). Structures are also
compatible with PKCS#7. Exports the following items:
- AuthenticatedData()
- AuthEnvelopedData()
- CompressedData()
- ContentInfo()
- DigestedData()
- EncryptedData()
- EnvelopedData()
- SignedAndEnvelopedData()
- SignedData()
Other type classes are defined that help compose the types listed above.
Most CMS structures in the wild are formatted as ContentInfo encapsulating one of the other types. # These structures are taken from # ftp://ftp.rsasecurity.com/pub/pkcs/ascii/pkcs-6.asc # These structures are taken from https://tools.ietf.org/html/rfc5652, # https://tools.ietf.org/html/rfc5083, http://tools.ietf.org/html/rfc2315, # https://tools.ietf.org/html/rfc5940, https://tools.ietf.org/html/rfc3274, # https://tools.ietf.org/html/rfc3281 # https://tools.ietf.org/html/rfc3161#page-20 # https://tools.ietf.org/html/rfc6211#page-5 # Handle the situation where a V2 cert is encoded as V1 Ensures that the class and tag specified exist as an alternative. This
custom version fixes parsing broken encodings there a V2 attribute
# certificate is encoded as a V1
:param class_:
The integer class_ from the encoded value header
:param tag:
The integer tag from the encoded value header
:param contents:
A byte string of the contents of the value - used when the object
is explicitly tagged
:raises:
ValueError - when value is not a valid alternative # If the encap_content_info is version v1, then this could be a PKCS#7 # structure, or a CMS structure. CMS wraps the encoded value in an # Octet String tag. # If the version is greater than 1, it is definite CMS # Otherwise, the ContentInfo spec from PKCS#7 will be compatible with # CMS v1 (which only allows Data, an Octet String) and PKCS#7, which # allows Any # If the encap_content_info is version v1, then this could be a PKCS#7 # structure, or a CMS structure. CMS wraps the encoded value in an # Octet String tag. # If the version is greater than 1, it is definite CMS # Otherwise, the ContentInfo spec from PKCS#7 will be compatible with # CMS v1 (which only allows Data, an Octet String) and PKCS#7, which # allows Any # This does not require the _spec_callbacks approach of SignedData and # DigestedData since AuthenticatedData was not part of PKCS#7 | 2.238328 | 2 |
web/apps/web_copo/copo_middleware/__init__.py | rpatil524/COPO | 16 | 6631667 | <reponame>rpatil524/COPO<gh_stars>10-100
__author__ = '<EMAIL> - 29/04/2016'
| __author__ = '<EMAIL> - 29/04/2016' | none | 1 | 1.102319 | 1 |
|
packages/pyre/constraints/Between.py | rtburns-jpl/pyre | 0 | 6631668 | # -*- coding: utf-8 -*-
#
# <NAME>
# orthologue
# (c) 1998-2020 all rights reserved
#
# superclass
from .Constraint import Constraint
# declaration
class Between(Constraint):
"""
Given {a} and {b} from a set with an ordering principle, this constraint is satisfied if
the candidate is in {(a,b)}
"""
# interface
def validate(self, value, **kwds):
"""
Check whether {candidate} satisfies this constraint
"""
# if {candidate} is between my {low} and my {high}
if self.low < value < self.high:
# indicate success
return value
# otherwise, chain up
return super().validate(value=value, **kwds)
# meta-methods
def __init__(self, low, high, **kwds):
# chain up
super().__init__(**kwds)
# save my range
self.low = low
self.high = high
# all done
return
def __str__(self):
return "between {0.low} and {0.high}".format(self)
# end of file
| # -*- coding: utf-8 -*-
#
# <NAME>
# orthologue
# (c) 1998-2020 all rights reserved
#
# superclass
from .Constraint import Constraint
# declaration
class Between(Constraint):
"""
Given {a} and {b} from a set with an ordering principle, this constraint is satisfied if
the candidate is in {(a,b)}
"""
# interface
def validate(self, value, **kwds):
"""
Check whether {candidate} satisfies this constraint
"""
# if {candidate} is between my {low} and my {high}
if self.low < value < self.high:
# indicate success
return value
# otherwise, chain up
return super().validate(value=value, **kwds)
# meta-methods
def __init__(self, low, high, **kwds):
# chain up
super().__init__(**kwds)
# save my range
self.low = low
self.high = high
# all done
return
def __str__(self):
return "between {0.low} and {0.high}".format(self)
# end of file
| en | 0.759562 | # -*- coding: utf-8 -*- # # <NAME> # orthologue # (c) 1998-2020 all rights reserved # # superclass # declaration Given {a} and {b} from a set with an ordering principle, this constraint is satisfied if the candidate is in {(a,b)} # interface Check whether {candidate} satisfies this constraint # if {candidate} is between my {low} and my {high} # indicate success # otherwise, chain up # meta-methods # chain up # save my range # all done # end of file | 3.415673 | 3 |
notebooks/connectfour-withMinmax.py | nilutz/Connectfour | 1 | 6631669 | <reponame>nilutz/Connectfour<filename>notebooks/connectfour-withMinmax.py
# coding: utf-8
# In[1]:
import numpy as np
# In[2]:
class Minmax(object):
'''
This is a simple MinMax algorithm with score-Hashing and Alpha Beta pruning.
The score function evaluates the board with a simple heuristic.
'''
DIC ={}
def __init__(self):
#load a DIC with some precalculated score values
try:
self.DIC = self.load_obj('scores')
except (RuntimeError, TypeError, NameError, OSError, IOError):
self.DIC = {}
print '--> no scores.pkl available. Now it takes way longer to build the tree!!'
def save_obj(self, obj, name ):
'''
save a data object
'''
import pickle
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(self, name):
'''
loads a data object
'''
import pickle
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def get_hash_key(self, board):
'''
This function creates a hashkey for a board so it can be stored in the dictionary.
'''
b=np.ndarray.flatten(board)
string_flat=np.char.mod('%d', b)
key = "".join(string_flat)
return key
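# Added note: for the standard 6x7 board the key is a 42-character string of the digits in the
# flattened grid (an empty board hashes to 42 '0' characters), so repeated evaluations of the
# same position become O(1) dictionary lookups in self.DIC.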
#heuristic to evaluate the next best move
def score(self, state, depth, cur_player):
'''
heuristic function that uses Hashing for the scores if they are already calculated
state: current gameState
depth: current depth
cur_player: current player
we are using a simple heuristic here,
just counting runs of pieces: our own runs are rewarded and the opponent's runs are punished with weights.
score = num of(four in row)*1000+ num of(three in row)* 100+ num of(two in row)*10
- num of opponent(four in row)*100000- num of opponent(three in row)*100-
num of opponent(two in row)*10
returns the score
'''
if cur_player == 1:
oponent = 2
else:
oponent = 1
hash_key = self.get_hash_key(state)
# Score already calculated
if hash_key in self.DIC:
return self.DIC[hash_key]
# else calculate
else:
#counting the number of fours/threes/twos in a row/column/diag for both players
_ , b = zip(*(move_was_winning_move(state, cur_player, e) for e in range(2,5)))
_ , c = zip(*(move_was_winning_move(state, oponent, f) for f in range(2,5)))
score = b[2]*1000+b[1]*100+b[0]*10-c[2]*100000-c[1]*100-c[0]*10
#and put in DIC
self.DIC[hash_key] = score
return score
def listofmoves(self, gameState):
'''
returns a list of playable columns (columns that are not full), ordered middle-first
gameState: current gamestate
return: list of possible moves
'''
l=[]
for i in range(gameState.shape[1]):
if 0 in gameState[:,i]:
l.append(i)
m=sum(l)/len(l)
return sorted(l, key=lambda x:abs(x-m))
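# Added worked example: on an empty standard 6x7 board every column 0..6 is playable, the mean m
# is 3, and the stable sort by abs(x-m) yields [3, 2, 4, 1, 5, 0, 6] - the centre column is tried
# first, which tends to produce earlier alpha-beta cutoffs.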
def min_play(self, board, depth, alpha ,beta , cur_player):
'''
recursively building the tree, min part of minmax
minimizing the score, moving the opponent
board: a gameState
depth: depth parameter of search tree
alpha: alpha for alphabeta pruning
beta: beta for alphabeta pruning
cur_player: the current_player's number
return best score
'''
#eval current player
if cur_player == 1:
oponent = 2
else:
oponent = 1
#termination
#max depth
if depth == 0:
return self.score(board, depth, cur_player)
#all full
if not move_still_possible(board):
return self.score(board, depth, cur_player)
#or winner
winningmove, _ = move_was_winning_move(board, cur_player, 4)
if winningmove:
return self.score(board, depth, cur_player)
best_score = np.inf
#get all available moves
moves = self.listofmoves(board)
best_move = moves[0]
if len(moves) == 0:
return self.score(board, depth, cur_player)
for eachmove in moves:
#copy board and move oponent
boardcopy = board.copy()
board_i, _ , _ = move(boardcopy, oponent, eachmove)
#build recursively (max)
score = self.max_play(board_i, depth-1,alpha ,beta , cur_player)
#compare scores MIN
#if score < best_score:
# best_move = eachmove
# best_score = score
#print 'if', best_move, best_score
#with alpha - beta
if score < beta:
beta = score
#best_move = eachmove
if beta <= alpha:
return beta
return beta#, best_move
def max_play(self, board, depth, alpha ,beta , cur_player):
'''
recursively building the tree, max part of minmax
maximizing the score
board: a gameState
depth: depth parameter of search tree
alpha: alpha for alphabeta pruning
beta: beta for alphabeta pruning
cur_player: the current_player's number
return best score
'''
#eval current player
if cur_player == 1:
oponent = 2
else:
oponent = 1
#termination
#max depth
if depth == 0:
return self.score(board, depth, cur_player)
#all full
elif not move_still_possible(board):
return self.score(board, depth, cur_player)
#or winner
winningmove, _ = move_was_winning_move(board, cur_player, 4)
if winningmove:
return self.score(board, depth, cur_player)
best_score = -np.inf
#get all available moves
moves = self.listofmoves(board)
best_move = moves[0]
if len(moves) == 0:
return self.score(board, depth, cur_player)
for eachmove in moves:
#copy board and move player
boardcopy = board.copy()
board_i, _ , _ = move(boardcopy, cur_player, eachmove)
#build recursively (min)
score = self.min_play(board_i, depth-1,alpha ,beta , cur_player)
#compare scores MAX
#if score > best_score:
# best_move = eachmove
# best_score = score
#print 'if', best_move, best_score
#with alpha-beta
if score > alpha:
alpha = score
#best_move = eachmove
if alpha >= beta:
return alpha
return alpha #, best_move
def minmax(self, board, cur_player, depth=4, alpha=-np.inf, beta=np.inf):
'''
recursively building the tree with alpha-beta pruning
may not be the best choice but everyone keeps saying: memory is cheap
board: a gameState
depth: depth parameter of search tree
cur_player: the current_player's number
return best score, best move
'''
#eval current player
if cur_player == 1:
oponent = 2
else:
oponent = 1
best_score = -np.inf
#get all available moves
moves = self.listofmoves(board)
best_move = moves[0]
#for each move do
for eachmove in moves:
#print eachmove
#copy board and move
boardcopy = board.copy()
#build recursively
board_i, _ , _ = move(boardcopy, cur_player, eachmove)
score = self.min_play(board_i, depth-1 ,alpha ,beta, cur_player)
#compare scores
if score > alpha:
alpha = score
best_move = eachmove
if alpha >= beta:
return alpha
return alpha, best_move
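# --- Editor's illustrative sketch (not part of the original notebook). A minimal
# --- example of driving the Minmax class defined above on an empty board; the
# --- depth of 4 and the (score, column) return value follow the method signatures
# --- above, but treat this as an untested usage sketch.
def _example_minmax_usage():
    searcher = Minmax()
    empty_board = np.zeros((6, 7), dtype=int)
    # player 1 searches 4 plies deep; minmax returns (best score, best column)
    best_score, best_column = searcher.minmax(empty_board, 1, depth=4)
    return best_score, best_column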
# In[3]:
#import numpy as np
def move_is_correct(grid,num):
'''
@param grid: 6x7 grid containing the current game state
@param num: column
returns True if move is allowed on that column
'''
#if 0 is in column
if 0 in grid[:,num]:
#move is allowed
return True
else:
return False
def move_still_possible(S):
'''
@param S: 6x7 grid containing the current game state
returns True if grid contains no 0, therefore no move possible anymore
'''
return not(S[S==0].size == 0)
def move(S,p,col_num):
'''
@param S: 6x7 grid containing the current game state
@param p: current player
@param col_num: column number
sets the player's number on the grid and returns the grid
'''
#sanity check
if 0 in S[:,col_num]:
y = np.where(S[:,col_num]==0)[0][-1]
S[y,col_num] = p
return S , y, col_num
else:
return S, None, None
return
def move_at_random(S):
'''
@param S: 6x7 grid containing the current game state
moves at random
'''
return np.random.randint(0,S.shape[1])
#neat and ugly but the fastest way to search a matrix for a vector is a string find
player1=[' ','1', '1 1', '1 1 1', '1 1 1 1']
oponent=[' ','1', '2 2', '2 2 2', '2 2 2 2']
def move_was_winning_move(S, p, num):
'''
@param S: 6x7 grid containing the current game state
@param p: current player
@param num: how many occurrences to count
combines all the allowed formations of the grid into one string and searches it
for the current player's pattern. Returns True if there is a match.
return: True or False whether the move was a winning move or not,
and the count of occurrences
'''
if p == 1:
match = player1[num]
else:
match = oponent[num]
l=[]
#for every possible diag
for i in range(-2,4):
l.append(np.diag(S,k = i))
l.append(np.diag(np.fliplr(S),k=i))
#left to right
l.append(S)
#top to bottom
l.append(np.rot90(S))
#convert to string
stringmatrix =''.join(np.array_str(e) for e in l)
#count the occurrences
counter = stringmatrix.count(match)
#print stringmatrix
#if four in a row
if num == 4 and counter == 1:
return True, counter
return False, counter
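# Editor's illustrative sketch (not part of the original script): a small sanity
# check of the string-based detector above, wrapped in a function so nothing runs
# on import. Four discs for player 1 on the bottom row should be reported as a win.
def _example_winning_move_check():
    board = np.zeros((6, 7), dtype=int)
    board[5, 0:4] = 1
    won, count = move_was_winning_move(board, 1, 4)
    return won, count  # expected to be (True, 1) for this position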
# relate numbers (1, -1, 0) to symbols ('x', 'o', ' ')
symbols = {1:'m', 2:'r', 0:' '}
# print game state matrix using symbols
def print_game_state(S):
B = np.copy(S).astype(object)
for n in [1, 2, 0]:
B[B==n] = symbols[n]
print B
if __name__ == '__main__':
# initialize 6x7 connectfour board
gameState = np.zeros((6,7), dtype=int)
# initialize player number, move counter
player = 1
mvcntr = 1
# initialize flag that indicates win
noWinnerYet = True
m = Minmax()
le = len(m.DIC)
print 'This is a Minmax vs Random Connect Four simulation: '
difficulty = int(raw_input('Difficulty: '))
while move_still_possible(gameState) and noWinnerYet:
while True:
# get player symbol
name = symbols[player]
print '%s moves' % name
#move with Minmax
if player == 1:
_ , col_num = m.minmax(gameState, 1, difficulty, -np.inf, np.inf)
# let player move at random
else:
col_num = move_at_random(gameState)
if move_is_correct(gameState, col_num):
gameState, _ , _ = move(gameState,player,col_num)
# print current game state
print_game_state(gameState)
# evaluate game state
winningmove, _ = move_was_winning_move(gameState, player, 4)
if winningmove:
print 'player %s wins after %d moves' % (name, mvcntr)
noWinnerYet = False
# switch player and increase move counter
if player == 1:
player = 2
elif player == 2:
player = 1
mvcntr += 1
break
if noWinnerYet:
print 'game ended in a draw'
#save new DIC for better Hashing
if le < len(m.DIC):
m.save_obj(m.DIC,'scores')
| # coding: utf-8
# In[1]:
import numpy as np
# In[2]:
class Minmax(object):
'''
This is a simple MinMax algorithm with score-Hashing and Alpha Beta pruning.
The score evaluates a heuristic.
'''
DIC ={}
def __init__(self):
#load a DIC with some precalculated values
try:
self.DIC = self.load_obj('scores')
except (RuntimeError, TypeError, NameError, OSError, IOError):
self.DIC = {}
print '--> no scores.pkl available. Now it takes way longer to build the tree!!'
def save_obj(self, obj, name ):
import pickle
'''
save a data object
'''
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(self, name):
'''
loads a data object
'''
import pickle
with open(name + '.pkl', 'rb') as f:
return pickle.load(f)
def get_hash_key(self, board):
'''
This function creates a hashkey for a board so it can be stored in the dictionary.
'''
b=np.ndarray.flatten(board)
string_flat=np.char.mod('%d', b)
key = "".join(string_flat)
return key
#heuristic to evaluate the next best move
def score(self, state, depth, cur_player):
'''
heuristic function that uses Hashing for the scores if they are already calculated
state: current gameState
depth: current depth
cur_player: current player
we are using a simple heuristic here,
just counting connected pieces and weighting them: rewarding our own runs and punishing the opponent's.
score = num of(four in row)*1000+ num of(three in row)* 100+ num of(two in row)*10
- num of opponent(four in row)*100000- num of opponent(three in row)*100-
num of opponent(two in row)*10
returns the score
'''
if cur_player == 1:
oponent = 2
else:
oponent = 1
hash_key = self.get_hash_key(state)
# Score already calculated
if hash_key in self.DIC:
return self.DIC[hash_key]
# else calculate
else:
#counting the number of good four/threes/twos in a row/column/diag
_ , b = zip(*(move_was_winning_move(state, cur_player, e) for e in range(2,5)))
_ , c = zip(*(move_was_winning_move(state, oponent, f) for f in range(2,5)))
score = b[2]*1000+b[1]*100+b[0]*10-c[2]*100000-c[1]*100-c[0]*10
#and put in DIC
self.DIC[hash_key] = score
return score
def listofmoves(self, gameState):
'''
returns a list of possible moves = column not full and orders it with middle first
gameState: current gamestate
return: list of possible moves
'''
l=[]
for i in range(gameState.shape[1]):
if 0 in gameState[:,i]:
l.append(i)
m=sum(l)/len(l)
return sorted(l, key=lambda x:abs(x-m))
def min_play(self, board, depth, alpha ,beta , cur_player):
'''
recursively building the tree, min part of minmax
minimizing the score, moving the opponent
board: a gameState
depth: depth parameter of search tree
alpha: alpha for alphabeta pruning
beta: beta for alphabeta pruning
cur_player: the current_player's number
return best score
'''
#eval current player
if cur_player == 1:
oponent = 2
else:
oponent = 1
#termination
#max depth
if depth == 0:
return self.score(board, depth, cur_player)
#all full
if not move_still_possible(board):
return self.score(board, depth, cur_player)
#or winner
winningmove, _ = move_was_winning_move(board, cur_player, 4)
if winningmove:
return self.score(board, depth, cur_player)
best_score = np.inf
#get all available moves
moves = self.listofmoves(board)
best_move = moves[0]
if len(moves) == 0:
return self.score(board, depth, cur_player)
for eachmove in moves:
#copy board and move oponent
boardcopy = board.copy()
board_i, _ , _ = move(boardcopy, oponent, eachmove)
#build recursively (max)
score = self.max_play(board_i, depth-1,alpha ,beta , cur_player)
#compare scores MIN
#if score < best_score:
# best_move = eachmove
# best_score = score
#print 'if', best_move, best_score
#with alpha - beta
if score < beta:
beta = score
#best_move = eachmove
if beta <= alpha:
return beta
return beta#, best_move
def max_play(self, board, depth, alpha ,beta , cur_player):
'''
recursively building the tree, max part of minmax
maximizing the score
board: a gameState
depth: depth parameter of search tree
alpha: alpha for alphabeta pruning
beta: beta for alphabeta pruning
cur_player: the current_player's number
return best score
'''
#eval current player
if cur_player == 1:
oponent = 2
else:
oponent = 1
#termination
#max depth
if depth == 0:
return self.score(board, depth, cur_player)
#all full
elif not move_still_possible(board):
return self.score(board, depth, cur_player)
#or winner
winningmove, _ = move_was_winning_move(board, cur_player, 4)
if winningmove:
return self.score(board, depth, cur_player)
best_score = -np.inf
#get all available moves
moves = self.listofmoves(board)
best_move = moves[0]
if len(moves) == 0:
return self.score(board, depth, cur_player)
for eachmove in moves:
#copy board and move player
boardcopy = board.copy()
board_i, _ , _ = move(boardcopy, cur_player, eachmove)
#build recursively (min)
score = self.min_play(board_i, depth-1,alpha ,beta , cur_player)
#compare scores MAX
#if score > best_score:
# best_move = eachmove
# best_score = score
#print 'if', best_move, best_score
#with alpha-beta
if score > alpha:
alpha = score
#best_move = eachmove
if alpha >= beta:
return alpha
return alpha #, best_move
def minmax(self, board, cur_player, depth=4, alpha=-np.inf, beta=np.inf):
'''
recursively building the tree with alpha-beta pruning
may not be the best choice but everyone keeps saying: memory is cheap
board: a gameState
depth: depth parameter of search tree
cur_player: the current_player's number
return best score, best move
'''
#eval current player
if cur_player == 1:
oponent = 2
else:
oponent = 1
best_score = -np.inf
#get all available moves
moves = self.listofmoves(board)
best_move = moves[0]
#for each move do
for eachmove in moves:
#print eachmove
#copy board and move
boardcopy = board.copy()
#build recursively
board_i, _ , _ = move(boardcopy, cur_player, eachmove)
score = self.min_play(board_i, depth-1 ,alpha ,beta, cur_player)
#compare scores
if score > alpha:
alpha = score
best_move = eachmove
if alpha >= beta:
return alpha
return alpha, best_move
# In[3]:
#import numpy as np
def move_is_correct(grid,num):
'''
@param grid: 6x7 grid containing the current game state
@param num: column
returns True if move is allowed on that column
'''
#if 0 is in column
if 0 in grid[:,num]:
#move is allowed
return True
else:
return False
def move_still_possible(S):
'''
@param S: 6x7 grid containing the current game state
returns True if grid contains no 0, therefore no move possible anymore
'''
return not(S[S==0].size == 0)
def move(S,p,col_num):
'''
@param S: 6x7 grid containing the current game state
@param p: current player
@param col_num: column number
sets the player's number on the grid and returns the grid
'''
#sanity check
if 0 in S[:,col_num]:
y = np.where(S[:,col_num]==0)[0][-1]
S[y,col_num] = p
return S , y, col_num
else:
return S, None, None
return
def move_at_random(S):
'''
@param S: 6x7 grid containing the current game state
moves at random
'''
return np.random.randint(0,S.shape[1])
#neat and ugly but the fastest way to search a matrix for a vector is a string find
player1=[' ','1', '1 1', '1 1 1', '1 1 1 1']
oponent=[' ','1', '2 2', '2 2 2', '2 2 2 2']
def move_was_winning_move(S, p, num):
'''
@param S: 6x7 grid containing the current game state
@param p: current player
@param num: how many occurrences to count
combines all the allowed formations of the grid into one string and searches it
for the current player's pattern. Returns True if there is a match.
return: True or False whether the move was a winning move or not,
and the count of occurrences
'''
if p == 1:
match = player1[num]
else:
match = oponent[num]
l=[]
#for every possible diag
for i in range(-2,4):
l.append(np.diag(S,k = i))
l.append(np.diag(np.fliplr(S),k=i))
#left to right
l.append(S)
#top to bottom
l.append(np.rot90(S))
#convert to string
stringmatrix =''.join(np.array_str(e) for e in l)
#count the occurrences
counter = stringmatrix.count(match)
#print stringmatrix
#if four in a row
if num == 4 and counter == 1:
return True, counter
return False, counter
# relate numbers (1, -1, 0) to symbols ('x', 'o', ' ')
symbols = {1:'m', 2:'r', 0:' '}
# print game state matrix using symbols
def print_game_state(S):
B = np.copy(S).astype(object)
for n in [1, 2, 0]:
B[B==n] = symbols[n]
print B
if __name__ == '__main__':
# initialize 6x7 connectfour board
gameState = np.zeros((6,7), dtype=int)
# initialize player number, move counter
player = 1
mvcntr = 1
# initialize flag that indicates win
noWinnerYet = True
m = Minmax()
le = len(m.DIC)
print 'This is a Minmax vs Random Connect Four simulation: '
difficulty = int(raw_input('Difficulty: '))
while move_still_possible(gameState) and noWinnerYet:
while True:
# get player symbol
name = symbols[player]
print '%s moves' % name
#move with Minmax
if player == 1:
_ , col_num = m.minmax(gameState, 1, difficulty, -np.inf, np.inf)
# let player move at random
else:
col_num = move_at_random(gameState)
if move_is_correct(gameState, col_num):
gameState, _ , _ = move(gameState,player,col_num)
# print current game state
print_game_state(gameState)
# evaluate game state
winningmove, _ = move_was_winning_move(gameState, player, 4)
if winningmove:
print 'player %s wins after %d moves' % (name, mvcntr)
noWinnerYet = False
# switch player and increase move counter
if player == 1:
player = 2
elif player == 2:
player = 1
mvcntr += 1
break
if noWinnerYet:
print 'game ended in a draw'
#save new DIC for better Hashing
if le < len(m.DIC):
m.save_obj(m.DIC,'scores') | en | 0.741092 | # coding: utf-8 # In[1]: # In[2]: This is a simple MinMax algorithm with score-Hashing and Alpha Beta pruning. The score evaluates a heuristic. #load a DIC with some precalculated values values save a data object loads a data object This function creates a hashkey for a board so it can be stored in the dictionary. #heuristic to evaluate the next best move heuristic function that uses Hashing for the scores if they are already calculated state: current gameState depth: current depth cur_player: current player we are using a simple heuristic here, just counting and punishing good and reward good moves with a weight. score = num of(four in row)*1000+ num of(three in row)* 100+ num of(two in row)*10 - num of opponent(four in row)*100000- num of opponent(three in row)*100- num of opponent(two in row)*10 returns the score # Score already calculated # else calculate #counting the number of good four/threes/twos in a row/column/diag #and put in DIC returns a list of possible moves = column not full and orders it with middle first gameState: current gamestate return: list of possible moves recursively building a the tree, min part of minmax minimzing the score, moving the oponent board: a gameState depth: depth parameter of search tree alpha: alpha for alphabeta pruning beta: beta for alphabeta pruning cur_player: the current_player's number return best score #eval current player #termination #max depth #all full #or winner #get all available moves #copy board and move oponent #build recursivley max #compare scores MIN #if score < best_score: # best_move = eachmove # best_score = score #print 'if', best_move, best_score #with alpha - beta #best_move = eachmove #, best_move recursively building a the tree, max part of minmax maximizing the score board: a gameState depth: depth parameter of search tree alpha: alpha for alphabeta pruning beta: beta for alphabeta pruning cur_player: the current_player's number return best score #eval current player #termination #max depth #all full #or winner #get all available moves #copy board and move player #build recursivley min #compare scores MAX #if score > best_score: # best_move = eachmove # best_score = score #print 'if', best_move, best_score #with alpha-beta #best_move = eachmove #, best_move recursively building a the tree with alpha beta pruning may not be the best choice but everyone keeps saying: memory is cheap board: a gameState depth: depth parameter of search tree cur_player: the current_player's number return best score, best move #eval current player #get all available moves #for each move do #print eachmove #copy board and move #build recursivley #compare scores # In[3]: #import numpy as np @param grid: 6x7 grid containing the current game state @param num: column returns True if move is allowed on that column #if 0 is in column #move is allowed @param S: 6x7 grid containing the current game state returns True if grid contains no 0, therefore no move possible anymore @param S: 6x7 grid containing the current game state @param p: current player @param col_num: column number sets the player's number on the grid and returns the grid #sanity check @param S: 6x7 grid containing the current game state moves at random #neat and ugly but the fastest way to search a matrix for a vector is a string find @param S: 6x7 grid containing the current game state @param p: current player @param num: how many occurences to count combines all the allowed formations of the grid and string_finds with the currents player 
vector. Returns true if match. return: True or False whether move was winning move or not, and count of occurences #for every possible diag #left to right #top to bottom #convert to string #count the occurences #print stringmatrix #if four in a row # relate numbers (1, -1, 0) to symbols ('x', 'o', ' ') # print game state matrix using symbols # initialize 6x7 connectfour board # initialize player number, move counter # initialize flag that indicates win # get player symbol #move with Minmax # let player move at random # print current game state # evaluate game state # switch player and increase move counter #save new DIC for better Hashing | 3.527331 | 4 |
RLBotPack/Manticore/behaviour/save_goal.py | FootlessQuill54/RLBotPack | 0 | 6631670 | <gh_stars>0
from rlbot.agents.base_agent import SimpleControllerState
from controllers.aim_cone import AimCone
from strategy.objective import Objective
from strategy.utility_system import UtilityState
from utility import predict
from utility.info import Ball, Goal
from utility.rlmath import sign, clip
from utility.vec import Vec3, norm
class SaveGoal(UtilityState):
def __init__(self, bot):
team_sign = bot.info.team_sign
self.aim_cone = None
self.ball_to_goal_right = None
self.ball_to_goal_left = None
def utility_score(self, bot) -> float:
team_sign = bot.info.team_sign
ball = bot.info.ball
ball_to_goal = bot.info.own_goal.pos - ball.pos
too_close = norm(ball_to_goal) < Goal.WIDTH2 + Ball.RADIUS
hits_goal_prediction = predict.will_ball_hit_goal(bot)
hits_goal = hits_goal_prediction.happens and sign(ball.vel.y) == team_sign and hits_goal_prediction.time < 3
obj_bonus = {
Objective.UNKNOWN: 1,
Objective.GO_FOR_IT: 1,
Objective.FOLLOW_UP: 0,
Objective.ROTATING: 0,
Objective.SOLO: 1,
}[bot.info.my_car.objective]
return float(hits_goal or too_close) * obj_bonus
def run(self, bot) -> SimpleControllerState:
car = bot.info.my_car
ball = bot.info.ball
hits_goal_prediction = predict.will_ball_hit_goal(bot)
reach_time = clip(predict.time_till_reach_ball(car, ball), 0, hits_goal_prediction.time - 0.5)
reachable_ball = predict.ball_predict(bot, reach_time)
self.ball_to_goal_right = bot.info.own_goal.right_post - reachable_ball.pos
self.ball_to_goal_left = bot.info.own_goal.left_post - reachable_ball.pos
self.aim_cone = AimCone(self.ball_to_goal_left, self.ball_to_goal_right)
if bot.do_rendering:
self.aim_cone.draw(bot, reachable_ball.pos, r=200, g=0, b=160)
shoot_controls = bot.shoot.with_aiming(bot, self.aim_cone, reach_time)
if not bot.shoot.can_shoot:
# Go home
return bot.drive.home(bot)
else:
return shoot_controls
| from rlbot.agents.base_agent import SimpleControllerState
from controllers.aim_cone import AimCone
from strategy.objective import Objective
from strategy.utility_system import UtilityState
from utility import predict
from utility.info import Ball, Goal
from utility.rlmath import sign, clip
from utility.vec import Vec3, norm
class SaveGoal(UtilityState):
def __init__(self, bot):
team_sign = bot.info.team_sign
self.aim_cone = None
self.ball_to_goal_right = None
self.ball_to_goal_left = None
def utility_score(self, bot) -> float:
team_sign = bot.info.team_sign
ball = bot.info.ball
ball_to_goal = bot.info.own_goal.pos - ball.pos
too_close = norm(ball_to_goal) < Goal.WIDTH2 + Ball.RADIUS
hits_goal_prediction = predict.will_ball_hit_goal(bot)
hits_goal = hits_goal_prediction.happens and sign(ball.vel.y) == team_sign and hits_goal_prediction.time < 3
obj_bonus = {
Objective.UNKNOWN: 1,
Objective.GO_FOR_IT: 1,
Objective.FOLLOW_UP: 0,
Objective.ROTATING: 0,
Objective.SOLO: 1,
}[bot.info.my_car.objective]
return float(hits_goal or too_close) * obj_bonus
def run(self, bot) -> SimpleControllerState:
car = bot.info.my_car
ball = bot.info.ball
hits_goal_prediction = predict.will_ball_hit_goal(bot)
reach_time = clip(predict.time_till_reach_ball(car, ball), 0, hits_goal_prediction.time - 0.5)
reachable_ball = predict.ball_predict(bot, reach_time)
self.ball_to_goal_right = bot.info.own_goal.right_post - reachable_ball.pos
self.ball_to_goal_left = bot.info.own_goal.left_post - reachable_ball.pos
self.aim_cone = AimCone(self.ball_to_goal_left, self.ball_to_goal_right)
if bot.do_rendering:
self.aim_cone.draw(bot, reachable_ball.pos, r=200, g=0, b=160)
shoot_controls = bot.shoot.with_aiming(bot, self.aim_cone, reach_time)
if not bot.shoot.can_shoot:
# Go home
return bot.drive.home(bot)
else:
return shoot_controls | none | 1 | 2.416946 | 2 |
|
rnaseq_report/modules/rnaseq_post_alignment_qc/post_alignment_qc.py | yqsongGitHub/rnaseq-report | 0 | 6631671 | #!/usr/bin/env python
""" RnaSeqReport plugin module """
from __future__ import print_function
from collections import defaultdict, OrderedDict
import logging
import os
import re
from multiqc import config
from multiqc.modules.base_module import BaseMultiqcModule
from multiqc.plots import bargraph, linegraph, table
# Initialise the logger
log = logging.getLogger(__name__)
class MultiqcModule(BaseMultiqcModule):
""" Qualimap is really a collection of separate programs:
BamQC, RNASeq and Counts.. This module is split into separate
files to reflect this and help with code organisation. """
def __init__(self):
# Initialise the parent object
super(MultiqcModule, self).__init__(
name='Post Alignment QC',
anchor='rnaseq_post_alignment_qc',
href='https://github.com/clinico-omics/rnaseq-report',
info=" is an report module to show the quality of base.")
# Initialise the submodules
from . import QM_BamQC
# Set up class objects to hold parsed data()
self.general_stats_headers = OrderedDict()
self.general_stats_data = defaultdict(lambda: dict())
n = dict()
n['BamQC'] = QM_BamQC.parse_reports(self)
if n['BamQC'] > 0:
log.info("Found {} BamQC reports".format(n['BamQC']))
### add report section-------------
# Make the plots for the report
self.add_qm_rnaseq_stats_data()
self.post_alignment_stats()
QM_BamQC.report_sections(self)
self.qm_rnaseq_parse_reports()
# Exit if we didn't find anything
if sum(n.values()) == 0:
raise UserWarning
# Helper functions
def get_s_name(self, f):
s_name = os.path.basename(os.path.dirname(f['root']))
s_name = self.clean_s_name(s_name, f['root'])
if s_name.endswith('.qc'):
s_name = s_name[:-3]
return s_name
def post_alignment_stats(self):
""" Add some single-number stats to the basic statistics
table at the top of the report """
# Prep the data
error_rate_mean = []
gc_mean = []
median_insert_size_mean = []
exonic_mean = []
intronic_mean = []
intergenic_mean = []
for k in self.general_stats_data.keys():
error_rate_mean.append(
self.general_stats_data[k]['general_error_rate'])
gc_mean.append(self.general_stats_data[k]['avg_gc'])
median_insert_size_mean.append(
self.general_stats_data[k]['median_insert_size'])
exonic_mean.append(
self.general_stats_data[k]['reads_aligned_exonic'])
intronic_mean.append(
self.general_stats_data[k]['reads_aligned_intronic'])
intergenic_mean.append(
self.general_stats_data[k]['reads_aligned_intergenic'])
self.general_stats_data['Batch average value'] = {
'general_error_rate':
sum(error_rate_mean) / len(error_rate_mean),
'avg_gc':
sum(gc_mean) / len(gc_mean),
'median_insert_size':
sum(median_insert_size_mean) / len(median_insert_size_mean),
'reads_aligned_exonic':
sum(exonic_mean) / len(exonic_mean),
'reads_aligned_intronic':
sum(intronic_mean) / len(intronic_mean),
'reads_aligned_intergenic':
sum(intergenic_mean) / len(intergenic_mean)
}
self.general_stats_data['Historical value'] = {
'general_error_rate': '0.01±0.01'
}
self.add_section(name='Post alignment stats',
anchor='post_alignment_stats',
description='The summary of qualimap',
helptext='''
To enable multiple samples to be plotted on the same graph, only the mean quality
scores are plotted (unlike the box plots seen in FastQC reports).
Taken from the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/2%20Per%20Base%20Sequence%20Quality.html):
_The y-axis on the graph shows the quality scores. The higher the score, the better
the base call. The background of the graph divides the y axis into very good quality
calls (green), calls of reasonable quality (orange), and calls of poor quality (red).
The quality of calls on most platforms will degrade as the run progresses, so it is
common to see base calls falling into the orange area towards the end of a read._
''',
plot=table.plot(self.general_stats_data,
self.general_stats_headers))
def add_qm_rnaseq_stats_data(self):
regexes = {
'reads_aligned': r"read(?:s| pairs) aligned\s*=\s*([\d,]+)",
'total_alignments': r"total alignments\s*=\s*([\d,]+)",
'non_unique_alignments': r"non-unique alignments\s*=\s*([\d,]+)",
'reads_aligned_genes': r"aligned to genes\s*=\s*([\d,]+)",
'ambiguous_alignments': r"ambiguous alignments\s*=\s*([\d,]+)",
'not_aligned': r"not aligned\s*=\s*([\d,]+)",
'5_3_bias': r"5'-3' bias\s*=\s*([\d,\.]+)$",
'reads_aligned_exonic': r"exonic\s*=\s*([\d,]+)",
'reads_aligned_intronic': r"intronic\s*=\s*([\d,]+)",
'reads_aligned_intergenic': r"intergenic\s*=\s*([\d,]+)",
'reads_aligned_overlapping_exon':
r"overlapping exon\s*=\s*([\d,]+)",
}
for f in self.find_log_files(
'rnaseq_post_alignment_qc/rnaseq_qc/rnaseq_qc_results'):
d = dict()
# Get the sample name
s_name_regex = re.search(r"bam file\s*=\s*(.+)", f['f'],
re.MULTILINE)
if s_name_regex:
d['bam_file'] = s_name_regex.group(1)
s_name = self.clean_s_name(d['bam_file'], f['root'])
else:
log.warn(
"Couldn't find an input filename in genome_results file {}/{}"
.format(f['root'], f['fn']))
return None
# Check for and 'fix' European style decimal places / thousand separators
comma_regex = re.search(r"exonic\s*=\s*[\d\.]+ \(\d{1,3},\d+%\)",
f['f'], re.MULTILINE)
if comma_regex:
log.debug(
"Trying to fix European comma style syntax in Qualimap report {}/{}"
.format(f['root'], f['fn']))
f['f'] = f['f'].replace('.', '')
f['f'] = f['f'].replace(',', '.')
# Go through all numeric regexes
for k, r in regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
try:
d[k] = float(r_search.group(1).replace(',', ''))
except UnicodeEncodeError:
# Qualimap reports infinity (\u221e) when 3' bias denominator is zero
pass
except ValueError:
d[k] = r_search.group(1)
for k in [
'reads_aligned_exonic', 'reads_aligned_intronic',
'reads_aligned_intergenic'
]:
try:
self.general_stats_data[s_name][k] = d[k]
except KeyError:
pass
#### General Stats
self.general_stats_headers['reads_aligned_exonic'] = {
'title': 'Exonic',
'description': 'Reads Aligned Exonic',
'min': 0,
'scale': 'RdBu',
'shared_key': 'read_count'
}
self.general_stats_headers['reads_aligned_intronic'] = {
'title': 'Intronic',
'description': 'Reads Aligned Intronic',
'min': 0,
'scale': 'RdBu',
'shared_key': 'read_count'
}
self.general_stats_headers['reads_aligned_intergenic'] = {
'title': 'Intergenic',
'description': 'Reads Aligned Intergenic',
'min': 0,
'scale': 'RdBu',
'shared_key': 'read_count'
}
def qm_rnaseq_parse_reports(self):
""" Find Qualimap RNASeq reports and parse their data """
self.qualimap_rnaseq_genome_results = dict()
regexes = {
'reads_aligned': r"read(?:s| pairs) aligned\s*=\s*([\d,]+)",
'total_alignments': r"total alignments\s*=\s*([\d,]+)",
'non_unique_alignments': r"non-unique alignments\s*=\s*([\d,]+)",
'reads_aligned_genes': r"aligned to genes\s*=\s*([\d,]+)",
'ambiguous_alignments': r"ambiguous alignments\s*=\s*([\d,]+)",
'not_aligned': r"not aligned\s*=\s*([\d,]+)",
'5_3_bias': r"5'-3' bias\s*=\s*([\d,\.]+)$",
'reads_aligned_exonic': r"exonic\s*=\s*([\d,]+)",
'reads_aligned_intronic': r"intronic\s*=\s*([\d,]+)",
'reads_aligned_intergenic': r"intergenic\s*=\s*([\d,]+)",
'reads_aligned_overlapping_exon':
r"overlapping exon\s*=\s*([\d,]+)",
}
for f in self.find_log_files(
'rnaseq_post_alignment_qc/rnaseq_qc/rnaseq_qc_results'):
d = dict()
# Get the sample name
s_name_regex = re.search(r"bam file\s*=\s*(.+)", f['f'],
re.MULTILINE)
if s_name_regex:
d['bam_file'] = s_name_regex.group(1)
s_name = self.clean_s_name(d['bam_file'], f['root'])
else:
log.warn(
"Couldn't find an input filename in genome_results file {}/{}"
.format(f['root'], f['fn']))
return None
# Check for and 'fix' European style decimal places / thousand separators
comma_regex = re.search(r"exonic\s*=\s*[\d\.]+ \(\d{1,3},\d+%\)",
f['f'], re.MULTILINE)
if comma_regex:
log.debug(
"Trying to fix European comma style syntax in Qualimap report {}/{}"
.format(f['root'], f['fn']))
f['f'] = f['f'].replace('.', '')
f['f'] = f['f'].replace(',', '.')
# Go through all numeric regexes
for k, r in regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
try:
d[k] = float(r_search.group(1).replace(',', ''))
except UnicodeEncodeError:
# Qualimap reports infinity (\u221e) when 3' bias denominator is zero
pass
except ValueError:
d[k] = r_search.group(1)
# Save results
if s_name in self.qualimap_rnaseq_genome_results:
log.debug(
"Duplicate genome results sample name found! Overwriting: {}"
.format(s_name))
self.qualimap_rnaseq_genome_results[s_name] = d
self.add_data_source(f,
s_name=s_name,
section='rna_genome_results')
#### Coverage profile
self.qualimap_rnaseq_cov_hist = dict()
for f in self.find_log_files(
'rnaseq_post_alignment_qc/rnaseq_qc/coverage',
filehandles=True):
s_name = self.get_s_name(f)
d = dict()
for l in f['f']:
if l.startswith('#'):
continue
coverage, count = l.split(None, 1)
coverage = int(round(float(coverage)))
count = float(count)
d[coverage] = count
if len(d) == 0:
log.debug(
"Couldn't parse contents of coverage histogram file {}".
format(f['fn']))
return None
# Save results
if s_name in self.qualimap_rnaseq_cov_hist:
log.debug(
"Duplicate coverage histogram sample name found! Overwriting: {}"
.format(s_name))
self.qualimap_rnaseq_cov_hist[s_name] = d
self.add_data_source(f,
s_name=s_name,
section='rna_coverage_histogram')
# Filter to strip out ignored sample names
self.qualimap_rnaseq_genome_results = self.ignore_samples(
self.qualimap_rnaseq_genome_results)
self.qualimap_rnaseq_cov_hist = self.ignore_samples(
self.qualimap_rnaseq_cov_hist)
#### Plots
# Genomic Origin Bar Graph
# NB: Ignore 'Overlapping Exon' in report - these make the numbers add up to > 100%
if len(self.qualimap_rnaseq_genome_results) > 0:
gorigin_cats = OrderedDict()
gorigin_cats['reads_aligned_exonic'] = {'name': 'Exonic'}
gorigin_cats['reads_aligned_intronic'] = {'name': 'Intronic'}
gorigin_cats['reads_aligned_intergenic'] = {'name': 'Intergenic'}
gorigin_pconfig = {
'id': 'qualimap_genomic_origin',
'title': 'Qualimap RNAseq: Genomic Origin',
'ylab': 'Number of reads',
'cpswitch_c_active': False
}
genomic_origin_helptext = '''
There are currently three main approaches to map reads to transcripts in an
RNA-seq experiment: mapping reads to a reference genome to identify expressed
transcripts that are annotated (and discover those that are unknown), mapping
reads to a reference transcriptome, and <i>de novo</i> assembly of transcript
sequences (<a href="https://doi.org/10.1186/s13059-016-0881-8"
target="_blank">Conesa et al. 2016</a>).
For RNA-seq QC analysis, QualiMap can be used to assess alignments produced by
the first of these approaches. For input, it requires a GTF annotation file
along with a reference genome, which can be used to reconstruct the exon
structure of known transcripts. This allows mapped reads to be grouped by
whether they originate in an exonic region (for QualiMap, this may include
5′ and 3′ UTR regions as well as protein-coding exons), an intron,
or an intergenic region (see the <a href="http://qualimap.bioinfo.cipf.es/doc_html/index.html"
target="_blank">Qualimap 2 documentation</a>).
The inferred genomic origins of RNA-seq reads are presented here as a bar graph
showing either the number or percentage of mapped reads in each read dataset
that have been assigned to each type of genomic region. This graph can be used
to assess the proportion of useful reads in an RNA-seq experiment. That
proportion can be reduced by the presence of intron sequences, especially if
depletion of ribosomal RNA was used during sample preparation (<a href="https://doi.org/10.1038/nrg3642"
target="_blank">Sims et al. 2014</a>). It can also be reduced by off-target
transcripts, which are detected in greater numbers at the sequencing depths
needed to detect poorly-expressed transcripts (<a href="https://doi.org/10.1101/gr.124321.111"
target="_blank">Tarazona et al. 2011</a>).'''
self.add_section(
name='Genomic origin of reads',
anchor='qualimap-reads-genomic-origin',
description=
'Classification of mapped reads as originating in exonic, intronic or intergenic regions. These can be displayed as either the number or percentage of mapped reads.',
helptext=genomic_origin_helptext,
plot=bargraph.plot(self.qualimap_rnaseq_genome_results,
gorigin_cats, gorigin_pconfig))
if len(self.qualimap_rnaseq_cov_hist) > 0:
coverage_profile_helptext = '''
There are currently three main approaches to map reads to transcripts in an
RNA-seq experiment: mapping reads to a reference genome to identify expressed
transcripts that are annotated (and discover those that are unknown), mapping
reads to a reference transcriptome, and <i>de novo</i> assembly of transcript
sequences (<a href="https://doi.org/10.1186/s13059-016-0881-8"
target="_blank">Conesa et al. 2016</a>).
For RNA-seq QC analysis, QualiMap can be used to assess alignments produced by
the first of these approaches. For input, it requires a GTF annotation file
along with a reference genome, which can be used to reconstruct the exon
structure of known transcripts. QualiMap uses this information to calculate the
depth of coverage along the length of each annotated transcript. For a set of
reads mapped to a transcript, the depth of coverage at a given base position is
the number of high-quality reads that map to the transcript at that position
(<a href="https://doi.org/10.1038/nrg3642" target="_blank">Sims et al. 2014</a>).
QualiMap calculates coverage depth at every base position of each annotated
transcript. To enable meaningful comparison between transcripts, base positions
are rescaled to relative positions expressed as percentage distance along each
transcript (*0%, 1%, …, 99%*). For the set of transcripts with at least
one mapped read, QualiMap plots the cumulative mapped-read depth (y-axis) at
each relative transcript position (x-axis). This plot shows the gene coverage
profile across all mapped transcripts for each read dataset. It provides a
visual way to assess positional biases, such as an accumulation of mapped reads
at the 3′ end of transcripts, which may indicate poor RNA quality in the
original sample (<a href="https://doi.org/10.1186/s13059-016-0881-8"
target="_blank">Conesa et al. 2016</a>).'''
self.add_section(
name='Gene Coverage Profile',
anchor='qualimap-genome-fraction-coverage',
description=
'Mean distribution of coverage depth across the length of all mapped transcripts.',
helptext=coverage_profile_helptext,
plot=linegraph.plot(
self.qualimap_rnaseq_cov_hist, {
'id': 'qualimap_gene_coverage_profile',
'title':
'Qualimap RNAseq: Coverage Profile Along Genes (total)',
'ylab': 'Coverage',
'xlab': 'Transcript Position (%)',
'ymin': 0,
'xmin': 0,
'xmax': 100,
'tt_label': '<b>{point.x} bp</b>: {point.y:.0f}%',
}))
# Return the number of reports we found
return len(self.qualimap_rnaseq_genome_results.keys())
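# Editor's illustrative sketch (not part of this MultiQC module): the coverage
# profile plotted above uses relative transcript positions. A minimal,
# assumption-laden version of that rescaling -- mapping per-base depths of one
# transcript onto percent bins 0-99 -- could look like this:
def _example_scaled_coverage(per_base_depths):
    """Sum per-base depths into 100 relative-position bins (assumes a non-empty list)."""
    bins = [0.0] * 100
    n = len(per_base_depths)
    for i, depth in enumerate(per_base_depths):
        bins[min(99, int(100 * i / n))] += depth
    return bins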
| #!/usr/bin/env python
""" RnaSeqReport plugin module """
from __future__ import print_function
from collections import defaultdict, OrderedDict
import logging
import os
import re
from multiqc import config
from multiqc.modules.base_module import BaseMultiqcModule
from multiqc.plots import bargraph, linegraph, table
# Initialise the logger
log = logging.getLogger(__name__)
class MultiqcModule(BaseMultiqcModule):
""" Qualimap is really a collection of separate programs:
BamQC, RNASeq and Counts.. This module is split into separate
files to reflect this and help with code organisation. """
def __init__(self):
# Initialise the parent object
super(MultiqcModule, self).__init__(
name='Post Alignment QC',
anchor='rnaseq_post_alignment_qc',
href='https://github.com/clinico-omics/rnaseq-report',
info=" is an report module to show the quality of base.")
# Initialise the submodules
from . import QM_BamQC
# Set up class objects to hold parsed data()
self.general_stats_headers = OrderedDict()
self.general_stats_data = defaultdict(lambda: dict())
n = dict()
n['BamQC'] = QM_BamQC.parse_reports(self)
if n['BamQC'] > 0:
log.info("Found {} BamQC reports".format(n['BamQC']))
### add report section-------------
# Make the plots for the report
self.add_qm_rnaseq_stats_data()
self.post_alignment_stats()
QM_BamQC.report_sections(self)
self.qm_rnaseq_parse_reports()
# Exit if we didn't find anything
if sum(n.values()) == 0:
raise UserWarning
# Helper functions
def get_s_name(self, f):
s_name = os.path.basename(os.path.dirname(f['root']))
s_name = self.clean_s_name(s_name, f['root'])
if s_name.endswith('.qc'):
s_name = s_name[:-3]
return s_name
def post_alignment_stats(self):
""" Add some single-number stats to the basic statistics
table at the top of the report """
# Prep the data
error_rate_mean = []
gc_mean = []
median_insert_size_mean = []
exonic_mean = []
intronic_mean = []
intergenic_mean = []
for k in self.general_stats_data.keys():
error_rate_mean.append(
self.general_stats_data[k]['general_error_rate'])
gc_mean.append(self.general_stats_data[k]['avg_gc'])
median_insert_size_mean.append(
self.general_stats_data[k]['median_insert_size'])
exonic_mean.append(
self.general_stats_data[k]['reads_aligned_exonic'])
intronic_mean.append(
self.general_stats_data[k]['reads_aligned_intronic'])
intergenic_mean.append(
self.general_stats_data[k]['reads_aligned_intergenic'])
self.general_stats_data['Batch average value'] = {
'general_error_rate':
sum(error_rate_mean) / len(error_rate_mean),
'avg_gc':
sum(gc_mean) / len(gc_mean),
'median_insert_size':
sum(median_insert_size_mean) / len(median_insert_size_mean),
'reads_aligned_exonic':
sum(exonic_mean) / len(exonic_mean),
'reads_aligned_intronic':
sum(intronic_mean) / len(intronic_mean),
'reads_aligned_intergenic':
sum(intergenic_mean) / len(intergenic_mean)
}
self.general_stats_data['Historical value'] = {
'general_error_rate': '0.01±0.01'
}
self.add_section(name='Post alignment stats',
anchor='post_alignment_stats',
description='The summary of qualimap',
helptext='''
To enable multiple samples to be plotted on the same graph, only the mean quality
scores are plotted (unlike the box plots seen in FastQC reports).
Taken from the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/2%20Per%20Base%20Sequence%20Quality.html):
_The y-axis on the graph shows the quality scores. The higher the score, the better
the base call. The background of the graph divides the y axis into very good quality
calls (green), calls of reasonable quality (orange), and calls of poor quality (red).
The quality of calls on most platforms will degrade as the run progresses, so it is
common to see base calls falling into the orange area towards the end of a read._
''',
plot=table.plot(self.general_stats_data,
self.general_stats_headers))
def add_qm_rnaseq_stats_data(self):
regexes = {
'reads_aligned': r"read(?:s| pairs) aligned\s*=\s*([\d,]+)",
'total_alignments': r"total alignments\s*=\s*([\d,]+)",
'non_unique_alignments': r"non-unique alignments\s*=\s*([\d,]+)",
'reads_aligned_genes': r"aligned to genes\s*=\s*([\d,]+)",
'ambiguous_alignments': r"ambiguous alignments\s*=\s*([\d,]+)",
'not_aligned': r"not aligned\s*=\s*([\d,]+)",
'5_3_bias': r"5'-3' bias\s*=\s*([\d,\.]+)$",
'reads_aligned_exonic': r"exonic\s*=\s*([\d,]+)",
'reads_aligned_intronic': r"intronic\s*=\s*([\d,]+)",
'reads_aligned_intergenic': r"intergenic\s*=\s*([\d,]+)",
'reads_aligned_overlapping_exon':
r"overlapping exon\s*=\s*([\d,]+)",
}
for f in self.find_log_files(
'rnaseq_post_alignment_qc/rnaseq_qc/rnaseq_qc_results'):
d = dict()
# Get the sample name
s_name_regex = re.search(r"bam file\s*=\s*(.+)", f['f'],
re.MULTILINE)
if s_name_regex:
d['bam_file'] = s_name_regex.group(1)
s_name = self.clean_s_name(d['bam_file'], f['root'])
else:
log.warn(
"Couldn't find an input filename in genome_results file {}/{}"
.format(f['root'], f['fn']))
return None
# Check for and 'fix' European style decimal places / thousand separators
comma_regex = re.search(r"exonic\s*=\s*[\d\.]+ \(\d{1,3},\d+%\)",
f['f'], re.MULTILINE)
if comma_regex:
log.debug(
"Trying to fix European comma style syntax in Qualimap report {}/{}"
.format(f['root'], f['fn']))
f['f'] = f['f'].replace('.', '')
f['f'] = f['f'].replace(',', '.')
# Go through all numeric regexes
for k, r in regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
try:
d[k] = float(r_search.group(1).replace(',', ''))
except UnicodeEncodeError:
# Qualimap reports infinity (\u221e) when 3' bias denominator is zero
pass
except ValueError:
d[k] = r_search.group(1)
for k in [
'reads_aligned_exonic', 'reads_aligned_intronic',
'reads_aligned_intergenic'
]:
try:
self.general_stats_data[s_name][k] = d[k]
except KeyError:
pass
#### General Stats
self.general_stats_headers['reads_aligned_exonic'] = {
'title': 'Exonic',
'description': 'Reads Aligned Exonic',
'min': 0,
'scale': 'RdBu',
'shared_key': 'read_count'
}
self.general_stats_headers['reads_aligned_intronic'] = {
'title': 'Intronic',
'description': 'Reads Aligned Intronic',
'min': 0,
'scale': 'RdBu',
'shared_key': 'read_count'
}
self.general_stats_headers['reads_aligned_intergenic'] = {
'title': 'Intergenic',
'description': 'Reads Aligned Intergenic',
'min': 0,
'scale': 'RdBu',
'shared_key': 'read_count'
}
def qm_rnaseq_parse_reports(self):
""" Find Qualimap RNASeq reports and parse their data """
self.qualimap_rnaseq_genome_results = dict()
regexes = {
'reads_aligned': r"read(?:s| pairs) aligned\s*=\s*([\d,]+)",
'total_alignments': r"total alignments\s*=\s*([\d,]+)",
'non_unique_alignments': r"non-unique alignments\s*=\s*([\d,]+)",
'reads_aligned_genes': r"aligned to genes\s*=\s*([\d,]+)",
'ambiguous_alignments': r"ambiguous alignments\s*=\s*([\d,]+)",
'not_aligned': r"not aligned\s*=\s*([\d,]+)",
'5_3_bias': r"5'-3' bias\s*=\s*([\d,\.]+)$",
'reads_aligned_exonic': r"exonic\s*=\s*([\d,]+)",
'reads_aligned_intronic': r"intronic\s*=\s*([\d,]+)",
'reads_aligned_intergenic': r"intergenic\s*=\s*([\d,]+)",
'reads_aligned_overlapping_exon':
r"overlapping exon\s*=\s*([\d,]+)",
}
for f in self.find_log_files(
'rnaseq_post_alignment_qc/rnaseq_qc/rnaseq_qc_results'):
d = dict()
# Get the sample name
s_name_regex = re.search(r"bam file\s*=\s*(.+)", f['f'],
re.MULTILINE)
if s_name_regex:
d['bam_file'] = s_name_regex.group(1)
s_name = self.clean_s_name(d['bam_file'], f['root'])
else:
log.warn(
"Couldn't find an input filename in genome_results file {}/{}"
.format(f['root'], f['fn']))
return None
# Check for and 'fix' European style decimal places / thousand separators
comma_regex = re.search(r"exonic\s*=\s*[\d\.]+ \(\d{1,3},\d+%\)",
f['f'], re.MULTILINE)
if comma_regex:
log.debug(
"Trying to fix European comma style syntax in Qualimap report {}/{}"
.format(f['root'], f['fn']))
f['f'] = f['f'].replace('.', '')
f['f'] = f['f'].replace(',', '.')
# Go through all numeric regexes
for k, r in regexes.items():
r_search = re.search(r, f['f'], re.MULTILINE)
if r_search:
try:
d[k] = float(r_search.group(1).replace(',', ''))
except UnicodeEncodeError:
# Qualimap reports infinity (\u221e) when 3' bias denominator is zero
pass
except ValueError:
d[k] = r_search.group(1)
# Save results
if s_name in self.qualimap_rnaseq_genome_results:
log.debug(
"Duplicate genome results sample name found! Overwriting: {}"
.format(s_name))
self.qualimap_rnaseq_genome_results[s_name] = d
self.add_data_source(f,
s_name=s_name,
section='rna_genome_results')
#### Coverage profile
self.qualimap_rnaseq_cov_hist = dict()
for f in self.find_log_files(
'rnaseq_post_alignment_qc/rnaseq_qc/coverage',
filehandles=True):
s_name = self.get_s_name(f)
d = dict()
for l in f['f']:
if l.startswith('#'):
continue
coverage, count = l.split(None, 1)
coverage = int(round(float(coverage)))
count = float(count)
d[coverage] = count
if len(d) == 0:
log.debug(
"Couldn't parse contents of coverage histogram file {}".
format(f['fn']))
return None
# Save results
if s_name in self.qualimap_rnaseq_cov_hist:
log.debug(
"Duplicate coverage histogram sample name found! Overwriting: {}"
.format(s_name))
self.qualimap_rnaseq_cov_hist[s_name] = d
self.add_data_source(f,
s_name=s_name,
section='rna_coverage_histogram')
# Filter to strip out ignored sample names
self.qualimap_rnaseq_genome_results = self.ignore_samples(
self.qualimap_rnaseq_genome_results)
self.qualimap_rnaseq_cov_hist = self.ignore_samples(
self.qualimap_rnaseq_cov_hist)
#### Plots
# Genomic Origin Bar Graph
# NB: Ignore 'Overlapping Exon' in report - these make the numbers add up to > 100%
if len(self.qualimap_rnaseq_genome_results) > 0:
gorigin_cats = OrderedDict()
gorigin_cats['reads_aligned_exonic'] = {'name': 'Exonic'}
gorigin_cats['reads_aligned_intronic'] = {'name': 'Intronic'}
gorigin_cats['reads_aligned_intergenic'] = {'name': 'Intergenic'}
gorigin_pconfig = {
'id': 'qualimap_genomic_origin',
'title': 'Qualimap RNAseq: Genomic Origin',
'ylab': 'Number of reads',
'cpswitch_c_active': False
}
genomic_origin_helptext = '''
There are currently three main approaches to map reads to transcripts in an
RNA-seq experiment: mapping reads to a reference genome to identify expressed
transcripts that are annotated (and discover those that are unknown), mapping
reads to a reference transcriptome, and <i>de novo</i> assembly of transcript
sequences (<a href="https://doi.org/10.1186/s13059-016-0881-8"
target="_blank">Conesa et al. 2016</a>).
For RNA-seq QC analysis, QualiMap can be used to assess alignments produced by
the first of these approaches. For input, it requires a GTF annotation file
along with a reference genome, which can be used to reconstruct the exon
structure of known transcripts. This allows mapped reads to be grouped by
whether they originate in an exonic region (for QualiMap, this may include
5′ and 3′ UTR regions as well as protein-coding exons), an intron,
or an intergenic region (see the <a href="http://qualimap.bioinfo.cipf.es/doc_html/index.html"
target="_blank">Qualimap 2 documentation</a>).
The inferred genomic origins of RNA-seq reads are presented here as a bar graph
showing either the number or percentage of mapped reads in each read dataset
that have been assigned to each type of genomic region. This graph can be used
to assess the proportion of useful reads in an RNA-seq experiment. That
proportion can be reduced by the presence of intron sequences, especially if
depletion of ribosomal RNA was used during sample preparation (<a href="https://doi.org/10.1038/nrg3642"
target="_blank">Sims et al. 2014</a>). It can also be reduced by off-target
transcripts, which are detected in greater numbers at the sequencing depths
needed to detect poorly-expressed transcripts (<a href="https://doi.org/10.1101/gr.124321.111"
target="_blank">Tarazona et al. 2011</a>).'''
self.add_section(
name='Genomic origin of reads',
anchor='qualimap-reads-genomic-origin',
description=
'Classification of mapped reads as originating in exonic, intronic or intergenic regions. These can be displayed as either the number or percentage of mapped reads.',
helptext=genomic_origin_helptext,
plot=bargraph.plot(self.qualimap_rnaseq_genome_results,
gorigin_cats, gorigin_pconfig))
if len(self.qualimap_rnaseq_cov_hist) > 0:
coverage_profile_helptext = '''
There are currently three main approaches to map reads to transcripts in an
RNA-seq experiment: mapping reads to a reference genome to identify expressed
transcripts that are annotated (and discover those that are unknown), mapping
reads to a reference transcriptome, and <i>de novo</i> assembly of transcript
sequences (<a href="https://doi.org/10.1186/s13059-016-0881-8"
target="_blank">Conesa et al. 2016</a>).
For RNA-seq QC analysis, QualiMap can be used to assess alignments produced by
the first of these approaches. For input, it requires a GTF annotation file
along with a reference genome, which can be used to reconstruct the exon
structure of known transcripts. QualiMap uses this information to calculate the
depth of coverage along the length of each annotated transcript. For a set of
reads mapped to a transcript, the depth of coverage at a given base position is
the number of high-quality reads that map to the transcript at that position
(<a href="https://doi.org/10.1038/nrg3642" target="_blank">Sims et al. 2014</a>).
QualiMap calculates coverage depth at every base position of each annotated
transcript. To enable meaningful comparison between transcripts, base positions
are rescaled to relative positions expressed as percentage distance along each
transcript (*0%, 1%, …, 99%*). For the set of transcripts with at least
one mapped read, QualiMap plots the cumulative mapped-read depth (y-axis) at
each relative transcript position (x-axis). This plot shows the gene coverage
profile across all mapped transcripts for each read dataset. It provides a
visual way to assess positional biases, such as an accumulation of mapped reads
at the 3′ end of transcripts, which may indicate poor RNA quality in the
original sample (<a href="https://doi.org/10.1186/s13059-016-0881-8"
target="_blank">Conesa et al. 2016</a>).'''
self.add_section(
name='Gene Coverage Profile',
anchor='qualimap-genome-fraction-coverage',
description=
'Mean distribution of coverage depth across the length of all mapped transcripts.',
helptext=coverage_profile_helptext,
plot=linegraph.plot(
self.qualimap_rnaseq_cov_hist, {
'id': 'qualimap_gene_coverage_profile',
'title':
'Qualimap RNAseq: Coverage Profile Along Genes (total)',
'ylab': 'Coverage',
'xlab': 'Transcript Position (%)',
'ymin': 0,
'xmin': 0,
'xmax': 100,
'tt_label': '<b>{point.x} bp</b>: {point.y:.0f}%',
}))
# Return the number of reports we found
return len(self.qualimap_rnaseq_genome_results.keys())
| en | 0.841989 | #!/usr/bin/env python RnaSeqReport plugin module # Initialise the logger Qualimap is really a collection of separate programs: BamQC, RNASeq and Counts.. This module is split into separate files to reflect this and help with code organisation. # Initialise the parent object # Initialise the submodules # Set up class objects to hold parsed data() ### add report section------------- # Make the plots for the report # Exit if we didn't find anything # Helper functions Add some single-number stats to the basic statistics table at the top of the report # Prep the data To enable multiple samples to be plotted on the same graph, only the mean quality scores are plotted (unlike the box plots seen in FastQC reports). Taken from the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/2%20Per%20Base%20Sequence%20Quality.html): _The y-axis on the graph shows the quality scores. The higher the score, the better the base call. The background of the graph divides the y axis into very good quality calls (green), calls of reasonable quality (orange), and calls of poor quality (red). The quality of calls on most platforms will degrade as the run progresses, so it is common to see base calls falling into the orange area towards the end of a read._ # Get the sample name # Check for and 'fix' European style decimal places / thousand separators # Go through all numeric regexes # Qualimap reports infinity (\u221e) when 3' bias denominator is zero #### General Stats Find Qualimap RNASeq reports and parse their data # Get the sample name # Check for and 'fix' European style decimal places / thousand separators # Go through all numeric regexes # Qualimap reports infinity (\u221e) when 3' bias denominator is zero # Save results #### Coverage profile # Save results # Filter to strip out ignored sample names #### Plots # Genomic Origin Bar Graph # NB: Ignore 'Overlapping Exon' in report - these make the numbers add up to > 100% There are currently three main approaches to map reads to transcripts in an RNA-seq experiment: mapping reads to a reference genome to identify expressed transcripts that are annotated (and discover those that are unknown), mapping reads to a reference transcriptome, and <i>de novo</i> assembly of transcript sequences (<a href="https://doi.org/10.1186/s13059-016-0881-8" target="_blank">Conesa et al. 2016</a>). For RNA-seq QC analysis, QualiMap can be used to assess alignments produced by the first of these approaches. For input, it requires a GTF annotation file along with a reference genome, which can be used to reconstruct the exon structure of known transcripts. This allows mapped reads to be grouped by whether they originate in an exonic region (for QualiMap, this may include 5′ and 3′ UTR regions as well as protein-coding exons), an intron, or an intergenic region (see the <a href="http://qualimap.bioinfo.cipf.es/doc_html/index.html" target="_blank">Qualimap 2 documentation</a>). The inferred genomic origins of RNA-seq reads are presented here as a bar graph showing either the number or percentage of mapped reads in each read dataset that have been assigned to each type of genomic region. This graph can be used to assess the proportion of useful reads in an RNA-seq experiment. That proportion can be reduced by the presence of intron sequences, especially if depletion of ribosomal RNA was used during sample preparation (<a href="https://doi.org/10.1038/nrg3642" target="_blank">Sims et al. 2014</a>). 
It can also be reduced by off-target transcripts, which are detected in greater numbers at the sequencing depths needed to detect poorly-expressed transcripts (<a href="https://doi.org/10.1101/gr.124321.111" target="_blank">Tarazona et al. 2011</a>). There are currently three main approaches to map reads to transcripts in an RNA-seq experiment: mapping reads to a reference genome to identify expressed transcripts that are annotated (and discover those that are unknown), mapping reads to a reference transcriptome, and <i>de novo</i> assembly of transcript sequences (<a href="https://doi.org/10.1186/s13059-016-0881-8" target="_blank">Conesa et al. 2016</a>). For RNA-seq QC analysis, QualiMap can be used to assess alignments produced by the first of these approaches. For input, it requires a GTF annotation file along with a reference genome, which can be used to reconstruct the exon structure of known transcripts. QualiMap uses this information to calculate the depth of coverage along the length of each annotated transcript. For a set of reads mapped to a transcript, the depth of coverage at a given base position is the number of high-quality reads that map to the transcript at that position (<a href="https://doi.org/10.1038/nrg3642" target="_blank">Sims et al. 2014</a>). QualiMap calculates coverage depth at every base position of each annotated transcript. To enable meaningful comparison between transcripts, base positions are rescaled to relative positions expressed as percentage distance along each transcript (*0%, 1%, …, 99%*). For the set of transcripts with at least one mapped read, QualiMap plots the cumulative mapped-read depth (y-axis) at each relative transcript position (x-axis). This plot shows the gene coverage profile across all mapped transcripts for each read dataset. It provides a visual way to assess positional biases, such as an accumulation of mapped reads at the 3′ end of transcripts, which may indicate poor RNA quality in the original sample (<a href="https://doi.org/10.1186/s13059-016-0881-8" target="_blank">Conesa et al. 2016</a>). # Return the number of reports we found | 2.360645 | 2 |
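A minimal sketch (not part of the dataset record; the input format is an assumption) of the rescaling-and-accumulation idea described above: per-transcript read depths are mapped onto relative positions 0-99% and summed across transcripts to form the coverage profile.
import numpy as np
def coverage_profile(per_transcript_depths):
    # per_transcript_depths: one 1-D array per transcript holding read depth at
    # every base position of that transcript (input format is an assumption).
    profile = np.zeros(100)
    for depths in per_transcript_depths:
        rel_positions = np.linspace(0, 99, num=len(depths)).astype(int)  # rescale to 0..99%
        for pos, depth in zip(rel_positions, depths):
            profile[pos] += depth  # accumulate depth across transcripts
    return profile
print(coverage_profile([np.arange(200), np.arange(50)])[[0, 50, 99]])  # toy 3'-biased example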
src_rl_group/load_npy.py | icdm2021submission/Continual-Neural-Network-Model-Retraining | 0 | 6631672 | <filename>src_rl_group/load_npy.py
import numpy as np
num_classes = 10
depth = 1  # mnist is 1, cifar-10 is 3
shape=(5, 5, -1, 32)
array_weights1_1 = np.load('cluster_array_weights1_1.npy')
array_weights1_1 = np.reshape(array_weights1_1, shape)
print(array_weights1_1.shape)
np.save('cluster_array_weights1_1', array_weights1_1)
shape = (5, 5, 32, 64)
array_weights1_2 = np.load('cluster_array_weights1_2.npy')
array_weights1_2 = np.reshape(array_weights1_2, shape)
print(array_weights1_2.shape)
np.save('cluster_array_weights1_2', array_weights1_2)
shape = (-1, 1024)
array_weights3_1 = np.load('cluster_array_weights3_1.npy')
array_weights3_1 = np.reshape(array_weights3_1, shape)
print(array_weights3_1.shape)
np.save('cluster_array_weights3_1', array_weights3_1)
shape = (1024, int(num_classes))
array_weights3_2 = np.load('cluster_array_weights3_2.npy')
array_weights3_2 = np.reshape(array_weights3_2, shape)
print(array_weights3_2.shape)
np.save('cluster_array_weights3_2', array_weights3_2) | <filename>src_rl_group/load_npy.py
import numpy as np
num_classes = 10
depth = 1  # mnist is 1, cifar-10 is 3
shape=(5, 5, -1, 32)
array_weights1_1 = np.load('cluster_array_weights1_1.npy')
array_weights1_1 = np.reshape(array_weights1_1, shape)
print(array_weights1_1.shape)
np.save('cluster_array_weights1_1', array_weights1_1)
shape = (5, 5, 32, 64)
array_weights1_2 = np.load('cluster_array_weights1_2.npy')
array_weights1_2 = np.reshape(array_weights1_2, shape)
print(array_weights1_2.shape)
np.save('cluster_array_weights1_2', array_weights1_2)
shape = (-1, 1024)
array_weights3_1 = np.load('cluster_array_weights3_1.npy')
array_weights3_1 = np.reshape(array_weights3_1, shape)
print(array_weights3_1.shape)
np.save('cluster_array_weights3_1', array_weights3_1)
shape = (1024, int(num_classes))
array_weights3_2 = np.load('cluster_array_weights3_2.npy')
array_weights3_2 = np.reshape(array_weights3_2, shape)
print(array_weights3_2.shape)
np.save('cluster_array_weights3_2', array_weights3_2) | en | 0.963504 | #mnist is 1, cifar-10 is 3 | 2.722361 | 3 |
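An illustrative round-trip of the reshape logic in this record (dummy arrays, not the real .npy files): passing -1 lets numpy recover the input depth, which is what distinguishes the MNIST and CIFAR-10 cases mentioned in the comment.
import numpy as np
flat = np.zeros(5 * 5 * 1 * 32)              # flattened first conv kernel, MNIST (depth 1)
restored = np.reshape(flat, (5, 5, -1, 32))  # -1 lets numpy recover the input depth
assert restored.shape == (5, 5, 1, 32)
flat_rgb = np.zeros(5 * 5 * 3 * 32)          # same layer for CIFAR-10 (depth 3)
assert np.reshape(flat_rgb, (5, 5, -1, 32)).shape == (5, 5, 3, 32)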
argoverse/data_loading/argoverse_forecasting_loader.py | alliecc/argoverse-api | 1 | 6631673 | <filename>argoverse/data_loading/argoverse_forecasting_loader.py
# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
import os
from functools import lru_cache
from pathlib import Path
from typing import Any, Mapping, Optional, Sequence, Union
import numpy as np
import pandas as pd
__all__ = ["ArgoverseForecastingLoader"]
@lru_cache(128)
def _read_csv(path: Path, *args: Any, **kwargs: Any) -> pd.DataFrame:
"""A caching CSV reader
Args:
path: Path to the csv file
*args, **kwargs: optional arguments to be used while data loading
Returns:
pandas DataFrame containing the loaded csv
"""
return pd.read_csv(path, *args, **kwargs)
class ArgoverseForecastingLoader:
def __init__(self, root_dir: Union[str, Path]):
"""Initialization function for the class.
Args:
root_dir: Path to the folder having sequence csv files
"""
self.counter: int = 0
root_dir = Path(root_dir)
self.seq_list: Sequence[Path] = [(root_dir / x).absolute() for x in os.listdir(root_dir)]
self.current_seq: Path = self.seq_list[self.counter]
@property
def track_id_list(self) -> Sequence[int]:
"""Get the track ids in the current sequence.
Returns:
list of track ids in the current sequence
"""
_track_id_list: Sequence[int] = np.unique(self.seq_df["TRACK_ID"].values).tolist()
return _track_id_list
@property
def city(self) -> str:
"""Get the city name for the current sequence.
Returns:
city name, i.e., either 'PIT' or 'MIA'
"""
_city: str = self.seq_df["CITY_NAME"].values[0]
return _city
@property
def num_tracks(self) -> int:
"""Get the number of tracks in the current sequence.
Returns:
number of tracks in the current sequence
"""
return len(self.track_id_list)
@property
def seq_df(self) -> pd.DataFrame:
"""Get the dataframe for the current sequence.
Returns:
pandas DataFrame for the current sequence
"""
return _read_csv(self.current_seq)
@property
def agent_traj(self) -> np.ndarray:
"""Get the trajectory for the track of type 'AGENT' in the current sequence.
Returns:
numpy array of shape (seq_len x 2) for the agent trajectory
"""
agent_x = self.seq_df[self.seq_df["OBJECT_TYPE"] == "AGENT"]["X"]
agent_y = self.seq_df[self.seq_df["OBJECT_TYPE"] == "AGENT"]["Y"]
agent_traj = np.column_stack((agent_x, agent_y))
return agent_traj
def __iter__(self) -> "ArgoverseForecastingLoader":
"""Iterator for enumerating over sequences in the root_dir specified.
Returns:
Data Loader object for the first sequence in the data
"""
self.counter = 0
return self
def __next__(self) -> "ArgoverseForecastingLoader":
"""Get the Data Loader object for the next sequence in the data.
Returns:
Data Loader object for the next sequence in the data
"""
if self.counter >= len(self):
raise StopIteration
else:
self.current_seq = self.seq_list[self.counter]
self.counter += 1
return self
def __len__(self) -> int:
"""Get the number of sequences in the data
Returns:
Number of sequences in the data
"""
return len(self.seq_list)
def __str__(self) -> str:
"""Decorator that returns a string storing some stats of the current sequence
Returns:
A string storing some stats of the current sequence
"""
return f"""Seq : {self.current_seq}
----------------------
|| City: {self.city}
|| # Tracks: {len(self.track_id_list)}
----------------------"""
def __getitem__(self, key: int) -> "ArgoverseForecastingLoader":
"""Get the DataLoader object for the sequence corresponding to the given index.
Args:
key: index of the element
Returns:
Data Loader object for the given index
"""
self.counter = key
self.current_seq = self.seq_list[self.counter]
return self
def get(self, seq_id: Union[Path, str]) -> "ArgoverseForecastingLoader":
"""Get the DataLoader object for the given sequence path.
Args:
seq_id: Fully qualified path to the sequence
Returns:
Data Loader object for the given sequence path
"""
self.current_seq = Path(seq_id).absolute()
return self
| <filename>argoverse/data_loading/argoverse_forecasting_loader.py
# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
import os
from functools import lru_cache
from pathlib import Path
from typing import Any, Mapping, Optional, Sequence, Union
import numpy as np
import pandas as pd
__all__ = ["ArgoverseForecastingLoader"]
@lru_cache(128)
def _read_csv(path: Path, *args: Any, **kwargs: Any) -> pd.DataFrame:
"""A caching CSV reader
Args:
path: Path to the csv file
*args, **kwargs: optional arguments to be used while data loading
Returns:
pandas DataFrame containing the loaded csv
"""
return pd.read_csv(path, *args, **kwargs)
class ArgoverseForecastingLoader:
def __init__(self, root_dir: Union[str, Path]):
"""Initialization function for the class.
Args:
root_dir: Path to the folder having sequence csv files
"""
self.counter: int = 0
root_dir = Path(root_dir)
self.seq_list: Sequence[Path] = [(root_dir / x).absolute() for x in os.listdir(root_dir)]
self.current_seq: Path = self.seq_list[self.counter]
@property
def track_id_list(self) -> Sequence[int]:
"""Get the track ids in the current sequence.
Returns:
list of track ids in the current sequence
"""
_track_id_list: Sequence[int] = np.unique(self.seq_df["TRACK_ID"].values).tolist()
return _track_id_list
@property
def city(self) -> str:
"""Get the city name for the current sequence.
Returns:
city name, i.e., either 'PIT' or 'MIA'
"""
_city: str = self.seq_df["CITY_NAME"].values[0]
return _city
@property
def num_tracks(self) -> int:
"""Get the number of tracks in the current sequence.
Returns:
number of tracks in the current sequence
"""
return len(self.track_id_list)
@property
def seq_df(self) -> pd.DataFrame:
"""Get the dataframe for the current sequence.
Returns:
pandas DataFrame for the current sequence
"""
return _read_csv(self.current_seq)
@property
def agent_traj(self) -> np.ndarray:
"""Get the trajectory for the track of type 'AGENT' in the current sequence.
Returns:
numpy array of shape (seq_len x 2) for the agent trajectory
"""
agent_x = self.seq_df[self.seq_df["OBJECT_TYPE"] == "AGENT"]["X"]
agent_y = self.seq_df[self.seq_df["OBJECT_TYPE"] == "AGENT"]["Y"]
agent_traj = np.column_stack((agent_x, agent_y))
return agent_traj
def __iter__(self) -> "ArgoverseForecastingLoader":
"""Iterator for enumerating over sequences in the root_dir specified.
Returns:
Data Loader object for the first sequence in the data
"""
self.counter = 0
return self
def __next__(self) -> "ArgoverseForecastingLoader":
"""Get the Data Loader object for the next sequence in the data.
Returns:
Data Loader object for the next sequence in the data
"""
if self.counter >= len(self):
raise StopIteration
else:
self.current_seq = self.seq_list[self.counter]
self.counter += 1
return self
def __len__(self) -> int:
"""Get the number of sequences in the data
Returns:
Number of sequences in the data
"""
return len(self.seq_list)
def __str__(self) -> str:
"""Decorator that returns a string storing some stats of the current sequence
Returns:
A string storing some stats of the current sequence
"""
return f"""Seq : {self.current_seq}
----------------------
|| City: {self.city}
|| # Tracks: {len(self.track_id_list)}
----------------------"""
def __getitem__(self, key: int) -> "ArgoverseForecastingLoader":
"""Get the DataLoader object for the sequence corresponding to the given index.
Args:
key: index of the element
Returns:
Data Loader object for the given index
"""
self.counter = key
self.current_seq = self.seq_list[self.counter]
return self
def get(self, seq_id: Union[Path, str]) -> "ArgoverseForecastingLoader":
"""Get the DataLoader object for the given sequence path.
Args:
seq_id: Fully qualified path to the sequence
Returns:
Data Loader object for the given sequence path
"""
self.current_seq = Path(seq_id).absolute()
return self
| en | 0.683257 | # <Copyright 2019, Argo AI, LLC. Released under the MIT license.> A caching CSV reader Args: path: Path to the csv file *args, **kwargs: optional arguments to be used while data loading Returns: pandas DataFrame containing the loaded csv Initialization function for the class. Args: root_dir: Path to the folder having sequence csv files Get the track ids in the current sequence. Returns: list of track ids in the current sequence Get the city name for the current sequence. Returns: city name, i.e., either 'PIT' or 'MIA' Get the number of tracks in the current sequence. Returns: number of tracks in the current sequence Get the dataframe for the current sequence. Returns: pandas DataFrame for the current sequence Get the trajectory for the track of type 'AGENT' in the current sequence. Returns: numpy array of shape (seq_len x 2) for the agent trajectory Iterator for enumerating over sequences in the root_dir specified. Returns: Data Loader object for the first sequence in the data Get the Data Loader object for the next sequence in the data. Returns: Data Loader object for the next sequence in the data Get the number of sequences in the data Returns: Number of sequences in the data Decorator that returns a string storing some stats of the current sequence Returns: A string storing some stats of the current sequence Seq : {self.current_seq} ---------------------- || City: {self.city} || # Tracks: {len(self.track_id_list)} ---------------------- Get the DataLoader object for the sequence corresponding to the given index. Args: key: index of the element Returns: Data Loader object for the given index Get the DataLoader object for the given sequence path. Args: seq_id: Fully qualified path to the sequence Returns: Data Loader object for the given sequence path | 2.830554 | 3 |
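A hedged usage sketch for the loader defined in this record; the data directory path is an assumption, and the loader API (len, iteration, city, num_tracks, agent_traj) is taken from the class above.
from argoverse.data_loading.argoverse_forecasting_loader import ArgoverseForecastingLoader
loader = ArgoverseForecastingLoader("data/forecasting_sample/data")  # directory of sequence csv files (path assumed)
print(len(loader), "sequences")
for seq in loader:                  # iteration repositions the same loader object on each sequence
    print(seq.current_seq, seq.city, seq.num_tracks)
    agent_xy = seq.agent_traj       # (seq_len x 2) trajectory of the AGENT track
    break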
searchableencryption/hve/hierarchicalencoding.py | duykienvp/searchableencryption | 0 | 6631674 | """ HVE with hierarchical encoding.
Section 3.2 of 'An Efficient Privacy-Preserving System for Monitoring Mobile Users:
Making Searchable Encryption Practical'
Link: https://dl.acm.org/citation.cfm?id=2557559
"""
def encode_cell_id(dim: int, row: int, col: int) -> list:
""" Get hierarchical encoding binary representation of cell (row, col) with the grid of dim x dim.
See Section 3.2 in `https://dl.acm.org/citation.cfm?id=2557559`
:param int dim: dimension of the grid
:param int row: row index
:param int col: col index
:returns: a list of binary representation
"""
rep = []
mid = int(dim / 2)
if col < mid:
rep.append(0)
else:
rep.append(1)
col -= mid
if row < mid:
rep.append(0)
else:
rep.append(1)
row -= mid
if 1 < mid:
rep.extend(encode_cell_id(mid, row, col))
return rep
| """ HVE with hierarchical encoding.
Section 3.2 of 'An Efficient Privacy-Preserving System for Monitoring Mobile Users:
Making Searchable Encryption Practical'
Link: https://dl.acm.org/citation.cfm?id=2557559
"""
def encode_cell_id(dim: int, row: int, col: int) -> list:
""" Get hierarchical encoding binary representation of cell (row, col) with the grid of dim x dim.
See Section 3.2 in `https://dl.acm.org/citation.cfm?id=2557559`
:param int dim: dimension of the grid
:param int row: row index
:param int col: col index
:returns: a list of binary representation
"""
rep = []
mid = int(dim / 2)
if col < mid:
rep.append(0)
else:
rep.append(1)
col -= mid
if row < mid:
rep.append(0)
else:
rep.append(1)
row -= mid
if 1 < mid:
rep.extend(encode_cell_id(mid, row, col))
return rep
| en | 0.726501 | HVE with hierarchical encoding. Section 3.2 of 'An Efficient Privacy-Preserving System for Monitoring Mobile Users: Making Searchable Encryption Practical' Link: https://dl.acm.org/citation.cfm?id=2557559 Get hierarchical encoding binary representation of cell (row, col) with the grid of dim x dim. See Section 3.2 in `https://dl.acm.org/citation.cfm?id=2557559` :param int dim: dimension of the grid :param int row: row index :param int col: col index :returns: a list of binary representation | 3.147599 | 3 |
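A quick check of encode_cell_id on a 4x4 grid (the import path is assumed from the repo layout); each recursion level contributes one column bit and one row bit identifying the quadrant at that level.
from searchableencryption.hve.hierarchicalencoding import encode_cell_id  # path assumed
print(encode_cell_id(4, 0, 0))  # [0, 0, 0, 0] -> top-left cell of a 4x4 grid
print(encode_cell_id(4, 3, 3))  # [1, 1, 1, 1] -> bottom-right cell
print(encode_cell_id(4, 2, 1))  # [0, 1, 1, 0] -> left half, bottom half, then refined within the quadrant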
src/project/project.py | memnoth/gtkworkshop | 0 | 6631675 | <filename>src/project/project.py
# project.py
from gi.repository import Gtk
from .MyHeaderBar import MyHeaderBar
from .MyMainBox import MyMainBox
class MyTextEditor(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self)
self.headerbar = MyHeaderBar()
self.set_titlebar(self.headerbar)
self.mainbox = MyMainBox()
self.add(self.mainbox)
self.set_default_size(700, 400)
self.connect("destroy", Gtk.main_quit)
self.show_all()
def project():
MyTextEditor()
| <filename>src/project/project.py
# project.py
from gi.repository import Gtk
from .MyHeaderBar import MyHeaderBar
from .MyMainBox import MyMainBox
class MyTextEditor(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self)
self.headerbar = MyHeaderBar()
self.set_titlebar(self.headerbar)
self.mainbox = MyMainBox()
self.add(self.mainbox)
self.set_default_size(700, 400)
self.connect("destroy", Gtk.main_quit)
self.show_all()
def project():
MyTextEditor()
| en | 0.339168 | # project.py | 2.226879 | 2 |
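A hedged sketch of how this module might be launched; the entry point and import path are assumptions, and the real project may start the GTK main loop elsewhere.
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from project.project import project  # import path assumed from the repo layout
project()   # builds MyTextEditor, which shows itself via show_all()
Gtk.main()  # run the main loop; the window's "destroy" handler calls Gtk.main_quit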
src/models/modules/__init__.py | takedarts/DenseResNet | 0 | 6631676 | from .blurpool import BlurPool2d
from .dropblock import DropBlock
from .reshape import ChannelPad, Reshape
from .selective_kernel import SKConv2d
from .semodule import SEModule
from .shakedrop import ShakeDrop
from .sigmoid import HSigmoid
from .signal_augmentation import SignalAugmentation
from .split_attention import SplitAttentionModule
from .stochastic_depth import StochasticDepth
from .swish import Swish, HSwish
| from .blurpool import BlurPool2d
from .dropblock import DropBlock
from .reshape import ChannelPad, Reshape
from .selective_kernel import SKConv2d
from .semodule import SEModule
from .shakedrop import ShakeDrop
from .sigmoid import HSigmoid
from .signal_augmentation import SignalAugmentation
from .split_attention import SplitAttentionModule
from .stochastic_depth import StochasticDepth
from .swish import Swish, HSwish
| none | 1 | 1.001956 | 1 |
|
resources/lib/services/playback/am_playback.py | mediabrasiltv/plugin.video.netflix | 0 | 6631677 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 <NAME> (plugin.video.netflix)
Copyright (C) 2019 Smeulf (original implementation module)
Operations for changing the playback status
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import time
import xbmc
import resources.lib.common as common
from .action_manager import ActionManager
class AMPlayback(ActionManager):
"""Operations for changing the playback status"""
SETTING_ID = 'ResumeManager_enabled'
def __init__(self):
super(AMPlayback, self).__init__()
self.resume_position = None
self.enabled = True
self.start_time = None
self.is_player_in_pause = False
def __str__(self):
return 'enabled={}'.format(self.enabled)
def initialize(self, data):
# Due to a bug on Kodi the resume on SRTM files not works correctly, so we force the skip to the resume point
self.resume_position = data.get('resume_position')
def on_playback_started(self, player_state):
if self.resume_position:
common.info('AMPlayback has forced resume point to {}', self.resume_position)
xbmc.Player().seekTime(int(self.resume_position))
def on_tick(self, player_state):
# Stops playback when paused for more than one hour.
# Some users leave the playback paused also for more than 12 hours,
# this complicates things to resume playback, because the manifest data expires and with it also all
# the streams urls are no longer guaranteed, so we force the stop of the playback.
if self.is_player_in_pause and (time.time() - self.start_time) > 3600:
common.info('The playback has been stopped because it has been exceeded 1 hour of pause')
common.stop_playback()
def on_playback_pause(self, player_state):
self.start_time = time.time()
self.is_player_in_pause = True
def on_playback_resume(self, player_state):
self.is_player_in_pause = False
| # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 <NAME> (plugin.video.netflix)
Copyright (C) 2019 Smeulf (original implementation module)
Operations for changing the playback status
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import time
import xbmc
import resources.lib.common as common
from .action_manager import ActionManager
class AMPlayback(ActionManager):
"""Operations for changing the playback status"""
SETTING_ID = 'ResumeManager_enabled'
def __init__(self):
super(AMPlayback, self).__init__()
self.resume_position = None
self.enabled = True
self.start_time = None
self.is_player_in_pause = False
def __str__(self):
return 'enabled={}'.format(self.enabled)
def initialize(self, data):
# Due to a bug on Kodi the resume on SRTM files not works correctly, so we force the skip to the resume point
self.resume_position = data.get('resume_position')
def on_playback_started(self, player_state):
if self.resume_position:
common.info('AMPlayback has forced resume point to {}', self.resume_position)
xbmc.Player().seekTime(int(self.resume_position))
def on_tick(self, player_state):
# Stops playback when paused for more than one hour.
# Some users leave the playback paused also for more than 12 hours,
# this complicates things to resume playback, because the manifest data expires and with it also all
# the streams urls are no longer guaranteed, so we force the stop of the playback.
if self.is_player_in_pause and (time.time() - self.start_time) > 3600:
common.info('The playback has been stopped because it has been exceeded 1 hour of pause')
common.stop_playback()
def on_playback_pause(self, player_state):
self.start_time = time.time()
self.is_player_in_pause = True
def on_playback_resume(self, player_state):
self.is_player_in_pause = False
| en | 0.796462 | # -*- coding: utf-8 -*- Copyright (C) 2017 <NAME> (plugin.video.netflix) Copyright (C) 2019 Smeulf (original implementation module) Operations for changing the playback status SPDX-License-Identifier: MIT See LICENSES/MIT.md for more information. Operations for changing the playback status # Due to a bug on Kodi the resume on SRTM files not works correctly, so we force the skip to the resume point # Stops playback when paused for more than one hour. # Some users leave the playback paused also for more than 12 hours, # this complicates things to resume playback, because the manifest data expires and with it also all # the streams urls are no longer guaranteed, so we force the stop of the playback. | 2.451215 | 2 |
infrastructure/uxas/setup.py | VVCAS-Sean/OpenUxAS | 88 | 6631678 | <filename>infrastructure/uxas/setup.py
"""Module setup for uxas infrastructure."""
from setuptools import setup, find_packages
install_requires = [
"e3-core",
"e3-testsuite",
"pyzmq",
]
setup(
name="uxas",
version=0.1,
url="https://github.com/afrl-rq/OpenUxAS",
licence="GLPv3",
author="AFRL",
author_email="<EMAIL>",
description="Infrastructure support for OpenUxAS",
namespace_packages=["uxas"],
packages=find_packages(where="src"),
package_dir={"": "src"},
install_requires=install_requires,
)
| <filename>infrastructure/uxas/setup.py
"""Module setup for uxas infrastructure."""
from setuptools import setup, find_packages
install_requires = [
"e3-core",
"e3-testsuite",
"pyzmq",
]
setup(
name="uxas",
version=0.1,
url="https://github.com/afrl-rq/OpenUxAS",
licence="GLPv3",
author="AFRL",
author_email="<EMAIL>",
description="Infrastructure support for OpenUxAS",
namespace_packages=["uxas"],
packages=find_packages(where="src"),
package_dir={"": "src"},
install_requires=install_requires,
)
| en | 0.534133 | Module setup for uxas infrastructure. | 1.315597 | 1 |
code/python/FactSetOwnershipReportBuilder/v1/fds/sdk/FactSetOwnershipReportBuilder/models/__init__.py | factset/enterprise-sdk | 6 | 6631679 | <reponame>factset/enterprise-sdk<filename>code/python/FactSetOwnershipReportBuilder/v1/fds/sdk/FactSetOwnershipReportBuilder/models/__init__.py
# flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from from fds.sdk.FactSetOwnershipReportBuilder.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from fds.sdk.FactSetOwnershipReportBuilder.model.category import Category
from fds.sdk.FactSetOwnershipReportBuilder.model.category_category import CategoryCategory
from fds.sdk.FactSetOwnershipReportBuilder.model.currency_code import CurrencyCode
from fds.sdk.FactSetOwnershipReportBuilder.model.currency_code_currency_code import CurrencyCodeCurrencyCode
from fds.sdk.FactSetOwnershipReportBuilder.model.currency_symbol import CurrencySymbol
from fds.sdk.FactSetOwnershipReportBuilder.model.currency_symbol_currency_symbol import CurrencySymbolCurrencySymbol
from fds.sdk.FactSetOwnershipReportBuilder.model.description import Description
from fds.sdk.FactSetOwnershipReportBuilder.model.description_description import DescriptionDescription
from fds.sdk.FactSetOwnershipReportBuilder.model.error_object import ErrorObject
from fds.sdk.FactSetOwnershipReportBuilder.model.error_object_links import ErrorObjectLinks
from fds.sdk.FactSetOwnershipReportBuilder.model.error_object_source import ErrorObjectSource
from fds.sdk.FactSetOwnershipReportBuilder.model.error_response import ErrorResponse
from fds.sdk.FactSetOwnershipReportBuilder.model.frequency import Frequency
from fds.sdk.FactSetOwnershipReportBuilder.model.frequency_frequency import FrequencyFrequency
from fds.sdk.FactSetOwnershipReportBuilder.model.fsym_id import FsymId
from fds.sdk.FactSetOwnershipReportBuilder.model.fsym_id_fsym_id import FsymIdFsymId
from fds.sdk.FactSetOwnershipReportBuilder.model.meta import Meta
from fds.sdk.FactSetOwnershipReportBuilder.model.metadata_entry import MetadataEntry
from fds.sdk.FactSetOwnershipReportBuilder.model.response import Response
from fds.sdk.FactSetOwnershipReportBuilder.model.scale import Scale
from fds.sdk.FactSetOwnershipReportBuilder.model.scale_scale import ScaleScale
from fds.sdk.FactSetOwnershipReportBuilder.model.value_type import ValueType
from fds.sdk.FactSetOwnershipReportBuilder.model.value_type_value_type import ValueTypeValueType
| # flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from from fds.sdk.FactSetOwnershipReportBuilder.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from fds.sdk.FactSetOwnershipReportBuilder.model.category import Category
from fds.sdk.FactSetOwnershipReportBuilder.model.category_category import CategoryCategory
from fds.sdk.FactSetOwnershipReportBuilder.model.currency_code import CurrencyCode
from fds.sdk.FactSetOwnershipReportBuilder.model.currency_code_currency_code import CurrencyCodeCurrencyCode
from fds.sdk.FactSetOwnershipReportBuilder.model.currency_symbol import CurrencySymbol
from fds.sdk.FactSetOwnershipReportBuilder.model.currency_symbol_currency_symbol import CurrencySymbolCurrencySymbol
from fds.sdk.FactSetOwnershipReportBuilder.model.description import Description
from fds.sdk.FactSetOwnershipReportBuilder.model.description_description import DescriptionDescription
from fds.sdk.FactSetOwnershipReportBuilder.model.error_object import ErrorObject
from fds.sdk.FactSetOwnershipReportBuilder.model.error_object_links import ErrorObjectLinks
from fds.sdk.FactSetOwnershipReportBuilder.model.error_object_source import ErrorObjectSource
from fds.sdk.FactSetOwnershipReportBuilder.model.error_response import ErrorResponse
from fds.sdk.FactSetOwnershipReportBuilder.model.frequency import Frequency
from fds.sdk.FactSetOwnershipReportBuilder.model.frequency_frequency import FrequencyFrequency
from fds.sdk.FactSetOwnershipReportBuilder.model.fsym_id import FsymId
from fds.sdk.FactSetOwnershipReportBuilder.model.fsym_id_fsym_id import FsymIdFsymId
from fds.sdk.FactSetOwnershipReportBuilder.model.meta import Meta
from fds.sdk.FactSetOwnershipReportBuilder.model.metadata_entry import MetadataEntry
from fds.sdk.FactSetOwnershipReportBuilder.model.response import Response
from fds.sdk.FactSetOwnershipReportBuilder.model.scale import Scale
from fds.sdk.FactSetOwnershipReportBuilder.model.scale_scale import ScaleScale
from fds.sdk.FactSetOwnershipReportBuilder.model.value_type import ValueType
from fds.sdk.FactSetOwnershipReportBuilder.model.value_type_value_type import ValueTypeValueType | en | 0.808077 | # flake8: noqa # import all models into this package # if you have many models here with many references from one model to another this may # raise a RecursionError # to avoid this, import only the models that you directly need like: # from from fds.sdk.FactSetOwnershipReportBuilder.model.pet import Pet # or import this package, but before doing it, use: # import sys # sys.setrecursionlimit(n) | 1.729609 | 2 |
smaregipy/entities/account.py | shabaraba/SmaregiPy | 0 | 6631680 | <reponame>shabaraba/SmaregiPy
import datetime
import pytz
from typing import Optional
class Account():
def __init__(
self: 'Account',
contract_id: str,
sub: Optional[str]=None,
is_owner: Optional[bool]=False,
access_token: Optional[str]=None,
access_token_expiration_datetime: Optional[datetime.datetime]=None,
user_access_token: Optional[str]=None
):
self.contract_id = contract_id
self.sub = sub
self.is_owner = is_owner
if access_token is not None and access_token_expiration_datetime is not None:
self.access_token = Account.AccessToken(
access_token,
access_token_expiration_datetime
)
if user_access_token is not None:
self.user_access_token = Account.UserAccessToken(user_access_token)
class UserAccessToken():
def __init__(self, _access_token):
self._access_token = _access_token
@property
def access_token(self):
return self._access_token
class AccessToken():
def __init__(self, _token: str, _expiration_datetime: datetime.datetime):
self._token: str = _token
self._expiration_datetime: datetime.datetime = _expiration_datetime
@property
def token(self) -> str:
return self._token
@token.setter
def token(self, value):
self._token = value
@property
def expiration_datetime(self) -> datetime.datetime:
return self._expiration_datetime
@expiration_datetime.setter
def expiration_datetime(self, value):
if type(value) == str:
value = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S %z')
self._expiration_datetime = value
def is_available(self):
"""
Check whether the expiration time has not passed yet.
"""
if self.token is None:
return False
if self.expiration_datetime is not None:
now = datetime.datetime.now(pytz.timezone('Asia/Tokyo'))
if (self.expiration_datetime < now):
return False
return True
| import datetime
import pytz
from typing import Optional
class Account():
def __init__(
self: 'Account',
contract_id: str,
sub: Optional[str]=None,
is_owner: Optional[bool]=False,
access_token: Optional[str]=None,
access_token_expiration_datetime: Optional[datetime.datetime]=None,
user_access_token: Optional[str]=None
):
self.contract_id = contract_id
self.sub = sub
self.is_owner = is_owner
if access_token is not None and access_token_expiration_datetime is not None:
self.access_token = Account.AccessToken(
access_token,
access_token_expiration_datetime
)
if user_access_token is not None:
self.user_access_token = Account.UserAccessToken(user_access_token)
class UserAccessToken():
def __init__(self, _access_token):
self._access_token = _access_token
@property
def access_token(self):
return self._access_token
class AccessToken():
def __init__(self, _token: str, _expiration_datetime: datetime.datetime):
self._token: str = _token
self._expiration_datetime: datetime.datetime = _expiration_datetime
@property
def token(self) -> str:
return self._token
@token.setter
def token(self, value):
self._token = value
@property
def expiration_datetime(self) -> datetime.datetime:
return self._expiration_datetime
@expiration_datetime.setter
def expiration_datetime(self, value):
if type(value) == str:
value = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S %z')
self._expiration_datetime = value
def is_available(self):
"""
Check whether the expiration time has not passed yet.
"""
if self.token is None:
return False
if self.expiration_datetime is not None:
now = datetime.datetime.now(pytz.timezone('Asia/Tokyo'))
if (self.expiration_datetime < now):
return False
return True | ja | 0.999982 | Check whether the expiration time has not passed yet. | 2.991499 | 3 |
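An illustrative construction of Account and a token-expiry check; the import path and all values are assumptions.
import datetime
import pytz
from smaregipy.entities.account import Account  # import path assumed
acct = Account(
    contract_id="contract-123",   # illustrative values only
    access_token="token-value",
    access_token_expiration_datetime=(
        datetime.datetime.now(pytz.timezone("Asia/Tokyo")) + datetime.timedelta(hours=1)
    ),
)
print(acct.access_token.is_available())  # True while the expiration lies in the future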
pytest_localstack/__init__.py | Andrew-Wichmann/pytest-localstack | 63 | 6631681 | <reponame>Andrew-Wichmann/pytest-localstack<filename>pytest_localstack/__init__.py
import contextlib
import logging
import sys
import docker
import pytest
from pytest_localstack import plugin, session, utils
_start_timeout = None
_stop_timeout = None
def pytest_configure(config):
global _start_timeout, _stop_timeout
_start_timeout = config.getoption("--localstack-start-timeout")
_stop_timeout = config.getoption("--localstack-stop-timeout")
def pytest_addoption(parser):
"""Hook to add pytest_localstack command line options to pytest."""
group = parser.getgroup("localstack")
group.addoption(
"--localstack-start-timeout",
action="store",
type=int,
default=60,
help="max seconds for starting a localstack container",
)
group.addoption(
"--localstack-stop-timeout",
action="store",
type=int,
default=5,
help="max seconds for stopping a localstack container",
)
def session_fixture(
scope="function",
services=None,
autouse=False,
docker_client=None,
region_name=None,
kinesis_error_probability=0.0,
dynamodb_error_probability=0.0,
container_log_level=logging.DEBUG,
localstack_version="latest",
auto_remove=True,
pull_image=True,
container_name=None,
**kwargs
):
"""Create a pytest fixture that provides a LocalstackSession.
This is not a fixture! It is a factory to create them.
The fixtures that are created by this function will yield
a :class:`.LocalstackSession` instance.
This is useful for simulating multiple AWS accounts.
It does not automatically redirect botocore/boto3 traffic to Localstack
(although :class:`.LocalstackSession` has a method to do that.)
Args:
scope (str, optional): The pytest scope which this fixture will use.
Defaults to :const:`"function"`.
services (list, dict, optional): One of:
- A :class:`list` of AWS service names to start in the
Localstack container.
- A :class:`dict` of service names to the port they should run on.
Defaults to all services. Setting this can reduce container
startup time and therefore test time.
autouse (bool, optional): If :obj:`True`, automatically use this
fixture in applicable tests. Default: :obj:`False`
docker_client (:class:`~docker.client.DockerClient`, optional):
Docker client to run the Localstack container with.
Defaults to :func:`docker.client.from_env`.
region_name (str, optional): Region name to assume.
Each Localstack container acts like a single AWS region.
Defaults to :const:`"us-east-1"`.
kinesis_error_probability (float, optional): Decimal value between
0.0 (default) and 1.0 to randomly inject
ProvisionedThroughputExceededException errors
into Kinesis API responses.
dynamodb_error_probability (float, optional): Decimal value
between 0.0 (default) and 1.0 to randomly inject
ProvisionedThroughputExceededException errors into
DynamoDB API responses.
container_log_level (int, optional): The logging level to use
for Localstack container logs. Defaults to :data:`logging.DEBUG`.
localstack_version (str, optional): The version of the Localstack
image to use. Defaults to :const:`"latest"`.
auto_remove (bool, optional): If :obj:`True`, delete the Localstack
container when it stops. Default: :obj:`True`
pull_image (bool, optional): If :obj:`True`, pull the Localstack
image before running it. Default: :obj:`True`.
container_name (str, optional): The name for the Localstack
container. Defaults to a randomly generated id.
**kwargs: Additional kwargs will be passed to the
:class:`.LocalstackSession`.
Returns:
A :func:`pytest fixture <_pytest.fixtures.fixture>`.
"""
@pytest.fixture(scope=scope, autouse=autouse)
def _fixture(pytestconfig):
if not pytestconfig.pluginmanager.hasplugin("localstack"):
pytest.skip("skipping because localstack plugin isn't loaded")
with _make_session(
docker_client=docker_client,
services=services,
region_name=region_name,
kinesis_error_probability=kinesis_error_probability,
dynamodb_error_probability=dynamodb_error_probability,
container_log_level=container_log_level,
localstack_version=localstack_version,
auto_remove=auto_remove,
pull_image=pull_image,
container_name=container_name,
**kwargs
) as session:
yield session
return _fixture
@contextlib.contextmanager
def _make_session(docker_client, *args, **kwargs):
utils.check_proxy_env_vars()
if docker_client is None:
docker_client = docker.from_env()
try:
docker_client.ping() # Check connectivity
except docker.errors.APIError:
pytest.fail("Could not connect to Docker.")
_session = session.LocalstackSession(docker_client, *args, **kwargs)
_session.start(timeout=_start_timeout)
try:
yield _session
finally:
_session.stop(timeout=_stop_timeout)
# Register contrib modules
plugin.register_plugin_module("pytest_localstack.contrib.botocore")
plugin.register_plugin_module("pytest_localstack.contrib.boto3", False)
# Register 3rd-party modules
plugin.manager.load_setuptools_entrypoints("localstack")
# Trigger pytest_localstack_contribute_to_module hook
plugin.manager.hook.contribute_to_module.call_historic(
kwargs={"pytest_localstack": sys.modules[__name__]}
)
| import contextlib
import logging
import sys
import docker
import pytest
from pytest_localstack import plugin, session, utils
_start_timeout = None
_stop_timeout = None
def pytest_configure(config):
global _start_timeout, _stop_timeout
_start_timeout = config.getoption("--localstack-start-timeout")
_stop_timeout = config.getoption("--localstack-stop-timeout")
def pytest_addoption(parser):
"""Hook to add pytest_localstack command line options to pytest."""
group = parser.getgroup("localstack")
group.addoption(
"--localstack-start-timeout",
action="store",
type=int,
default=60,
help="max seconds for starting a localstack container",
)
group.addoption(
"--localstack-stop-timeout",
action="store",
type=int,
default=5,
help="max seconds for stopping a localstack container",
)
def session_fixture(
scope="function",
services=None,
autouse=False,
docker_client=None,
region_name=None,
kinesis_error_probability=0.0,
dynamodb_error_probability=0.0,
container_log_level=logging.DEBUG,
localstack_version="latest",
auto_remove=True,
pull_image=True,
container_name=None,
**kwargs
):
"""Create a pytest fixture that provides a LocalstackSession.
This is not a fixture! It is a factory to create them.
The fixtures that are created by this function will yield
a :class:`.LocalstackSession` instance.
This is useful for simulating multiple AWS accounts.
It does not automatically redirect botocore/boto3 traffic to Localstack
(although :class:`.LocalstackSession` has a method to do that.)
Args:
scope (str, optional): The pytest scope which this fixture will use.
Defaults to :const:`"function"`.
services (list, dict, optional): One of:
- A :class:`list` of AWS service names to start in the
Localstack container.
- A :class:`dict` of service names to the port they should run on.
Defaults to all services. Setting this can reduce container
startup time and therefore test time.
autouse (bool, optional): If :obj:`True`, automatically use this
fixture in applicable tests. Default: :obj:`False`
docker_client (:class:`~docker.client.DockerClient`, optional):
Docker client to run the Localstack container with.
Defaults to :func:`docker.client.from_env`.
region_name (str, optional): Region name to assume.
Each Localstack container acts like a single AWS region.
Defaults to :const:`"us-east-1"`.
kinesis_error_probability (float, optional): Decimal value between
0.0 (default) and 1.0 to randomly inject
ProvisionedThroughputExceededException errors
into Kinesis API responses.
dynamodb_error_probability (float, optional): Decimal value
between 0.0 (default) and 1.0 to randomly inject
ProvisionedThroughputExceededException errors into
DynamoDB API responses.
container_log_level (int, optional): The logging level to use
for Localstack container logs. Defaults to :data:`logging.DEBUG`.
localstack_version (str, optional): The version of the Localstack
image to use. Defaults to :const:`"latest"`.
auto_remove (bool, optional): If :obj:`True`, delete the Localstack
container when it stops. Default: :obj:`True`
pull_image (bool, optional): If :obj:`True`, pull the Localstack
image before running it. Default: :obj:`True`.
container_name (str, optional): The name for the Localstack
container. Defaults to a randomly generated id.
**kwargs: Additional kwargs will be passed to the
:class:`.LocalstackSession`.
Returns:
A :func:`pytest fixture <_pytest.fixtures.fixture>`.
"""
@pytest.fixture(scope=scope, autouse=autouse)
def _fixture(pytestconfig):
if not pytestconfig.pluginmanager.hasplugin("localstack"):
pytest.skip("skipping because localstack plugin isn't loaded")
with _make_session(
docker_client=docker_client,
services=services,
region_name=region_name,
kinesis_error_probability=kinesis_error_probability,
dynamodb_error_probability=dynamodb_error_probability,
container_log_level=container_log_level,
localstack_version=localstack_version,
auto_remove=auto_remove,
pull_image=pull_image,
container_name=container_name,
**kwargs
) as session:
yield session
return _fixture
@contextlib.contextmanager
def _make_session(docker_client, *args, **kwargs):
utils.check_proxy_env_vars()
if docker_client is None:
docker_client = docker.from_env()
try:
docker_client.ping() # Check connectivity
except docker.errors.APIError:
pytest.fail("Could not connect to Docker.")
_session = session.LocalstackSession(docker_client, *args, **kwargs)
_session.start(timeout=_start_timeout)
try:
yield _session
finally:
_session.stop(timeout=_stop_timeout)
# Register contrib modules
plugin.register_plugin_module("pytest_localstack.contrib.botocore")
plugin.register_plugin_module("pytest_localstack.contrib.boto3", False)
# Register 3rd-party modules
plugin.manager.load_setuptools_entrypoints("localstack")
# Trigger pytest_localstack_contribute_to_module hook
plugin.manager.hook.contribute_to_module.call_historic(
kwargs={"pytest_localstack": sys.modules[__name__]}
) | en | 0.699688 | Hook to add pytest_localstack command line options to pytest. Create a pytest fixture that provides a LocalstackSession. This is not a fixture! It is a factory to create them. The fixtures that are created by this function will yield a :class:`.LocalstackSession` instance. This is useful for simulating multiple AWS accounts. It does not automatically redirect botocore/boto3 traffic to Localstack (although :class:`.LocalstackSession` has a method to do that.) Args: scope (str, optional): The pytest scope which this fixture will use. Defaults to :const:`"function"`. services (list, dict, optional): One of: - A :class:`list` of AWS service names to start in the Localstack container. - A :class:`dict` of service names to the port they should run on. Defaults to all services. Setting this can reduce container startup time and therefore test time. autouse (bool, optional): If :obj:`True`, automatically use this fixture in applicable tests. Default: :obj:`False` docker_client (:class:`~docker.client.DockerClient`, optional): Docker client to run the Localstack container with. Defaults to :func:`docker.client.from_env`. region_name (str, optional): Region name to assume. Each Localstack container acts like a single AWS region. Defaults to :const:`"us-east-1"`. kinesis_error_probability (float, optional): Decimal value between 0.0 (default) and 1.0 to randomly inject ProvisionedThroughputExceededException errors into Kinesis API responses. dynamodb_error_probability (float, optional): Decimal value between 0.0 (default) and 1.0 to randomly inject ProvisionedThroughputExceededException errors into DynamoDB API responses. container_log_level (int, optional): The logging level to use for Localstack container logs. Defaults to :data:`logging.DEBUG`. localstack_version (str, optional): The version of the Localstack image to use. Defaults to :const:`"latest"`. auto_remove (bool, optional): If :obj:`True`, delete the Localstack container when it stops. Default: :obj:`True` pull_image (bool, optional): If :obj:`True`, pull the Localstack image before running it. Default: :obj:`True`. container_name (str, optional): The name for the Localstack container. Defaults to a randomly generated id. **kwargs: Additional kwargs will be passed to the :class:`.LocalstackSession`. Returns: A :func:`pytest fixture <_pytest.fixtures.fixture>`. # Check connectivity # Register contrib modules # Register 3rd-party modules # Trigger pytest_localstack_contribute_to_module hook | 2.322824 | 2 |
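A hedged conftest.py-style sketch of the fixture factory documented in this record; service names and scope are illustrative.
# conftest.py (sketch)
import pytest_localstack
localstack = pytest_localstack.session_fixture(
    services=["s3", "sqs"],    # start only the services the tests need
    scope="module",
    region_name="us-east-1",
)
def test_localstack_session(localstack):
    # `localstack` is the yielded LocalstackSession; redirecting botocore/boto3
    # traffic to it is a separate, explicit step (see the docstring above).
    assert localstack is not None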
api/app/data/tweets.py | biancarosa/emotion_analysis | 0 | 6631682 | """Deals with HealthCheck route."""
import os
import logging
import base64
import requests
from flask import jsonify
logging.basicConfig(level=logging.DEBUG)
def get_user_tweets():
"""Returns last 1000 user tweets"""
screen_name = '__biancarosa'
consumer_key = os.getenv('TWITTER_CONSUMER_KEY')
consumer_secret = os.getenv('TWITTER_CONSUMER_SECRET')
logging.debug(consumer_key)
logging.debug(consumer_secret)
r = requests.post(url='https://api.twitter.com/oauth2/token',
data={'grant_type': 'client_credentials'},
auth=(consumer_key, consumer_secret))
auth_data = r.json()
logging.debug(auth_data)
token = auth_data["access_token"]
logging.debug(token)
r = requests.get(url=f'https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name={screen_name}&count=100',
headers={'Authorization': f'Bearer {token}'})
return jsonify(r.json())
| """Deals with HealthCheck route."""
import os
import logging
import base64
import requests
from flask import jsonify
logging.basicConfig(level=logging.DEBUG)
def get_user_tweets():
"""Returns last 1000 user tweets"""
screen_name = '__biancarosa'
consumer_key = os.getenv('TWITTER_CONSUMER_KEY')
consumer_secret = os.getenv('TWITTER_CONSUMER_SECRET')
logging.debug(consumer_key)
logging.debug(consumer_secret)
r = requests.post(url='https://api.twitter.com/oauth2/token',
data={'grant_type': 'client_credentials'},
auth=(consumer_key, consumer_secret))
auth_data = r.json()
logging.debug(auth_data)
token = auth_data["access_token"]
logging.debug(token)
r = requests.get(url=f'https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name={screen_name}&count=100',
headers={'Authorization': f'Bearer {token}'})
return jsonify(r.json())
| en | 0.64122 | Deals with HealthCheck route. Returns last 1000 user tweets | 2.62496 | 3 |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/teams/views.py | osoco/better-ways-of-thinking-about-software | 3 | 6631683 | <reponame>osoco/better-ways-of-thinking-about-software
"""
HTTP endpoints for the Teams API.
"""
import logging
from collections import Counter
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import PermissionDenied
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.http import Http404, HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from django_countries import countries
from edx_rest_framework_extensions.paginators import DefaultPagination, paginate_search_results
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from rest_framework import permissions, status
from rest_framework.authentication import SessionAuthentication
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from common.djangoapps.student.models import CourseAccessRole, CourseEnrollment
from common.djangoapps.util.model_utils import truncate_fields
from lms.djangoapps.courseware.courses import get_course_with_access, has_access
from lms.djangoapps.discussion.django_comment_client.utils import has_discussion_privileges
from lms.djangoapps.teams.models import CourseTeam, CourseTeamMembership
from openedx.core.lib.api.authentication import BearerAuthentication
from openedx.core.lib.api.parsers import MergePatchParser
from openedx.core.lib.api.permissions import IsCourseStaffInstructor, IsStaffOrReadOnly
from openedx.core.lib.api.view_utils import (
ExpandableFieldViewMixin,
RetrievePatchAPIView,
add_serializer_errors,
build_api_error
)
from openedx.core.lib.teams_config import TeamsetType
from xmodule.modulestore.django import modulestore
from . import is_feature_enabled
from .api import (
OrganizationProtectionStatus,
add_team_count,
can_user_create_team_in_topic,
can_user_modify_team,
get_assignments_for_team,
has_course_staff_privileges,
has_specific_team_access,
has_specific_teamset_access,
has_team_api_access,
user_organization_protection_status
)
from .csv import TeamMembershipImportManager, load_team_membership_csv
from .errors import AlreadyOnTeamInTeamset, ElasticSearchConnectionError, NotEnrolledInCourseForTeam
from .search_indexes import CourseTeamIndexer
from .serializers import (
BulkTeamCountTopicSerializer,
CourseTeamCreationSerializer,
CourseTeamSerializer,
MembershipSerializer,
TopicSerializer
)
from .toggles import are_team_submissions_enabled
from .utils import emit_team_event
TEAM_MEMBERSHIPS_PER_PAGE = 5
TOPICS_PER_PAGE = 12
MAXIMUM_SEARCH_SIZE = 10000
log = logging.getLogger(__name__)
@receiver(post_save, sender=CourseTeam)
def team_post_save_callback(sender, instance, **kwargs): # pylint: disable=unused-argument
""" Emits signal after the team is saved. """
changed_fields = instance.field_tracker.changed()
# Don't emit events when we are first creating the team.
if not kwargs['created']:
for field in changed_fields:
if field not in instance.FIELD_BLACKLIST:
truncated_fields = truncate_fields(
str(changed_fields[field]),
str(getattr(instance, field))
)
truncated_fields['team_id'] = instance.team_id
truncated_fields['team_id'] = instance.team_id
truncated_fields['field'] = field
emit_team_event(
'edx.team.changed',
instance.course_id,
truncated_fields
)
class TopicsPagination(DefaultPagination):
"""Paginate topics. """
page_size = TOPICS_PER_PAGE
class MyTeamsPagination(DefaultPagination):
"""Paginate the user's teams. """
page_size = TEAM_MEMBERSHIPS_PER_PAGE
class TeamsDashboardView(GenericAPIView):
"""
View methods related to the teams dashboard.
"""
def get(self, request, course_id):
"""
Renders the teams dashboard, which is shown on the "Teams" tab.
Raises a 404 if the course specified by course_id does not exist, the
user is not registered for the course, or the teams feature is not enabled.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, "load", course_key)
if not is_feature_enabled(course):
raise Http404
if not CourseEnrollment.is_enrolled(request.user, course.id) and \
not has_access(request.user, 'staff', course, course.id):
raise Http404
user = request.user
# Even though sorting is done outside of the serializer, sort_order needs to be passed
# to the serializer so that the paginated results indicate how they were sorted.
sort_order = 'name'
topics = get_alphabetical_topics(course)
topics = _filter_hidden_private_teamsets(user, topics, course)
organization_protection_status = user_organization_protection_status(request.user, course_key)
# We have some frontend logic that needs to know if we have any open, public, or managed teamsets,
# and it's easier to just figure that out here when we have them all already
teamset_counts_by_type = Counter([topic['type'] for topic in topics])
# Paginate and serialize topic data
# BulkTeamCountPaginatedTopicSerializer will add team counts to the topics in a single
# bulk operation per page.
topics_data = self._serialize_and_paginate(
TopicsPagination,
topics,
request,
BulkTeamCountTopicSerializer,
{
'course_id': course.id,
'organization_protection_status': organization_protection_status
},
)
topics_data["sort_order"] = sort_order # pylint: disable=unsupported-assignment-operation
filter_query = {
'membership__user': user,
'course_id': course.id,
}
if organization_protection_status != OrganizationProtectionStatus.protection_exempt:
is_user_org_protected = organization_protection_status == OrganizationProtectionStatus.protected
filter_query['organization_protected'] = is_user_org_protected
user_teams = CourseTeam.objects.filter(**filter_query).order_by('-last_activity_at', 'team_size')
user_teams_data = self._serialize_and_paginate(
MyTeamsPagination,
user_teams,
request,
CourseTeamSerializer,
{'expand': ('user',)}
)
context = {
"course": course,
"topics": topics_data,
# It is necessary to pass both privileged and staff because only privileged users can
# administer discussion threads, but both privileged and staff users are allowed to create
# multiple teams (since they are not automatically added to teams upon creation).
"user_info": {
"username": user.username,
"privileged": has_discussion_privileges(user, course_key),
"staff": bool(has_access(user, 'staff', course_key)),
"teams": user_teams_data
},
"has_open_teamset": bool(teamset_counts_by_type[TeamsetType.open.value]),
"has_public_managed_teamset": bool(teamset_counts_by_type[TeamsetType.public_managed.value]),
"has_managed_teamset": bool(
teamset_counts_by_type[TeamsetType.public_managed.value] +
teamset_counts_by_type[TeamsetType.private_managed.value]
),
"topic_url": reverse(
'topics_detail', kwargs={'topic_id': 'topic_id', 'course_id': str(course_id)}, request=request
),
"topics_url": reverse('topics_list', request=request),
"teams_url": reverse('teams_list', request=request),
"teams_detail_url": reverse('teams_detail', args=['team_id']),
"team_memberships_url": reverse('team_membership_list', request=request),
"my_teams_url": reverse('teams_list', request=request),
"team_membership_detail_url": reverse('team_membership_detail', args=['team_id', user.username]),
"team_membership_management_url": reverse(
'team_membership_bulk_management', request=request, kwargs={'course_id': course_id}
),
"languages": [[lang[0], _(lang[1])] for lang in settings.ALL_LANGUAGES], # pylint: disable=translation-of-non-string
"countries": list(countries),
"disable_courseware_js": True,
"teams_base_url": reverse('teams_dashboard', request=request, kwargs={'course_id': course_id}),
}
# Assignments are feature-flagged
if are_team_submissions_enabled(course_key):
context["teams_assignments_url"] = reverse('teams_assignments_list', args=['team_id'])
return render(request, "teams/teams.html", context)
def _serialize_and_paginate(self, pagination_cls, queryset, request, serializer_cls, serializer_ctx):
"""
Serialize and paginate objects in a queryset.
Arguments:
pagination_cls (pagination.Paginator class): Django Rest Framework Paginator subclass.
queryset (QuerySet): Django queryset to serialize/paginate.
serializer_cls (serializers.Serializer class): Django Rest Framework Serializer subclass.
serializer_ctx (dict): Context dictionary to pass to the serializer
Returns: dict
"""
# Django Rest Framework v3 requires that we pass the request
# into the serializer's context if the serialize contains
# hyperlink fields.
serializer_ctx["request"] = request
# Instantiate the paginator and use it to paginate the queryset
paginator = pagination_cls()
page = paginator.paginate_queryset(queryset, request)
# Serialize the page
serializer = serializer_cls(page, context=serializer_ctx, many=True)
# Use the paginator to construct the response data
# This will use the pagination subclass for the view to add additional
# fields to the response.
# For example, if the input data is a list, the output data would
# be a dictionary with keys "count", "next", "previous", and "results"
# (where "results" is set to the value of the original list)
return paginator.get_paginated_response(serializer.data).data
class TeamsListView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
Get or create a course team.
**Example Requests**:
GET /api/team/v0/teams
POST /api/team/v0/teams
**Query Parameters for GET**
* course_id: Filters the result to teams belonging to the given
course. Required.
* topic_id: Filters the result to teams associated with the given
topic.
* text_search: Searches for full word matches on the name, description,
country, and language fields. NOTES: Search is on full names for countries
and languages, not the ISO codes. Text_search cannot be requested along with
          order_by.
        * order_by: Cannot be called along with text_search. Must be one of the following:
* name: Orders results by case insensitive team name (default).
* open_slots: Orders results by most open slots (for tie-breaking,
last_activity_at is used, with most recent first).
* last_activity_at: Orders result by team activity, with most active first
(for tie-breaking, open_slots is used, with most open slots first).
* username: Return teams whose membership contains the given user.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, the response contains:
* count: The total number of teams matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the teams matching the request.
* id: The team's unique identifier.
* discussion_topic_id: The unique id of the comments service
discussion topic associated with this team.
* name: The name of the team.
* course_id: The identifier for the course this team belongs to.
* topic_id: Optionally specifies which topic the team is associated
with.
* date_created: Date and time when the team was created.
* description: A description of the team.
* country: Optionally specifies which country the team is
associated with.
* language: Optionally specifies which language the team is
associated with.
* last_activity_at: The date of the last activity of any team member
within the team.
* membership: A list of the users that are members of the team.
See membership endpoint for more detail.
* organization_protected: Whether the team consists of organization-protected
learners
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in, a 401 error is returned.
If the user is not enrolled in the course specified by course_id or
is not course or global staff, a 403 error is returned.
If the specified course_id is not valid or the user attempts to
use an unsupported query parameter, a 400 error is returned.
If the response does not exist, a 404 error is returned. For
example, the course_id may not reference a real course or the page
number may be beyond the last page.
If the server is unable to connect to Elasticsearch, and
the text_search parameter is supplied, a 503 error is returned.
        If the requesting user is a learner, they will only see the organization-protected
        set of teams if they are enrolled in a degree-bearing institution. Otherwise, the
        learner will only see the organization-unprotected set of teams.
**Response Values for POST**
Any logged in user who has verified their email address can create
a team in an open teamset. The format mirrors that of a GET for an individual team,
but does not include the id, date_created, or membership fields.
id is automatically computed based on name.
If the user is not logged in, a 401 error is returned.
If the user is not enrolled in the course, is not course or
        global staff, or does not have discussion privileges, a 403 error
is returned.
If the course_id is not valid, or the topic_id is missing, or extra fields
are included in the request, a 400 error is returned.
If the specified course does not exist, a 404 error is returned.
If the specified teamset does not exist, a 404 error is returned.
If the specified teamset does exist, but the requesting user shouldn't be
able to see it, a 404 is returned.
"""
# BearerAuthentication must come first to return a 401 for unauthenticated users
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = CourseTeamSerializer
def get(self, request):
"""GET /api/team/v0/teams/"""
result_filter = {}
if 'course_id' not in request.query_params:
return Response(
build_api_error(ugettext_noop("course_id must be provided")),
status=status.HTTP_400_BAD_REQUEST
)
course_id_string = request.query_params['course_id']
try:
course_key = CourseKey.from_string(course_id_string)
course_module = modulestore().get_course(course_key)
except InvalidKeyError:
error = build_api_error(
ugettext_noop("The supplied course id {course_id} is not valid."),
course_id=course_id_string,
)
return Response(error, status=status.HTTP_400_BAD_REQUEST)
# Ensure the course exists
if course_module is None:
return Response(status=status.HTTP_404_NOT_FOUND)
result_filter.update({'course_id': course_key})
if not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
text_search = request.query_params.get('text_search', None)
if text_search and request.query_params.get('order_by', None):
return Response(
build_api_error(ugettext_noop("text_search and order_by cannot be provided together")),
status=status.HTTP_400_BAD_REQUEST
)
username = request.query_params.get('username', None)
if username is not None:
result_filter.update({'membership__user__username': username})
topic_id = request.query_params.get('topic_id', None)
if topic_id is not None:
if topic_id not in course_module.teamsets_by_id:
error = build_api_error(
ugettext_noop('The supplied topic id {topic_id} is not valid'),
topic_id=topic_id
)
return Response(error, status=status.HTTP_400_BAD_REQUEST)
result_filter.update({'topic_id': topic_id})
organization_protection_status = user_organization_protection_status(
request.user, course_key
)
if not organization_protection_status.is_exempt:
result_filter.update({
'organization_protected': organization_protection_status.is_protected
})
if text_search and CourseTeamIndexer.search_is_enabled():
try:
search_engine = CourseTeamIndexer.engine()
except ElasticSearchConnectionError:
return Response(
build_api_error(ugettext_noop('Error connecting to elasticsearch')),
status=status.HTTP_503_SERVICE_UNAVAILABLE
)
result_filter.update({'course_id': course_id_string})
search_results = search_engine.search(
query_string=text_search,
field_dictionary=result_filter,
size=MAXIMUM_SEARCH_SIZE,
)
# We need to manually exclude some potential private_managed teams from results, because
# it doesn't appear that the search supports "field__in" style lookups
# Non-staff users should not be able to see private_managed teams that they are not on.
# Staff shouldn't have any excluded teams.
excluded_private_team_ids = self._get_private_team_ids_to_exclude(course_module)
search_results['results'] = [
result for result in search_results['results']
if result['data']['id'] not in excluded_private_team_ids
]
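            # Keep the reported total in sync with the manually filtered results so
            # the search event and pagination below don't use the raw search count.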
search_results['total'] = len(search_results['results'])
paginated_results = paginate_search_results(
CourseTeam,
search_results,
self.paginator.get_page_size(request),
self.get_page()
)
emit_team_event('edx.team.searched', course_key, {
"number_of_results": search_results['total'],
"search_text": text_search,
"topic_id": topic_id,
})
page = self.paginate_queryset(paginated_results)
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
ordering_schemes = {
'name': ('name',), # MySQL does case-insensitive order_by
'open_slots': ('team_size', '-last_activity_at'),
'last_activity_at': ('-last_activity_at', 'team_size'),
}
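        # For example, ?order_by=open_slots applies the tuple above as
        # queryset.order_by('team_size', '-last_activity_at'): fewest members
        # (i.e. most open slots) first, ties broken by most recent activity.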
        # hide private_managed teams from non-staff users that aren't members of those teams
excluded_private_team_ids = self._get_private_team_ids_to_exclude(course_module)
queryset = CourseTeam.objects.filter(**result_filter).exclude(team_id__in=excluded_private_team_ids)
order_by_input = request.query_params.get('order_by', 'name')
if order_by_input not in ordering_schemes:
return Response(
{
'developer_message': "unsupported order_by value {ordering}".format(
ordering=order_by_input,
),
# Translators: 'ordering' is a string describing a way
# of ordering a list. For example, {ordering} may be
# 'name', indicating that the user wants to sort the
# list by lower case name.
'user_message': _("The ordering {ordering} is not supported").format(
ordering=order_by_input,
),
},
status=status.HTTP_400_BAD_REQUEST,
)
queryset = queryset.order_by(*ordering_schemes[order_by_input])
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
response = self.get_paginated_response(serializer.data)
response.data['sort_order'] = order_by_input
return response
def post(self, request):
"""POST /api/team/v0/teams/"""
field_errors = {}
course_key = None
course_id = request.data.get('course_id')
        # Handle field errors and check that the course exists
try:
course_key = CourseKey.from_string(course_id)
# Ensure the course exists
course_module = modulestore().get_course(course_key)
if not course_module:
return Response(status=status.HTTP_404_NOT_FOUND)
except InvalidKeyError:
field_errors['course_id'] = build_api_error(
ugettext_noop('The supplied course_id {course_id} is not valid.'),
course_id=course_id
)
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
topic_id = request.data.get('topic_id')
if not topic_id:
field_errors['topic_id'] = build_api_error(
ugettext_noop('topic_id is required'),
course_id=course_id
)
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
if course_key and not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
if topic_id not in course_module.teams_configuration.teamsets_by_id or (
not has_specific_teamset_access(request.user, course_module, topic_id)
):
return Response(status=status.HTTP_404_NOT_FOUND)
# The user has to have access to this teamset at this point, so we can return 403
        # and not leak the existence of a private teamset
if not can_user_create_team_in_topic(request.user, course_key, topic_id):
return Response(
build_api_error(ugettext_noop("You can't create a team in an instructor managed topic.")),
status=status.HTTP_403_FORBIDDEN
)
# Course and global staff, as well as discussion "privileged" users, will not automatically
# be added to a team when they create it. They are allowed to create multiple teams.
is_team_administrator = (has_access(request.user, 'staff', course_key)
or has_discussion_privileges(request.user, course_key))
if not is_team_administrator and (
CourseTeamMembership.user_in_team_for_teamset(request.user, course_key, topic_id=topic_id)
):
error_message = build_api_error(
ugettext_noop('You are already in a team in this teamset.'),
course_id=course_id,
teamset_id=topic_id,
)
return Response(error_message, status=status.HTTP_400_BAD_REQUEST)
data = request.data.copy()
data['course_id'] = str(course_key)
organization_protection_status = user_organization_protection_status(request.user, course_key)
if organization_protection_status != OrganizationProtectionStatus.protection_exempt:
data['organization_protected'] = organization_protection_status == OrganizationProtectionStatus.protected
serializer = CourseTeamCreationSerializer(data=data)
add_serializer_errors(serializer, data, field_errors)
if field_errors:
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
else:
team = serializer.save()
emit_team_event('edx.team.created', course_key, {
'team_id': team.team_id
})
if not is_team_administrator:
# Add the creating user to the team.
team.add_user(request.user)
emit_team_event(
'edx.team.learner_added',
course_key,
{
'team_id': team.team_id,
'user_id': request.user.id,
'add_method': 'added_on_create'
}
)
data = CourseTeamSerializer(team, context={"request": request}).data
return Response(data)
def get_page(self):
""" Returns page number specified in args, params, or defaults to 1. """
# This code is taken from within the GenericAPIView#paginate_queryset method.
        # We need access to the page outside of that method for our paginate_search_results method
page_kwarg = self.kwargs.get(self.paginator.page_query_param)
page_query_param = self.request.query_params.get(self.paginator.page_query_param)
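        # Precedence: an explicit URL kwarg wins, then the query string parameter,
        # then we fall back to the first page.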
return page_kwarg or page_query_param or 1
def _get_private_team_ids_to_exclude(self, course_module):
"""
Get the list of team ids that should be excluded from the response.
Staff can see all private teams.
Users should not be able to see teams in private teamsets that they are not a member of.
"""
if has_access(self.request.user, 'staff', course_module.id):
return set()
private_teamset_ids = [ts.teamset_id for ts in course_module.teamsets if ts.is_private_managed]
excluded_team_ids = CourseTeam.objects.filter(
course_id=course_module.id,
topic_id__in=private_teamset_ids
).exclude(
membership__user=self.request.user
).values_list('team_id', flat=True)
return set(excluded_team_ids)
class IsEnrolledOrIsStaff(permissions.BasePermission):
"""Permission that checks to see if the user is enrolled in the course or is staff."""
def has_object_permission(self, request, view, obj):
"""Returns true if the user is enrolled or is staff."""
return has_team_api_access(request.user, obj.course_id)
class IsStaffOrPrivilegedOrReadOnly(IsStaffOrReadOnly):
"""
Permission that checks to see if the user is global staff, course
staff, course admin, or has discussion privileges. If none of those conditions are
met, only read access will be granted.
"""
def has_object_permission(self, request, view, obj):
return (
has_discussion_privileges(request.user, obj.course_id) or
IsCourseStaffInstructor.has_object_permission(self, request, view, obj) or
super().has_object_permission(request, view, obj)
)
class HasSpecificTeamAccess(permissions.BasePermission):
"""
Permission that checks if the user has access to a specific team.
If the user doesn't have access to the team, the endpoint should behave as if
the team does not exist,
"""
def has_object_permission(self, request, view, obj):
if not has_specific_team_access(request.user, obj):
raise Http404
return True
class TeamsDetailView(ExpandableFieldViewMixin, RetrievePatchAPIView):
"""
**Use Cases**
Get, update, or delete a course team's information. Updates are supported
only through merge patch.
**Example Requests**:
        GET /api/team/v0/teams/{team_id}
PATCH /api/team/v0/teams/{team_id} "application/merge-patch+json"
DELETE /api/team/v0/teams/{team_id}
**Query Parameters for GET**
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in, the response contains the following fields:
* id: The team's unique identifier.
* discussion_topic_id: The unique id of the comments service
discussion topic associated with this team.
* name: The name of the team.
* course_id: The identifier for the course this team belongs to.
* topic_id: Optionally specifies which topic the team is
associated with.
* date_created: Date and time when the team was created.
* description: A description of the team.
* country: Optionally specifies which country the team is
associated with.
* language: Optionally specifies which language the team is
associated with.
* membership: A list of the users that are members of the team. See
membership endpoint for more detail.
* last_activity_at: The date of the last activity of any team member
within the team.
* organization_protected: Whether the team consists of organization-protected
learners
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in, a 401 error is returned.
If the user is not course or global staff, a 403 error is returned.
If the specified team does not exist, a 404 error is returned.
**Response Values for PATCH**
Only staff can patch teams.
If the user is anonymous or inactive, a 401 is returned.
If the user is logged in and the team does not exist, a 404 is returned.
If the user is not course or global staff, does not have discussion
privileges, and the team does exist, a 403 is returned.
If "application/merge-patch+json" is not the specified content type,
a 415 error is returned.
If the update could not be completed due to validation errors, this
method returns a 400 error with all error messages in the
"field_errors" field of the returned JSON.
**Response Values for DELETE**
Only staff can delete teams. When a team is deleted, all
team memberships associated with that team are also
deleted. Returns 204 on successful deletion.
If the user is anonymous or inactive, a 401 is returned.
If the user is not course or global staff and does not
have discussion privileges, a 403 is returned.
If the user is logged in and the team does not exist, a 404 is returned.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (
permissions.IsAuthenticated,
IsEnrolledOrIsStaff,
HasSpecificTeamAccess,
IsStaffOrPrivilegedOrReadOnly,
)
lookup_field = 'team_id'
serializer_class = CourseTeamSerializer
parser_classes = (MergePatchParser,)
def get_queryset(self):
"""Returns the queryset used to access the given team."""
return CourseTeam.objects.all()
def delete(self, request, team_id):
"""DELETE /api/team/v0/teams/{team_id}"""
team = get_object_or_404(CourseTeam, team_id=team_id)
self.check_object_permissions(request, team)
        # Note: list() forces the queryset to be evaluated before delete()
memberships = list(CourseTeamMembership.get_memberships(team_ids=[team_id]))
# Note: also deletes all team memberships associated with this team
team.delete()
log.info('user %d deleted team %s', request.user.id, team_id)
emit_team_event('edx.team.deleted', team.course_id, {
'team_id': team_id,
})
for member in memberships:
emit_team_event('edx.team.learner_removed', team.course_id, {
'team_id': team_id,
'remove_method': 'team_deleted',
'user_id': member.user_id
})
return Response(status=status.HTTP_204_NO_CONTENT)
class TeamsAssignmentsView(GenericAPIView):
"""
**Use Cases**
Get a team's assignments
**Example Requests**:
GET /api/team/v0/teams/{team_id}/assignments
**Response Values for GET**
        If the user is logged in, the response is an array of the following data structure:
* display_name: The name of the assignment to display (currently the Unit title)
        * location: The jump link to a specific assignment
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
        If team assignments are not enabled for the course, a 503 is returned.
If the user is not logged in, a 401 error is returned.
If the user is unenrolled or does not have API access, a 403 error is returned.
If the supplied course/team is bad or the user is not permitted to
search in a protected team, a 404 error is returned as if the team does not exist.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (
permissions.IsAuthenticated,
IsEnrolledOrIsStaff,
HasSpecificTeamAccess,
IsStaffOrPrivilegedOrReadOnly,
)
def get(self, request, team_id):
"""GET v0/teams/{team_id_pattern}/assignments"""
course_team = get_object_or_404(CourseTeam, team_id=team_id)
user = request.user
course_id = course_team.course_id
if not are_team_submissions_enabled(course_id):
return Response(status=status.HTTP_503_SERVICE_UNAVAILABLE)
if not has_team_api_access(request.user, course_id):
return Response(status=status.HTTP_403_FORBIDDEN)
if not has_specific_team_access(user, course_team):
return Response(status=status.HTTP_404_NOT_FOUND)
teamset_ora_blocks = get_assignments_for_team(user, course_team)
# Serialize info for display
assignments = [{
'display_name': self._display_name_for_ora_block(block),
'location': self._jump_location_for_block(course_id, block.location)
} for block in teamset_ora_blocks]
return Response(assignments)
def _display_name_for_ora_block(self, block):
""" Get the unit name where the ORA is located for better display naming """
unit = modulestore().get_item(block.parent)
section = modulestore().get_item(unit.parent)
return "{section}: {unit}".format(
section=section.display_name,
unit=unit.display_name
)
def _jump_location_for_block(self, course_id, location):
""" Get the URL for jumping to a designated XBlock in a course """
return reverse('jump_to', kwargs={'course_id': str(course_id), 'location': str(location)})
class TopicListView(GenericAPIView):
"""
**Use Cases**
Retrieve a list of topics associated with a single course.
**Example Requests**
GET /api/team/v0/topics/?course_id={course_id}
**Query Parameters for GET**
* course_id: Filters the result to topics belonging to the given
course (required).
* order_by: Orders the results. Currently only 'name' and 'team_count' are supported;
the default value is 'name'. If 'team_count' is specified, topics are returned first sorted
by number of teams per topic (descending), with a secondary sort of 'name'.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
**Response Values for GET**
If the user is not logged in, a 401 error is returned.
If the course_id is not given or an unsupported value is passed for
order_by, returns a 400 error.
If the user is not logged in, is not enrolled in the course, or is
not course or global staff, returns a 403 error.
If the course does not exist, returns a 404 error.
Otherwise, a 200 response is returned containing the following
fields:
* count: The total number of topics matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the topics matching the request.
* id: The topic's unique identifier.
* name: The name of the topic.
* description: A description of the topic.
            * team_count: Number of teams created under the topic. If the requesting user
              is enrolled in a degree-bearing institution, the count only includes teams
              with the organization_protected attribute set to true. If the requesting user
              is not affiliated with any institution, the count only includes teams whose
              members are outside of any institutional affiliation.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
pagination_class = TopicsPagination
queryset = []
def get(self, request):
"""GET /api/team/v0/topics/?course_id={course_id}"""
course_id_string = request.query_params.get('course_id', None)
if course_id_string is None:
return Response({
'field_errors': {
'course_id': build_api_error(
ugettext_noop("The supplied course id {course_id} is not valid."),
course_id=course_id_string
)
}
}, status=status.HTTP_400_BAD_REQUEST)
try:
course_id = CourseKey.from_string(course_id_string)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
# Ensure the course exists
course_module = modulestore().get_course(course_id)
if course_module is None: # course is None if not found
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_team_api_access(request.user, course_id):
return Response(status=status.HTTP_403_FORBIDDEN)
ordering = request.query_params.get('order_by', 'name')
if ordering not in ['name', 'team_count']:
return Response({
'developer_message': f"unsupported order_by value {ordering}",
# Translators: 'ordering' is a string describing a way
# of ordering a list. For example, {ordering} may be
# 'name', indicating that the user wants to sort the
# list by lower case name.
'user_message': _("The ordering {ordering} is not supported").format(ordering=ordering),
}, status=status.HTTP_400_BAD_REQUEST)
# Always sort alphabetically, as it will be used as secondary sort
# in the case of "team_count".
organization_protection_status = user_organization_protection_status(request.user, course_id)
topics = get_alphabetical_topics(course_module)
topics = _filter_hidden_private_teamsets(request.user, topics, course_module)
if ordering == 'team_count':
add_team_count(request.user, topics, course_id, organization_protection_status)
topics.sort(key=lambda t: t['team_count'], reverse=True)
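            # Python's sort is stable, so topics tied on team_count keep the
            # alphabetical order established above, which provides the documented
            # secondary sort by name.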
page = self.paginate_queryset(topics)
serializer = TopicSerializer(
page,
context={'course_id': course_id, 'user': request.user},
many=True,
)
else:
page = self.paginate_queryset(topics)
# Use the serializer that adds team_count in a bulk operation per page.
serializer = BulkTeamCountTopicSerializer(
page,
context={
'request': request,
'course_id': course_id,
'organization_protection_status': organization_protection_status
},
many=True
)
response = self.get_paginated_response(serializer.data)
response.data['sort_order'] = ordering
return response
def _filter_hidden_private_teamsets(user, teamsets, course_module):
"""
Return a filtered list of teamsets, removing any private teamsets that a user doesn't have access to.
Follows the same logic as `has_specific_teamset_access` but in bulk rather than for one teamset at a time
"""
if has_course_staff_privileges(user, course_module.id):
return teamsets
private_teamset_ids = [teamset.teamset_id for teamset in course_module.teamsets if teamset.is_private_managed]
teamset_ids_user_has_access_to = set(
CourseTeam.objects.filter(
course_id=course_module.id,
topic_id__in=private_teamset_ids,
membership__user=user
).values_list('topic_id', flat=True)
)
return [
teamset for teamset in teamsets
if teamset['type'] != TeamsetType.private_managed.value or teamset['id'] in teamset_ids_user_has_access_to
]
def get_alphabetical_topics(course_module):
"""Return a list of team topics sorted alphabetically.
Arguments:
course_module (xmodule): the course which owns the team topics
Returns:
list: a list of sorted team topics
"""
return sorted(
course_module.teams_configuration.cleaned_data['team_sets'],
key=lambda t: t['name'].lower(),
)
class TopicDetailView(APIView):
"""
**Use Cases**
Retrieve a single topic from a course.
**Example Requests**
GET /api/team/v0/topics/{topic_id},{course_id}
**Query Parameters for GET**
* topic_id: The ID of the topic to retrieve (required).
* course_id: The ID of the course to retrieve the topic from
(required).
**Response Values for GET**
If the user is not logged in, a 401 error is returned.
        If the topic_id or course_id is not given or an unsupported value is
passed for order_by, returns a 400 error.
If the user is not enrolled in the course, or is not course or
global staff, returns a 403 error.
If the course does not exist, returns a 404 error.
Otherwise, a 200 response is returned containing the following fields:
* id: The topic's unique identifier.
* name: The name of the topic.
* description: A description of the topic.
        * team_count: Number of teams created under the topic. If the requesting user
          is enrolled in a degree-bearing institution, the count only includes teams
          with the organization_protected attribute set to true. If the requesting user
          is not affiliated with any institution, the count only includes teams whose
          members are outside of any institutional affiliation.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, topic_id, course_id):
"""GET /api/team/v0/topics/{topic_id},{course_id}/"""
try:
course_id = CourseKey.from_string(course_id)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
# Ensure the course exists
course_module = modulestore().get_course(course_id)
if course_module is None:
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_team_api_access(request.user, course_id):
return Response(status=status.HTTP_403_FORBIDDEN)
try:
topic = course_module.teamsets_by_id[topic_id]
except KeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_specific_teamset_access(request.user, course_module, topic_id):
return Response(status=status.HTTP_404_NOT_FOUND)
organization_protection_status = user_organization_protection_status(request.user, course_id)
serializer = TopicSerializer(
topic.cleaned_data,
context={
'course_id': course_id,
'organization_protection_status': organization_protection_status,
'user': request.user
}
)
return Response(serializer.data)
class MembershipListView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
List teamset team memberships or add a user to a teamset.
**Example Requests**:
GET /api/team/v0/team_membership
POST /api/team/v0/team_membership
**Query Parameters for GET**
At least one of username and team_id must be provided.
* username: Returns membership records only for the specified user.
If the requesting user is not staff then only memberships for
teams associated with courses in which the requesting user is
enrolled are returned.
* team_id: Returns only membership records associated with the
specified team. The requesting user must be staff or enrolled in
the course associated with the team.
* teamset_id: Returns membership records only for the specified teamset.
          If teamset_id is specified, course_id must also be specified.
teamset_id and team_id are mutually exclusive. For open and public_managed
teamsets, the user must be staff or enrolled in the course. For
private_managed teamsets, the user must be course staff, or a member of the
specified teamset.
* course_id: Returns membership records only for the specified
course. Username must have access to this course, or else team_id
must be in this course.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, the response contains:
* count: The total number of memberships matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the memberships matching the request.
* user: The user associated with the membership. This field may
contain an expanded or collapsed representation.
* team: The team associated with the membership. This field may
contain an expanded or collapsed representation.
* date_joined: The date and time the membership was created.
* last_activity_at: The date of the last activity of the user
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in and active, a 401 error is returned.
If neither team_id nor username are provided, a 400 error is
returned.
If team_id is provided but the team does not exist, a 404 error is
returned.
If the specified course_id is invalid, a 404 error is returned.
This endpoint uses 404 error codes to avoid leaking information
about team or user existence. Specifically, a 404 error will be
returned if a logged in user specifies a team_id for a course
they are not enrolled in.
Additionally, when username is specified the list of returned
memberships will be filtered to memberships in teams associated
with courses that the requesting user is enrolled in.
If the course specified by course_id does not contain the team
specified by team_id, a 400 error is returned.
If the user is not enrolled in the course specified by course_id,
and does not have staff access to the course, a 400 error is
returned.
**Response Values for POST**
Any logged in user enrolled in a course can enroll themselves in a
team in the course. Course staff, global staff, and discussion
privileged users can enroll any user in a team, with a few
exceptions noted below.
If the user is not logged in and active, a 401 error is returned.
If username and team are not provided in the posted JSON, a 400
error is returned describing the missing fields.
If the specified team does not exist, a 404 error is returned.
If the user is not staff, does not have discussion privileges,
and is not enrolled in the course associated with the team they
are trying to join, or if they are trying to add a user other
than themselves to a team, a 404 error is returned. This is to
prevent leaking information about the existence of teams and users.
If the specified user does not exist, a 404 error is returned.
If the user is already a member of a team in the course associated
with the team they are trying to join, a 400 error is returned.
This applies to both staff and students.
If the user is not enrolled in the course associated with the team
they are trying to join, a 400 error is returned. This can occur
when a staff or discussion privileged user posts a request adding
another user to a team.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = MembershipSerializer
def get(self, request): # lint-amnesty, pylint: disable=too-many-statements
"""GET /api/team/v0/team_membership"""
specified_username_or_team = False
username = None
team_ids = None
requested_course_id = None
requested_course_key = None
accessible_course_ids = None
if 'course_id' in request.query_params:
requested_course_id = request.query_params['course_id']
try:
requested_course_key = CourseKey.from_string(requested_course_id)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
if 'team_id' in request.query_params and 'teamset_id' in request.query_params:
return Response(
build_api_error(ugettext_noop("teamset_id and team_id are mutually exclusive options.")),
status=status.HTTP_400_BAD_REQUEST
)
elif 'team_id' in request.query_params:
specified_username_or_team = True
team_id = request.query_params['team_id']
try:
team = CourseTeam.objects.get(team_id=team_id)
team_ids = [team.team_id]
except CourseTeam.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if requested_course_key is not None and requested_course_key != team.course_id:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not has_team_api_access(request.user, team.course_id):
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_specific_team_access(request.user, team):
return Response(status=status.HTTP_403_FORBIDDEN)
elif 'teamset_id' in request.query_params:
if 'course_id' not in request.query_params:
return Response(
build_api_error(ugettext_noop("teamset_id requires course_id to also be provided.")),
status=status.HTTP_400_BAD_REQUEST
)
if not has_team_api_access(request.user, requested_course_key):
return Response(status=status.HTTP_404_NOT_FOUND)
course_module = modulestore().get_course(requested_course_key)
if not course_module:
return Response(status=status.HTTP_404_NOT_FOUND)
specified_username_or_team = True
teamsets = course_module.teams_configuration.teamsets_by_id
teamset_id = request.query_params['teamset_id']
teamset = teamsets.get(teamset_id, None)
if not teamset:
return Response(
build_api_error(ugettext_noop("No teamset found in given course with given id")),
status=status.HTTP_404_NOT_FOUND
)
teamset_teams = CourseTeam.objects.filter(course_id=requested_course_key, topic_id=teamset_id)
if has_course_staff_privileges(request.user, requested_course_key):
teams_with_access = list(teamset_teams)
else:
teams_with_access = [
team for team in teamset_teams
if has_specific_team_access(request.user, team)
]
if teamset.is_private_managed and not teams_with_access:
return Response(
build_api_error(ugettext_noop("No teamset found in given course with given id")),
status=status.HTTP_404_NOT_FOUND
)
team_ids = [team.team_id for team in teams_with_access]
if 'username' in request.query_params:
specified_username_or_team = True
username = request.query_params['username']
if not request.user.is_staff:
enrolled_courses = (
CourseEnrollment.enrollments_for_user(request.user).values_list('course_id', flat=True)
)
staff_courses = (
CourseAccessRole.objects.filter(user=request.user, role='staff').values_list('course_id', flat=True)
)
accessible_course_ids = [item for sublist in (enrolled_courses, staff_courses) for item in sublist]
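            # Flatten the two querysets into a single list of course keys;
            # equivalent to list(enrolled_courses) + list(staff_courses).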
if requested_course_id is not None and requested_course_key not in accessible_course_ids:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not specified_username_or_team:
return Response(
build_api_error(ugettext_noop("username or (team_id or teamset_id) must be specified.")),
status=status.HTTP_400_BAD_REQUEST
)
course_keys = None
if requested_course_key is not None:
course_keys = [requested_course_key]
elif accessible_course_ids is not None:
course_keys = accessible_course_ids
queryset = CourseTeamMembership.get_memberships(username, course_keys, team_ids)
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
def post(self, request):
"""POST /api/team/v0/team_membership"""
field_errors = {}
if 'username' not in request.data:
field_errors['username'] = build_api_error(ugettext_noop("Username is required."))
if 'team_id' not in request.data:
field_errors['team_id'] = build_api_error(ugettext_noop("Team id is required."))
if field_errors:
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
try:
team = CourseTeam.objects.get(team_id=request.data['team_id'])
except CourseTeam.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
username = request.data['username']
if not has_team_api_access(request.user, team.course_id, access_username=username):
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_specific_team_access(request.user, team):
return Response(status=status.HTTP_404_NOT_FOUND)
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
course_module = modulestore().get_course(team.course_id)
# This should use `calc_max_team_size` instead of `default_max_team_size` (TODO MST-32).
max_team_size = course_module.teams_configuration.default_max_team_size
if max_team_size is not None and team.users.count() >= max_team_size:
return Response(
build_api_error(ugettext_noop("This team is already full.")),
status=status.HTTP_400_BAD_REQUEST
)
if not can_user_modify_team(request.user, team):
return Response(
build_api_error(ugettext_noop("You can't join an instructor managed team.")),
status=status.HTTP_403_FORBIDDEN
)
try:
membership = team.add_user(user)
emit_team_event(
'edx.team.learner_added',
team.course_id,
{
'team_id': team.team_id,
'user_id': user.id,
'add_method': 'joined_from_team_view' if user == request.user else 'added_by_another_user'
}
)
except AlreadyOnTeamInTeamset:
return Response(
build_api_error(
ugettext_noop("The user {username} is already a member of a team in this teamset."),
username=username
),
status=status.HTTP_400_BAD_REQUEST
)
except NotEnrolledInCourseForTeam:
return Response(
build_api_error(
ugettext_noop("The user {username} is not enrolled in the course associated with this team."),
username=username
),
status=status.HTTP_400_BAD_REQUEST
)
serializer = self.get_serializer(instance=membership)
return Response(serializer.data)
class MembershipDetailView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
Gets individual course team memberships or removes a user from a course team.
**Example Requests**:
GET /api/team/v0/team_membership/{team_id},{username}
DELETE /api/team/v0/team_membership/{team_id},{username}
**Query Parameters for GET**
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, or is course or global staff
the response contains:
* user: The user associated with the membership. This field may
contain an expanded or collapsed representation.
* team: The team associated with the membership. This field may
contain an expanded or collapsed representation.
* date_joined: The date and time the membership was created.
* last_activity_at: The date of the last activity of any team member
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in and active, a 401 error is returned.
If specified team does not exist, a 404 error is returned.
If the user is logged in but is not enrolled in the course
associated with the specified team, or is not staff, a 404 error is
returned. This avoids leaking information about course or team
existence.
If the membership does not exist, a 404 error is returned.
**Response Values for DELETE**
Any logged in user enrolled in a course can remove themselves from
a team in the course. Course staff, global staff, and discussion
privileged users can remove any user from a team. Successfully
deleting a membership will return a 204 response with no content.
If the user is not logged in and active, a 401 error is returned.
If the specified team or username does not exist, a 404 error is
returned.
If the user is not staff or a discussion privileged user and is
attempting to remove another user from a team, a 404 error is
returned. This prevents leaking information about team and user
existence.
If the membership does not exist, a 404 error is returned.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = MembershipSerializer
def get_team(self, team_id):
"""Returns the team with team_id, or throws Http404 if it does not exist."""
try:
return CourseTeam.objects.get(team_id=team_id)
except CourseTeam.DoesNotExist:
raise Http404 # lint-amnesty, pylint: disable=raise-missing-from
def get_membership(self, username, team):
"""Returns the membership for the given user and team, or throws Http404 if it does not exist."""
try:
return CourseTeamMembership.objects.get(user__username=username, team=team)
except CourseTeamMembership.DoesNotExist:
raise Http404 # lint-amnesty, pylint: disable=raise-missing-from
def get(self, request, team_id, username):
"""GET /api/team/v0/team_membership/{team_id},{username}"""
team = self.get_team(team_id)
if not has_team_api_access(request.user, team.course_id):
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_specific_team_access(request.user, team):
return Response(status=status.HTTP_404_NOT_FOUND)
membership = self.get_membership(username, team)
serializer = self.get_serializer(instance=membership)
return Response(serializer.data)
def delete(self, request, team_id, username):
"""DELETE /api/team/v0/team_membership/{team_id},{username}"""
team = self.get_team(team_id)
if not has_team_api_access(request.user, team.course_id, access_username=username):
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_specific_team_access(request.user, team):
return Response(status=status.HTTP_404_NOT_FOUND)
if not can_user_modify_team(request.user, team):
return Response(
build_api_error(ugettext_noop("You can't leave an instructor managed team.")),
status=status.HTTP_403_FORBIDDEN
)
membership = self.get_membership(username, team)
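        # The optional 'admin' query parameter only changes how the removal is
        # labeled in the analytics event emitted below; the permission checks
        # above are not affected by it.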
removal_method = 'self_removal'
if 'admin' in request.query_params:
removal_method = 'removed_by_admin'
membership.delete()
emit_team_event(
'edx.team.learner_removed',
team.course_id,
{
'team_id': team.team_id,
'user_id': membership.user.id,
'remove_method': removal_method
}
)
return Response(status=status.HTTP_204_NO_CONTENT)
class MembershipBulkManagementView(GenericAPIView):
"""
View for uploading and downloading team membership CSVs.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, **_kwargs):
"""
        Download a CSV with team membership data for the given course run.
"""
self.check_access()
response = HttpResponse(content_type='text/csv')
filename = "team-membership_{}_{}_{}.csv".format(
self.course.id.org, self.course.id.course, self.course.id.run
)
response['Content-Disposition'] = f'attachment; filename="{filename}"'
load_team_membership_csv(self.course, response)
return response
def post(self, request, **_kwargs):
"""
        Process an uploaded CSV to modify team memberships for the given course run.
"""
self.check_access()
inputfile_handle = request.FILES['csv']
team_import_manager = TeamMembershipImportManager(self.course)
team_import_manager.set_team_membership_from_csv(inputfile_handle)
if team_import_manager.import_succeeded:
msg = f"{team_import_manager.number_of_learners_assigned} learners were affected."
return JsonResponse({'message': msg}, status=status.HTTP_201_CREATED)
else:
return JsonResponse({
'errors': team_import_manager.validation_errors
}, status=status.HTTP_400_BAD_REQUEST)
def check_access(self):
"""
Raises 403 if user does not have access to this endpoint.
"""
if not has_course_staff_privileges(self.request.user, self.course.id):
raise PermissionDenied(
"To manage team membership of {}, you must be course staff.".format(
self.course.id
)
)
@cached_property
def course(self):
"""
Return a CourseBlock based on the `course_id` kwarg.
If invalid or not found, raise 404.
"""
course_id_string = self.kwargs.get('course_id')
if not course_id_string:
raise Http404('No course key provided.')
try:
course_id = CourseKey.from_string(course_id_string)
except InvalidKeyError:
raise Http404(f'Invalid course key: {course_id_string}') # lint-amnesty, pylint: disable=raise-missing-from
course_module = modulestore().get_course(course_id)
if not course_module:
raise Http404(f'Course not found: {course_id}')
return course_module
| """
HTTP endpoints for the Teams API.
"""
import logging
from collections import Counter
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import PermissionDenied
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.http import Http404, HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from django_countries import countries
from edx_rest_framework_extensions.paginators import DefaultPagination, paginate_search_results
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from rest_framework import permissions, status
from rest_framework.authentication import SessionAuthentication
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from common.djangoapps.student.models import CourseAccessRole, CourseEnrollment
from common.djangoapps.util.model_utils import truncate_fields
from lms.djangoapps.courseware.courses import get_course_with_access, has_access
from lms.djangoapps.discussion.django_comment_client.utils import has_discussion_privileges
from lms.djangoapps.teams.models import CourseTeam, CourseTeamMembership
from openedx.core.lib.api.authentication import BearerAuthentication
from openedx.core.lib.api.parsers import MergePatchParser
from openedx.core.lib.api.permissions import IsCourseStaffInstructor, IsStaffOrReadOnly
from openedx.core.lib.api.view_utils import (
ExpandableFieldViewMixin,
RetrievePatchAPIView,
add_serializer_errors,
build_api_error
)
from openedx.core.lib.teams_config import TeamsetType
from xmodule.modulestore.django import modulestore
from . import is_feature_enabled
from .api import (
OrganizationProtectionStatus,
add_team_count,
can_user_create_team_in_topic,
can_user_modify_team,
get_assignments_for_team,
has_course_staff_privileges,
has_specific_team_access,
has_specific_teamset_access,
has_team_api_access,
user_organization_protection_status
)
from .csv import TeamMembershipImportManager, load_team_membership_csv
from .errors import AlreadyOnTeamInTeamset, ElasticSearchConnectionError, NotEnrolledInCourseForTeam
from .search_indexes import CourseTeamIndexer
from .serializers import (
BulkTeamCountTopicSerializer,
CourseTeamCreationSerializer,
CourseTeamSerializer,
MembershipSerializer,
TopicSerializer
)
from .toggles import are_team_submissions_enabled
from .utils import emit_team_event
TEAM_MEMBERSHIPS_PER_PAGE = 5
TOPICS_PER_PAGE = 12
MAXIMUM_SEARCH_SIZE = 10000
log = logging.getLogger(__name__)
@receiver(post_save, sender=CourseTeam)
def team_post_save_callback(sender, instance, **kwargs): # pylint: disable=unused-argument
""" Emits signal after the team is saved. """
changed_fields = instance.field_tracker.changed()
# Don't emit events when we are first creating the team.
if not kwargs['created']:
for field in changed_fields:
if field not in instance.FIELD_BLACKLIST:
truncated_fields = truncate_fields(
str(changed_fields[field]),
str(getattr(instance, field))
)
truncated_fields['team_id'] = instance.team_id
truncated_fields['team_id'] = instance.team_id
truncated_fields['field'] = field
emit_team_event(
'edx.team.changed',
instance.course_id,
truncated_fields
)
class TopicsPagination(DefaultPagination):
"""Paginate topics. """
page_size = TOPICS_PER_PAGE
class MyTeamsPagination(DefaultPagination):
"""Paginate the user's teams. """
page_size = TEAM_MEMBERSHIPS_PER_PAGE
class TeamsDashboardView(GenericAPIView):
"""
View methods related to the teams dashboard.
"""
def get(self, request, course_id):
"""
Renders the teams dashboard, which is shown on the "Teams" tab.
Raises a 404 if the course specified by course_id does not exist, the
user is not registered for the course, or the teams feature is not enabled.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, "load", course_key)
if not is_feature_enabled(course):
raise Http404
if not CourseEnrollment.is_enrolled(request.user, course.id) and \
not has_access(request.user, 'staff', course, course.id):
raise Http404
user = request.user
# Even though sorting is done outside of the serializer, sort_order needs to be passed
# to the serializer so that the paginated results indicate how they were sorted.
sort_order = 'name'
topics = get_alphabetical_topics(course)
topics = _filter_hidden_private_teamsets(user, topics, course)
organization_protection_status = user_organization_protection_status(request.user, course_key)
# We have some frontend logic that needs to know if we have any open, public, or managed teamsets,
# and it's easier to just figure that out here when we have them all already
teamset_counts_by_type = Counter([topic['type'] for topic in topics])
# Paginate and serialize topic data
# BulkTeamCountPaginatedTopicSerializer will add team counts to the topics in a single
# bulk operation per page.
topics_data = self._serialize_and_paginate(
TopicsPagination,
topics,
request,
BulkTeamCountTopicSerializer,
{
'course_id': course.id,
'organization_protection_status': organization_protection_status
},
)
topics_data["sort_order"] = sort_order # pylint: disable=unsupported-assignment-operation
filter_query = {
'membership__user': user,
'course_id': course.id,
}
if organization_protection_status != OrganizationProtectionStatus.protection_exempt:
is_user_org_protected = organization_protection_status == OrganizationProtectionStatus.protected
filter_query['organization_protected'] = is_user_org_protected
user_teams = CourseTeam.objects.filter(**filter_query).order_by('-last_activity_at', 'team_size')
user_teams_data = self._serialize_and_paginate(
MyTeamsPagination,
user_teams,
request,
CourseTeamSerializer,
{'expand': ('user',)}
)
context = {
"course": course,
"topics": topics_data,
# It is necessary to pass both privileged and staff because only privileged users can
# administer discussion threads, but both privileged and staff users are allowed to create
# multiple teams (since they are not automatically added to teams upon creation).
"user_info": {
"username": user.username,
"privileged": has_discussion_privileges(user, course_key),
"staff": bool(has_access(user, 'staff', course_key)),
"teams": user_teams_data
},
"has_open_teamset": bool(teamset_counts_by_type[TeamsetType.open.value]),
"has_public_managed_teamset": bool(teamset_counts_by_type[TeamsetType.public_managed.value]),
"has_managed_teamset": bool(
teamset_counts_by_type[TeamsetType.public_managed.value] +
teamset_counts_by_type[TeamsetType.private_managed.value]
),
"topic_url": reverse(
'topics_detail', kwargs={'topic_id': 'topic_id', 'course_id': str(course_id)}, request=request
),
"topics_url": reverse('topics_list', request=request),
"teams_url": reverse('teams_list', request=request),
"teams_detail_url": reverse('teams_detail', args=['team_id']),
"team_memberships_url": reverse('team_membership_list', request=request),
"my_teams_url": reverse('teams_list', request=request),
"team_membership_detail_url": reverse('team_membership_detail', args=['team_id', user.username]),
"team_membership_management_url": reverse(
'team_membership_bulk_management', request=request, kwargs={'course_id': course_id}
),
"languages": [[lang[0], _(lang[1])] for lang in settings.ALL_LANGUAGES], # pylint: disable=translation-of-non-string
"countries": list(countries),
"disable_courseware_js": True,
"teams_base_url": reverse('teams_dashboard', request=request, kwargs={'course_id': course_id}),
}
# Assignments are feature-flagged
if are_team_submissions_enabled(course_key):
context["teams_assignments_url"] = reverse('teams_assignments_list', args=['team_id'])
return render(request, "teams/teams.html", context)
def _serialize_and_paginate(self, pagination_cls, queryset, request, serializer_cls, serializer_ctx):
"""
Serialize and paginate objects in a queryset.
Arguments:
pagination_cls (pagination.Paginator class): Django Rest Framework Paginator subclass.
queryset (QuerySet): Django queryset to serialize/paginate.
serializer_cls (serializers.Serializer class): Django Rest Framework Serializer subclass.
serializer_ctx (dict): Context dictionary to pass to the serializer
Returns: dict
"""
# Django Rest Framework v3 requires that we pass the request
# into the serializer's context if the serialize contains
# hyperlink fields.
serializer_ctx["request"] = request
# Instantiate the paginator and use it to paginate the queryset
paginator = pagination_cls()
page = paginator.paginate_queryset(queryset, request)
# Serialize the page
serializer = serializer_cls(page, context=serializer_ctx, many=True)
# Use the paginator to construct the response data
# This will use the pagination subclass for the view to add additional
# fields to the response.
# For example, if the input data is a list, the output data would
# be a dictionary with keys "count", "next", "previous", and "results"
# (where "results" is set to the value of the original list)
return paginator.get_paginated_response(serializer.data).data
class TeamsListView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
Get or create a course team.
**Example Requests**:
GET /api/team/v0/teams
POST /api/team/v0/teams
**Query Parameters for GET**
* course_id: Filters the result to teams belonging to the given
course. Required.
* topic_id: Filters the result to teams associated with the given
topic.
* text_search: Searches for full word matches on the name, description,
country, and language fields. NOTES: Search is on full names for countries
and languages, not the ISO codes. Text_search cannot be requested along with
with order_by.
* order_by: Cannot be called along with with text_search. Must be one of the following:
* name: Orders results by case insensitive team name (default).
* open_slots: Orders results by most open slots (for tie-breaking,
last_activity_at is used, with most recent first).
* last_activity_at: Orders result by team activity, with most active first
(for tie-breaking, open_slots is used, with most open slots first).
* username: Return teams whose membership contains the given user.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, the response contains:
* count: The total number of teams matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the teams matching the request.
* id: The team's unique identifier.
* discussion_topic_id: The unique id of the comments service
discussion topic associated with this team.
* name: The name of the team.
* course_id: The identifier for the course this team belongs to.
* topic_id: Optionally specifies which topic the team is associated
with.
* date_created: Date and time when the team was created.
* description: A description of the team.
* country: Optionally specifies which country the team is
associated with.
* language: Optionally specifies which language the team is
associated with.
* last_activity_at: The date of the last activity of any team member
within the team.
* membership: A list of the users that are members of the team.
See membership endpoint for more detail.
* organization_protected: Whether the team consists of organization-protected
learners
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in, a 401 error is returned.
If the user is not enrolled in the course specified by course_id or
is not course or global staff, a 403 error is returned.
If the specified course_id is not valid or the user attempts to
use an unsupported query parameter, a 400 error is returned.
If the response does not exist, a 404 error is returned. For
example, the course_id may not reference a real course or the page
number may be beyond the last page.
If the server is unable to connect to Elasticsearch, and
the text_search parameter is supplied, a 503 error is returned.
If the requesting user is a learner, they will only see the organization-protected
set of teams if they are enrolled in a degree-bearing institution.
Otherwise, the learner will only see the organization-unprotected set of teams.
**Response Values for POST**
Any logged in user who has verified their email address can create
a team in an open teamset. The format mirrors that of a GET for an individual team,
but does not include the id, date_created, or membership fields.
id is automatically computed based on name.
If the user is not logged in, a 401 error is returned.
If the user is not enrolled in the course, is not course or
global staff, or does not have discussion privileges a 403 error
is returned.
If the course_id is not valid, or the topic_id is missing, or extra fields
are included in the request, a 400 error is returned.
If the specified course does not exist, a 404 error is returned.
If the specified teamset does not exist, a 404 error is returned.
If the specified teamset does exist, but the requesting user shouldn't be
able to see it, a 404 is returned.
"""
# BearerAuthentication must come first to return a 401 for unauthenticated users
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = CourseTeamSerializer
def get(self, request):
"""GET /api/team/v0/teams/"""
result_filter = {}
if 'course_id' not in request.query_params:
return Response(
build_api_error(ugettext_noop("course_id must be provided")),
status=status.HTTP_400_BAD_REQUEST
)
course_id_string = request.query_params['course_id']
try:
course_key = CourseKey.from_string(course_id_string)
course_module = modulestore().get_course(course_key)
except InvalidKeyError:
error = build_api_error(
ugettext_noop("The supplied course id {course_id} is not valid."),
course_id=course_id_string,
)
return Response(error, status=status.HTTP_400_BAD_REQUEST)
# Ensure the course exists
if course_module is None:
return Response(status=status.HTTP_404_NOT_FOUND)
result_filter.update({'course_id': course_key})
if not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
text_search = request.query_params.get('text_search', None)
if text_search and request.query_params.get('order_by', None):
return Response(
build_api_error(ugettext_noop("text_search and order_by cannot be provided together")),
status=status.HTTP_400_BAD_REQUEST
)
username = request.query_params.get('username', None)
if username is not None:
result_filter.update({'membership__user__username': username})
topic_id = request.query_params.get('topic_id', None)
if topic_id is not None:
if topic_id not in course_module.teamsets_by_id:
error = build_api_error(
ugettext_noop('The supplied topic id {topic_id} is not valid'),
topic_id=topic_id
)
return Response(error, status=status.HTTP_400_BAD_REQUEST)
result_filter.update({'topic_id': topic_id})
organization_protection_status = user_organization_protection_status(
request.user, course_key
)
if not organization_protection_status.is_exempt:
result_filter.update({
'organization_protected': organization_protection_status.is_protected
})
if text_search and CourseTeamIndexer.search_is_enabled():
try:
search_engine = CourseTeamIndexer.engine()
except ElasticSearchConnectionError:
return Response(
build_api_error(ugettext_noop('Error connecting to elasticsearch')),
status=status.HTTP_503_SERVICE_UNAVAILABLE
)
result_filter.update({'course_id': course_id_string})
search_results = search_engine.search(
query_string=text_search,
field_dictionary=result_filter,
size=MAXIMUM_SEARCH_SIZE,
)
# We need to manually exclude some potential private_managed teams from results, because
# it doesn't appear that the search supports "field__in" style lookups
# Non-staff users should not be able to see private_managed teams that they are not on.
# Staff shouldn't have any excluded teams.
excluded_private_team_ids = self._get_private_team_ids_to_exclude(course_module)
search_results['results'] = [
result for result in search_results['results']
if result['data']['id'] not in excluded_private_team_ids
]
search_results['total'] = len(search_results['results'])
paginated_results = paginate_search_results(
CourseTeam,
search_results,
self.paginator.get_page_size(request),
self.get_page()
)
emit_team_event('edx.team.searched', course_key, {
"number_of_results": search_results['total'],
"search_text": text_search,
"topic_id": topic_id,
})
page = self.paginate_queryset(paginated_results)
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
ordering_schemes = {
'name': ('name',), # MySQL does case-insensitive order_by
'open_slots': ('team_size', '-last_activity_at'),
'last_activity_at': ('-last_activity_at', 'team_size'),
}
# hide teams in private_managed teamsets from non-staff users that aren't members of those teams
excluded_private_team_ids = self._get_private_team_ids_to_exclude(course_module)
queryset = CourseTeam.objects.filter(**result_filter).exclude(team_id__in=excluded_private_team_ids)
order_by_input = request.query_params.get('order_by', 'name')
if order_by_input not in ordering_schemes:
return Response(
{
'developer_message': "unsupported order_by value {ordering}".format(
ordering=order_by_input,
),
# Translators: 'ordering' is a string describing a way
# of ordering a list. For example, {ordering} may be
# 'name', indicating that the user wants to sort the
# list by lower case name.
'user_message': _("The ordering {ordering} is not supported").format(
ordering=order_by_input,
),
},
status=status.HTTP_400_BAD_REQUEST,
)
queryset = queryset.order_by(*ordering_schemes[order_by_input])
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
response = self.get_paginated_response(serializer.data)
response.data['sort_order'] = order_by_input
return response
def post(self, request):
"""POST /api/team/v0/teams/"""
field_errors = {}
course_key = None
course_id = request.data.get('course_id')
# Handle field errors and check that the course exists
try:
course_key = CourseKey.from_string(course_id)
# Ensure the course exists
course_module = modulestore().get_course(course_key)
if not course_module:
return Response(status=status.HTTP_404_NOT_FOUND)
except InvalidKeyError:
field_errors['course_id'] = build_api_error(
ugettext_noop('The supplied course_id {course_id} is not valid.'),
course_id=course_id
)
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
topic_id = request.data.get('topic_id')
if not topic_id:
field_errors['topic_id'] = build_api_error(
ugettext_noop('topic_id is required'),
course_id=course_id
)
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
if course_key and not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
if topic_id not in course_module.teams_configuration.teamsets_by_id or (
not has_specific_teamset_access(request.user, course_module, topic_id)
):
return Response(status=status.HTTP_404_NOT_FOUND)
# The user has to have access to this teamset at this point, so we can return 403
# and not leak the existence of a private teamset
if not can_user_create_team_in_topic(request.user, course_key, topic_id):
return Response(
build_api_error(ugettext_noop("You can't create a team in an instructor managed topic.")),
status=status.HTTP_403_FORBIDDEN
)
# Course and global staff, as well as discussion "privileged" users, will not automatically
# be added to a team when they create it. They are allowed to create multiple teams.
is_team_administrator = (has_access(request.user, 'staff', course_key)
or has_discussion_privileges(request.user, course_key))
if not is_team_administrator and (
CourseTeamMembership.user_in_team_for_teamset(request.user, course_key, topic_id=topic_id)
):
error_message = build_api_error(
ugettext_noop('You are already in a team in this teamset.'),
course_id=course_id,
teamset_id=topic_id,
)
return Response(error_message, status=status.HTTP_400_BAD_REQUEST)
data = request.data.copy()
data['course_id'] = str(course_key)
organization_protection_status = user_organization_protection_status(request.user, course_key)
if organization_protection_status != OrganizationProtectionStatus.protection_exempt:
data['organization_protected'] = organization_protection_status == OrganizationProtectionStatus.protected
serializer = CourseTeamCreationSerializer(data=data)
add_serializer_errors(serializer, data, field_errors)
if field_errors:
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
else:
team = serializer.save()
emit_team_event('edx.team.created', course_key, {
'team_id': team.team_id
})
if not is_team_administrator:
# Add the creating user to the team.
team.add_user(request.user)
emit_team_event(
'edx.team.learner_added',
course_key,
{
'team_id': team.team_id,
'user_id': request.user.id,
'add_method': 'added_on_create'
}
)
data = CourseTeamSerializer(team, context={"request": request}).data
return Response(data)
def get_page(self):
""" Returns page number specified in args, params, or defaults to 1. """
# This code is taken from within the GenericAPIView#paginate_queryset method.
# We need access to the page outside of that method for our paginate_search_results method
page_kwarg = self.kwargs.get(self.paginator.page_query_param)
page_query_param = self.request.query_params.get(self.paginator.page_query_param)
return page_kwarg or page_query_param or 1
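# Illustrative note: the page number is resolved from URL kwargs first, then the
# query string, then defaults to 1. For a hypothetical request such as
# GET /api/team/v0/teams/?course_id=...&page=3, get_page() returns "3".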
def _get_private_team_ids_to_exclude(self, course_module):
"""
Get the list of team ids that should be excluded from the response.
Staff can see all private teams.
Users should not be able to see teams in private teamsets that they are not a member of.
"""
if has_access(self.request.user, 'staff', course_module.id):
return set()
private_teamset_ids = [ts.teamset_id for ts in course_module.teamsets if ts.is_private_managed]
excluded_team_ids = CourseTeam.objects.filter(
course_id=course_module.id,
topic_id__in=private_teamset_ids
).exclude(
membership__user=self.request.user
).values_list('team_id', flat=True)
return set(excluded_team_ids)
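# Rough sketch of the exclusion query built above for a non-staff user
# (the teamset ids are hypothetical):
#
#     CourseTeam.objects.filter(
#         course_id=course_module.id,
#         topic_id__in=['private-teamset-a', 'private-teamset-b'],
#     ).exclude(membership__user=self.request.user)
#
# i.e. every team in a private_managed teamset that the requesting user is not a member of.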
class IsEnrolledOrIsStaff(permissions.BasePermission):
"""Permission that checks to see if the user is enrolled in the course or is staff."""
def has_object_permission(self, request, view, obj):
"""Returns true if the user is enrolled or is staff."""
return has_team_api_access(request.user, obj.course_id)
class IsStaffOrPrivilegedOrReadOnly(IsStaffOrReadOnly):
"""
Permission that checks to see if the user is global staff, course
staff, course admin, or has discussion privileges. If none of those conditions are
met, only read access will be granted.
"""
def has_object_permission(self, request, view, obj):
return (
has_discussion_privileges(request.user, obj.course_id) or
IsCourseStaffInstructor.has_object_permission(self, request, view, obj) or
super().has_object_permission(request, view, obj)
)
class HasSpecificTeamAccess(permissions.BasePermission):
"""
Permission that checks if the user has access to a specific team.
If the user doesn't have access to the team, the endpoint should behave as if
the team does not exist,
"""
def has_object_permission(self, request, view, obj):
if not has_specific_team_access(request.user, obj):
raise Http404
return True
class TeamsDetailView(ExpandableFieldViewMixin, RetrievePatchAPIView):
"""
**Use Cases**
Get, update, or delete a course team's information. Updates are supported
only through merge patch.
**Example Requests**:
GET /api/team/v0/teams/{team_id}
PATCH /api/team/v0/teams/{team_id} "application/merge-patch+json"
DELETE /api/team/v0/teams/{team_id}
**Query Parameters for GET**
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in, the response contains the following fields:
* id: The team's unique identifier.
* discussion_topic_id: The unique id of the comments service
discussion topic associated with this team.
* name: The name of the team.
* course_id: The identifier for the course this team belongs to.
* topic_id: Optionally specifies which topic the team is
associated with.
* date_created: Date and time when the team was created.
* description: A description of the team.
* country: Optionally specifies which country the team is
associated with.
* language: Optionally specifies which language the team is
associated with.
* membership: A list of the users that are members of the team. See
membership endpoint for more detail.
* last_activity_at: The date of the last activity of any team member
within the team.
* organization_protected: Whether the team consists of organization-protected
learners
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in, a 401 error is returned.
If the user is not course or global staff, a 403 error is returned.
If the specified team does not exist, a 404 error is returned.
**Response Values for PATCH**
Only staff can patch teams.
If the user is anonymous or inactive, a 401 is returned.
If the user is logged in and the team does not exist, a 404 is returned.
If the user is not course or global staff, does not have discussion
privileges, and the team does exist, a 403 is returned.
If "application/merge-patch+json" is not the specified content type,
a 415 error is returned.
If the update could not be completed due to validation errors, this
method returns a 400 error with all error messages in the
"field_errors" field of the returned JSON.
**Response Values for DELETE**
Only staff can delete teams. When a team is deleted, all
team memberships associated with that team are also
deleted. Returns 204 on successful deletion.
If the user is anonymous or inactive, a 401 is returned.
If the user is not course or global staff and does not
have discussion privileges, a 403 is returned.
If the user is logged in and the team does not exist, a 404 is returned.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (
permissions.IsAuthenticated,
IsEnrolledOrIsStaff,
HasSpecificTeamAccess,
IsStaffOrPrivilegedOrReadOnly,
)
lookup_field = 'team_id'
serializer_class = CourseTeamSerializer
parser_classes = (MergePatchParser,)
def get_queryset(self):
"""Returns the queryset used to access the given team."""
return CourseTeam.objects.all()
def delete(self, request, team_id):
"""DELETE /api/team/v0/teams/{team_id}"""
team = get_object_or_404(CourseTeam, team_id=team_id)
self.check_object_permissions(request, team)
# Note: list() forces the queryset to be evaluated before delete()
memberships = list(CourseTeamMembership.get_memberships(team_ids=[team_id]))
# Note: also deletes all team memberships associated with this team
team.delete()
log.info('user %d deleted team %s', request.user.id, team_id)
emit_team_event('edx.team.deleted', team.course_id, {
'team_id': team_id,
})
for member in memberships:
emit_team_event('edx.team.learner_removed', team.course_id, {
'team_id': team_id,
'remove_method': 'team_deleted',
'user_id': member.user_id
})
return Response(status=status.HTTP_204_NO_CONTENT)
class TeamsAssignmentsView(GenericAPIView):
"""
**Use Cases**
Get a team's assignments
**Example Requests**:
GET /api/team/v0/teams/{team_id}/assignments
**Response Values for GET**
If the user is logged in, the response is an array of the following data structure:
* display_name: The name of the assignment to display (currently the Unit title)
* location: The jump link to a specific assignment
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If team assignments are not enabled for the course, a 503 is returned.
If the user is not logged in, a 401 error is returned.
If the user is unenrolled or does not have API access, a 403 error is returned.
If the supplied course/team is bad or the user is not permitted to
search in a protected team, a 404 error is returned as if the team does not exist.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (
permissions.IsAuthenticated,
IsEnrolledOrIsStaff,
HasSpecificTeamAccess,
IsStaffOrPrivilegedOrReadOnly,
)
def get(self, request, team_id):
"""GET v0/teams/{team_id_pattern}/assignments"""
course_team = get_object_or_404(CourseTeam, team_id=team_id)
user = request.user
course_id = course_team.course_id
if not are_team_submissions_enabled(course_id):
return Response(status=status.HTTP_503_SERVICE_UNAVAILABLE)
if not has_team_api_access(request.user, course_id):
return Response(status=status.HTTP_403_FORBIDDEN)
if not has_specific_team_access(user, course_team):
return Response(status=status.HTTP_404_NOT_FOUND)
teamset_ora_blocks = get_assignments_for_team(user, course_team)
# Serialize info for display
assignments = [{
'display_name': self._display_name_for_ora_block(block),
'location': self._jump_location_for_block(course_id, block.location)
} for block in teamset_ora_blocks]
return Response(assignments)
def _display_name_for_ora_block(self, block):
""" Get the unit name where the ORA is located for better display naming """
unit = modulestore().get_item(block.parent)
section = modulestore().get_item(unit.parent)
return "{section}: {unit}".format(
section=section.display_name,
unit=unit.display_name
)
def _jump_location_for_block(self, course_id, location):
""" Get the URL for jumping to a designated XBlock in a course """
return reverse('jump_to', kwargs={'course_id': str(course_id), 'location': str(location)})
class TopicListView(GenericAPIView):
"""
**Use Cases**
Retrieve a list of topics associated with a single course.
**Example Requests**
GET /api/team/v0/topics/?course_id={course_id}
**Query Parameters for GET**
* course_id: Filters the result to topics belonging to the given
course (required).
* order_by: Orders the results. Currently only 'name' and 'team_count' are supported;
the default value is 'name'. If 'team_count' is specified, topics are returned first sorted
by number of teams per topic (descending), with a secondary sort of 'name'.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
**Response Values for GET**
If the user is not logged in, a 401 error is returned.
If the course_id is not given or an unsupported value is passed for
order_by, returns a 400 error.
If the user is not logged in, is not enrolled in the course, or is
not course or global staff, returns a 403 error.
If the course does not exist, returns a 404 error.
Otherwise, a 200 response is returned containing the following
fields:
* count: The total number of topics matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the topics matching the request.
* id: The topic's unique identifier.
* name: The name of the topic.
* description: A description of the topic.
* team_count: Number of teams created under the topic. If the requesting user
is enrolled in a degree-bearing institution, the count only includes teams
with the organization_protected attribute set to true. If the requesting user
is not affiliated with any institution, the count only includes teams whose
members are outside of any institutional affiliation.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
pagination_class = TopicsPagination
queryset = []
def get(self, request):
"""GET /api/team/v0/topics/?course_id={course_id}"""
course_id_string = request.query_params.get('course_id', None)
if course_id_string is None:
return Response({
'field_errors': {
'course_id': build_api_error(
ugettext_noop("The supplied course id {course_id} is not valid."),
course_id=course_id_string
)
}
}, status=status.HTTP_400_BAD_REQUEST)
try:
course_id = CourseKey.from_string(course_id_string)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
# Ensure the course exists
course_module = modulestore().get_course(course_id)
if course_module is None: # course is None if not found
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_team_api_access(request.user, course_id):
return Response(status=status.HTTP_403_FORBIDDEN)
ordering = request.query_params.get('order_by', 'name')
if ordering not in ['name', 'team_count']:
return Response({
'developer_message': f"unsupported order_by value {ordering}",
# Translators: 'ordering' is a string describing a way
# of ordering a list. For example, {ordering} may be
# 'name', indicating that the user wants to sort the
# list by lower case name.
'user_message': _("The ordering {ordering} is not supported").format(ordering=ordering),
}, status=status.HTTP_400_BAD_REQUEST)
# Always sort alphabetically, as it will be used as secondary sort
# in the case of "team_count".
organization_protection_status = user_organization_protection_status(request.user, course_id)
topics = get_alphabetical_topics(course_module)
topics = _filter_hidden_private_teamsets(request.user, topics, course_module)
if ordering == 'team_count':
add_team_count(request.user, topics, course_id, organization_protection_status)
topics.sort(key=lambda t: t['team_count'], reverse=True)
page = self.paginate_queryset(topics)
serializer = TopicSerializer(
page,
context={'course_id': course_id, 'user': request.user},
many=True,
)
else:
page = self.paginate_queryset(topics)
# Use the serializer that adds team_count in a bulk operation per page.
serializer = BulkTeamCountTopicSerializer(
page,
context={
'request': request,
'course_id': course_id,
'organization_protection_status': organization_protection_status
},
many=True
)
response = self.get_paginated_response(serializer.data)
response.data['sort_order'] = ordering
return response
def _filter_hidden_private_teamsets(user, teamsets, course_module):
"""
Return a filtered list of teamsets, removing any private teamsets that a user doesn't have access to.
Follows the same logic as `has_specific_teamset_access` but in bulk rather than for one teamset at a time
"""
if has_course_staff_privileges(user, course_module.id):
return teamsets
private_teamset_ids = [teamset.teamset_id for teamset in course_module.teamsets if teamset.is_private_managed]
teamset_ids_user_has_access_to = set(
CourseTeam.objects.filter(
course_id=course_module.id,
topic_id__in=private_teamset_ids,
membership__user=user
).values_list('topic_id', flat=True)
)
return [
teamset for teamset in teamsets
if teamset['type'] != TeamsetType.private_managed.value or teamset['id'] in teamset_ids_user_has_access_to
]
def get_alphabetical_topics(course_module):
"""Return a list of team topics sorted alphabetically.
Arguments:
course_module (xmodule): the course which owns the team topics
Returns:
list: a list of sorted team topics
"""
return sorted(
course_module.teams_configuration.cleaned_data['team_sets'],
key=lambda t: t['name'].lower(),
)
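# Illustrative behaviour sketch (teamset names are hypothetical): topics named
# "beta", "Alpha", and "gamma" come back ordered ["Alpha", "beta", "gamma"],
# since the sort key lower-cases each name before comparing.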
class TopicDetailView(APIView):
"""
**Use Cases**
Retrieve a single topic from a course.
**Example Requests**
GET /api/team/v0/topics/{topic_id},{course_id}
**Query Parameters for GET**
* topic_id: The ID of the topic to retrieve (required).
* course_id: The ID of the course to retrieve the topic from
(required).
**Response Values for GET**
If the user is not logged in, a 401 error is returned.
If the topic_id or course_id is not given, returns a 400 error.
If the user is not enrolled in the course, or is not course or
global staff, returns a 403 error.
If the course does not exist, returns a 404 error.
Otherwise, a 200 response is returned containing the following fields:
* id: The topic's unique identifier.
* name: The name of the topic.
* description: A description of the topic.
* team_count: Number of teams created under the topic. If the requesting user
is enrolled in a degree-bearing institution, the count only includes teams
with the organization_protected attribute set to true. If the requesting user
is not affiliated with any institution, the count only includes teams whose
members are outside of any institutional affiliation.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, topic_id, course_id):
"""GET /api/team/v0/topics/{topic_id},{course_id}/"""
try:
course_id = CourseKey.from_string(course_id)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
# Ensure the course exists
course_module = modulestore().get_course(course_id)
if course_module is None:
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_team_api_access(request.user, course_id):
return Response(status=status.HTTP_403_FORBIDDEN)
try:
topic = course_module.teamsets_by_id[topic_id]
except KeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_specific_teamset_access(request.user, course_module, topic_id):
return Response(status=status.HTTP_404_NOT_FOUND)
organization_protection_status = user_organization_protection_status(request.user, course_id)
serializer = TopicSerializer(
topic.cleaned_data,
context={
'course_id': course_id,
'organization_protection_status': organization_protection_status,
'user': request.user
}
)
return Response(serializer.data)
class MembershipListView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
List teamset team memberships or add a user to a teamset.
**Example Requests**:
GET /api/team/v0/team_membership
POST /api/team/v0/team_membership
**Query Parameters for GET**
At least one of username and team_id must be provided.
* username: Returns membership records only for the specified user.
If the requesting user is not staff then only memberships for
teams associated with courses in which the requesting user is
enrolled are returned.
* team_id: Returns only membership records associated with the
specified team. The requesting user must be staff or enrolled in
the course associated with the team.
* teamset_id: Returns membership records only for the specified teamset.
If teamset_id is specified, course_id must also be specified.
teamset_id and team_id are mutually exclusive. For open and public_managed
teamsets, the user must be staff or enrolled in the course. For
private_managed teamsets, the user must be course staff, or a member of the
specified teamset.
* course_id: Returns membership records only for the specified
course. Username must have access to this course, or else team_id
must be in this course.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, the response contains:
* count: The total number of memberships matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the memberships matching the request.
* user: The user associated with the membership. This field may
contain an expanded or collapsed representation.
* team: The team associated with the membership. This field may
contain an expanded or collapsed representation.
* date_joined: The date and time the membership was created.
* last_activity_at: The date of the last activity of the user
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in and active, a 401 error is returned.
If neither team_id nor username is provided, a 400 error is
returned.
If team_id is provided but the team does not exist, a 404 error is
returned.
If the specified course_id is invalid, a 404 error is returned.
This endpoint uses 404 error codes to avoid leaking information
about team or user existence. Specifically, a 404 error will be
returned if a logged in user specifies a team_id for a course
they are not enrolled in.
Additionally, when username is specified the list of returned
memberships will be filtered to memberships in teams associated
with courses that the requesting user is enrolled in.
If the course specified by course_id does not contain the team
specified by team_id, a 400 error is returned.
If the user is not enrolled in the course specified by course_id,
and does not have staff access to the course, a 400 error is
returned.
**Response Values for POST**
Any logged in user enrolled in a course can enroll themselves in a
team in the course. Course staff, global staff, and discussion
privileged users can enroll any user in a team, with a few
exceptions noted below.
If the user is not logged in and active, a 401 error is returned.
If username and team_id are not provided in the posted JSON, a 400
error is returned describing the missing fields.
If the specified team does not exist, a 404 error is returned.
If the user is not staff, does not have discussion privileges,
and is not enrolled in the course associated with the team they
are trying to join, or if they are trying to add a user other
than themselves to a team, a 404 error is returned. This is to
prevent leaking information about the existence of teams and users.
If the specified user does not exist, a 404 error is returned.
If the user is already a member of a team in the course associated
with the team they are trying to join, a 400 error is returned.
This applies to both staff and students.
If the user is not enrolled in the course associated with the team
they are trying to join, a 400 error is returned. This can occur
when a staff or discussion privileged user posts a request adding
another user to a team.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = MembershipSerializer
def get(self, request): # lint-amnesty, pylint: disable=too-many-statements
"""GET /api/team/v0/team_membership"""
specified_username_or_team = False
username = None
team_ids = None
requested_course_id = None
requested_course_key = None
accessible_course_ids = None
if 'course_id' in request.query_params:
requested_course_id = request.query_params['course_id']
try:
requested_course_key = CourseKey.from_string(requested_course_id)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
if 'team_id' in request.query_params and 'teamset_id' in request.query_params:
return Response(
build_api_error(ugettext_noop("teamset_id and team_id are mutually exclusive options.")),
status=status.HTTP_400_BAD_REQUEST
)
elif 'team_id' in request.query_params:
specified_username_or_team = True
team_id = request.query_params['team_id']
try:
team = CourseTeam.objects.get(team_id=team_id)
team_ids = [team.team_id]
except CourseTeam.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if requested_course_key is not None and requested_course_key != team.course_id:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not has_team_api_access(request.user, team.course_id):
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_specific_team_access(request.user, team):
return Response(status=status.HTTP_403_FORBIDDEN)
elif 'teamset_id' in request.query_params:
if 'course_id' not in request.query_params:
return Response(
build_api_error(ugettext_noop("teamset_id requires course_id to also be provided.")),
status=status.HTTP_400_BAD_REQUEST
)
if not has_team_api_access(request.user, requested_course_key):
return Response(status=status.HTTP_404_NOT_FOUND)
course_module = modulestore().get_course(requested_course_key)
if not course_module:
return Response(status=status.HTTP_404_NOT_FOUND)
specified_username_or_team = True
teamsets = course_module.teams_configuration.teamsets_by_id
teamset_id = request.query_params['teamset_id']
teamset = teamsets.get(teamset_id, None)
if not teamset:
return Response(
build_api_error(ugettext_noop("No teamset found in given course with given id")),
status=status.HTTP_404_NOT_FOUND
)
teamset_teams = CourseTeam.objects.filter(course_id=requested_course_key, topic_id=teamset_id)
if has_course_staff_privileges(request.user, requested_course_key):
teams_with_access = list(teamset_teams)
else:
teams_with_access = [
team for team in teamset_teams
if has_specific_team_access(request.user, team)
]
if teamset.is_private_managed and not teams_with_access:
return Response(
build_api_error(ugettext_noop("No teamset found in given course with given id")),
status=status.HTTP_404_NOT_FOUND
)
team_ids = [team.team_id for team in teams_with_access]
if 'username' in request.query_params:
specified_username_or_team = True
username = request.query_params['username']
if not request.user.is_staff:
enrolled_courses = (
CourseEnrollment.enrollments_for_user(request.user).values_list('course_id', flat=True)
)
staff_courses = (
CourseAccessRole.objects.filter(user=request.user, role='staff').values_list('course_id', flat=True)
)
accessible_course_ids = [item for sublist in (enrolled_courses, staff_courses) for item in sublist]
if requested_course_id is not None and requested_course_key not in accessible_course_ids:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not specified_username_or_team:
return Response(
build_api_error(ugettext_noop("username or (team_id or teamset_id) must be specified.")),
status=status.HTTP_400_BAD_REQUEST
)
course_keys = None
if requested_course_key is not None:
course_keys = [requested_course_key]
elif accessible_course_ids is not None:
course_keys = accessible_course_ids
queryset = CourseTeamMembership.get_memberships(username, course_keys, team_ids)
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
def post(self, request):
"""POST /api/team/v0/team_membership"""
field_errors = {}
if 'username' not in request.data:
field_errors['username'] = build_api_error(ugettext_noop("Username is required."))
if 'team_id' not in request.data:
field_errors['team_id'] = build_api_error(ugettext_noop("Team id is required."))
if field_errors:
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
try:
team = CourseTeam.objects.get(team_id=request.data['team_id'])
except CourseTeam.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
username = request.data['username']
if not has_team_api_access(request.user, team.course_id, access_username=username):
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_specific_team_access(request.user, team):
return Response(status=status.HTTP_404_NOT_FOUND)
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
course_module = modulestore().get_course(team.course_id)
# This should use `calc_max_team_size` instead of `default_max_team_size` (TODO MST-32).
max_team_size = course_module.teams_configuration.default_max_team_size
if max_team_size is not None and team.users.count() >= max_team_size:
return Response(
build_api_error(ugettext_noop("This team is already full.")),
status=status.HTTP_400_BAD_REQUEST
)
if not can_user_modify_team(request.user, team):
return Response(
build_api_error(ugettext_noop("You can't join an instructor managed team.")),
status=status.HTTP_403_FORBIDDEN
)
try:
membership = team.add_user(user)
emit_team_event(
'edx.team.learner_added',
team.course_id,
{
'team_id': team.team_id,
'user_id': user.id,
'add_method': 'joined_from_team_view' if user == request.user else 'added_by_another_user'
}
)
except AlreadyOnTeamInTeamset:
return Response(
build_api_error(
ugettext_noop("The user {username} is already a member of a team in this teamset."),
username=username
),
status=status.HTTP_400_BAD_REQUEST
)
except NotEnrolledInCourseForTeam:
return Response(
build_api_error(
ugettext_noop("The user {username} is not enrolled in the course associated with this team."),
username=username
),
status=status.HTTP_400_BAD_REQUEST
)
serializer = self.get_serializer(instance=membership)
return Response(serializer.data)
class MembershipDetailView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
Gets individual course team memberships or removes a user from a course team.
**Example Requests**:
GET /api/team/v0/team_membership/{team_id},{username}
DELETE /api/team/v0/team_membership/{team_id},{username}
**Query Parameters for GET**
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, or is course or global staff
the response contains:
* user: The user associated with the membership. This field may
contain an expanded or collapsed representation.
* team: The team associated with the membership. This field may
contain an expanded or collapsed representation.
* date_joined: The date and time the membership was created.
* last_activity_at: The date of the last activity of any team member
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in and active, a 401 error is returned.
If specified team does not exist, a 404 error is returned.
If the user is logged in but is not enrolled in the course
associated with the specified team, or is not staff, a 404 error is
returned. This avoids leaking information about course or team
existence.
If the membership does not exist, a 404 error is returned.
**Response Values for DELETE**
Any logged in user enrolled in a course can remove themselves from
a team in the course. Course staff, global staff, and discussion
privileged users can remove any user from a team. Successfully
deleting a membership will return a 204 response with no content.
If the user is not logged in and active, a 401 error is returned.
If the specified team or username does not exist, a 404 error is
returned.
If the user is not staff or a discussion privileged user and is
attempting to remove another user from a team, a 404 error is
returned. This prevents leaking information about team and user
existence.
If the membership does not exist, a 404 error is returned.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = MembershipSerializer
def get_team(self, team_id):
"""Returns the team with team_id, or throws Http404 if it does not exist."""
try:
return CourseTeam.objects.get(team_id=team_id)
except CourseTeam.DoesNotExist:
raise Http404 # lint-amnesty, pylint: disable=raise-missing-from
def get_membership(self, username, team):
"""Returns the membership for the given user and team, or throws Http404 if it does not exist."""
try:
return CourseTeamMembership.objects.get(user__username=username, team=team)
except CourseTeamMembership.DoesNotExist:
raise Http404 # lint-amnesty, pylint: disable=raise-missing-from
def get(self, request, team_id, username):
"""GET /api/team/v0/team_membership/{team_id},{username}"""
team = self.get_team(team_id)
if not has_team_api_access(request.user, team.course_id):
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_specific_team_access(request.user, team):
return Response(status=status.HTTP_404_NOT_FOUND)
membership = self.get_membership(username, team)
serializer = self.get_serializer(instance=membership)
return Response(serializer.data)
def delete(self, request, team_id, username):
"""DELETE /api/team/v0/team_membership/{team_id},{username}"""
team = self.get_team(team_id)
if not has_team_api_access(request.user, team.course_id, access_username=username):
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_specific_team_access(request.user, team):
return Response(status=status.HTTP_404_NOT_FOUND)
if not can_user_modify_team(request.user, team):
return Response(
build_api_error(ugettext_noop("You can't leave an instructor managed team.")),
status=status.HTTP_403_FORBIDDEN
)
membership = self.get_membership(username, team)
removal_method = 'self_removal'
if 'admin' in request.query_params:
removal_method = 'removed_by_admin'
membership.delete()
emit_team_event(
'edx.team.learner_removed',
team.course_id,
{
'team_id': team.team_id,
'user_id': membership.user.id,
'remove_method': removal_method
}
)
return Response(status=status.HTTP_204_NO_CONTENT)
class MembershipBulkManagementView(GenericAPIView):
"""
View for uploading and downloading team membership CSVs.
"""
authentication_classes = (BearerAuthentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, **_kwargs):
"""
Download CSV with team membership data for given course run.
"""
self.check_access()
response = HttpResponse(content_type='text/csv')
filename = "team-membership_{}_{}_{}.csv".format(
self.course.id.org, self.course.id.course, self.course.id.run
)
response['Content-Disposition'] = f'attachment; filename="{filename}"'
load_team_membership_csv(self.course, response)
return response
def post(self, request, **_kwargs):
"""
Process uploaded CSV to modify team memberships for given course run.
"""
self.check_access()
inputfile_handle = request.FILES['csv']
team_import_manager = TeamMembershipImportManager(self.course)
team_import_manager.set_team_membership_from_csv(inputfile_handle)
if team_import_manager.import_succeeded:
msg = f"{team_import_manager.number_of_learners_assigned} learners were affected."
return JsonResponse({'message': msg}, status=status.HTTP_201_CREATED)
else:
return JsonResponse({
'errors': team_import_manager.validation_errors
}, status=status.HTTP_400_BAD_REQUEST)
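# Illustrative upload sketch (the host, URL path, and file name are assumptions;
# only the 'team_membership_bulk_management' route name appears in this file, and
# the view reads the uploaded file from a multipart field named "csv"):
#
#     curl -X POST \
#       -H "Authorization: Bearer <access_token>" \
#       -F "csv=@team_membership.csv" \
#       "https://lms.example.com/<team_membership_bulk_management URL for the course run>"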
def check_access(self):
"""
Raises 403 if user does not have access to this endpoint.
"""
if not has_course_staff_privileges(self.request.user, self.course.id):
raise PermissionDenied(
"To manage team membership of {}, you must be course staff.".format(
self.course.id
)
)
@cached_property
def course(self):
"""
Return a CourseBlock based on the `course_id` kwarg.
If invalid or not found, raise 404.
"""
course_id_string = self.kwargs.get('course_id')
if not course_id_string:
raise Http404('No course key provided.')
try:
course_id = CourseKey.from_string(course_id_string)
except InvalidKeyError:
raise Http404(f'Invalid course key: {course_id_string}') # lint-amnesty, pylint: disable=raise-missing-from
course_module = modulestore().get_course(course_id)
if not course_module:
raise Http404(f'Course not found: {course_id}')
return course_module | en | 0.887511 | HTTP endpoints for the Teams API. # lint-amnesty, pylint: disable=imported-auth-user # pylint: disable=unused-argument Emits signal after the team is saved. # Don't emit events when we are first creating the team. Paginate topics. Paginate the user's teams. View methods related to the teams dashboard. Renders the teams dashboard, which is shown on the "Teams" tab. Raises a 404 if the course specified by course_id does not exist, the user is not registered for the course, or the teams feature is not enabled. # Even though sorting is done outside of the serializer, sort_order needs to be passed # to the serializer so that the paginated results indicate how they were sorted. # We have some frontend logic that needs to know if we have any open, public, or managed teamsets, # and it's easier to just figure that out here when we have them all already # Paginate and serialize topic data # BulkTeamCountPaginatedTopicSerializer will add team counts to the topics in a single # bulk operation per page. # pylint: disable=unsupported-assignment-operation # It is necessary to pass both privileged and staff because only privileged users can # administer discussion threads, but both privileged and staff users are allowed to create # multiple teams (since they are not automatically added to teams upon creation). # pylint: disable=translation-of-non-string # Assignments are feature-flagged Serialize and paginate objects in a queryset. Arguments: pagination_cls (pagination.Paginator class): Django Rest Framework Paginator subclass. queryset (QuerySet): Django queryset to serialize/paginate. serializer_cls (serializers.Serializer class): Django Rest Framework Serializer subclass. serializer_ctx (dict): Context dictionary to pass to the serializer Returns: dict # Django Rest Framework v3 requires that we pass the request # into the serializer's context if the serialize contains # hyperlink fields. # Instantiate the paginator and use it to paginate the queryset # Serialize the page # Use the paginator to construct the response data # This will use the pagination subclass for the view to add additional # fields to the response. # For example, if the input data is a list, the output data would # be a dictionary with keys "count", "next", "previous", and "results" # (where "results" is set to the value of the original list) **Use Cases** Get or create a course team. **Example Requests**: GET /api/team/v0/teams POST /api/team/v0/teams **Query Parameters for GET** * course_id: Filters the result to teams belonging to the given course. Required. * topic_id: Filters the result to teams associated with the given topic. * text_search: Searches for full word matches on the name, description, country, and language fields. NOTES: Search is on full names for countries and languages, not the ISO codes. Text_search cannot be requested along with with order_by. * order_by: Cannot be called along with with text_search. Must be one of the following: * name: Orders results by case insensitive team name (default). * open_slots: Orders results by most open slots (for tie-breaking, last_activity_at is used, with most recent first). * last_activity_at: Orders result by team activity, with most active first (for tie-breaking, open_slots is used, with most open slots first). * username: Return teams whose membership contains the given user. * page_size: Number of results to return per page. * page: Page number to retrieve. 
* expand: Comma separated list of types for which to return expanded representations. Supports "user" and "team". **Response Values for GET** If the user is logged in and enrolled, the response contains: * count: The total number of teams matching the request. * next: The URL to the next page of results, or null if this is the last page. * previous: The URL to the previous page of results, or null if this is the first page. * num_pages: The total number of pages in the result. * results: A list of the teams matching the request. * id: The team's unique identifier. * discussion_topic_id: The unique id of the comments service discussion topic associated with this team. * name: The name of the team. * course_id: The identifier for the course this team belongs to. * topic_id: Optionally specifies which topic the team is associated with. * date_created: Date and time when the team was created. * description: A description of the team. * country: Optionally specifies which country the team is associated with. * language: Optionally specifies which language the team is associated with. * last_activity_at: The date of the last activity of any team member within the team. * membership: A list of the users that are members of the team. See membership endpoint for more detail. * organization_protected: Whether the team consists of organization-protected learners For all text fields, clients rendering the values should take care to HTML escape them to avoid script injections, as the data is stored exactly as specified. The intention is that plain text is supported, not HTML. If the user is not logged in, a 401 error is returned. If the user is not enrolled in the course specified by course_id or is not course or global staff, a 403 error is returned. If the specified course_id is not valid or the user attempts to use an unsupported query parameter, a 400 error is returned. If the response does not exist, a 404 error is returned. For example, the course_id may not reference a real course or the page number may be beyond the last page. If the server is unable to connect to Elasticsearch, and the text_search parameter is supplied, a 503 error is returned. If the requesting user is a learner, the learner would only see organization protected set of teams if the learner is enrolled in a degree bearing institution. Otherwise, the learner will only see organization unprotected set of teams. **Response Values for POST** Any logged in user who has verified their email address can create a team in an open teamset. The format mirrors that of a GET for an individual team, but does not include the id, date_created, or membership fields. id is automatically computed based on name. If the user is not logged in, a 401 error is returned. If the user is not enrolled in the course, is not course or global staff, or does not have discussion privileges a 403 error is returned. If the course_id is not valid, or the topic_id is missing, or extra fields are included in the request, a 400 error is returned. If the specified course does not exist, a 404 error is returned. If the specified teamset does not exist, a 404 error is returned. If the specified teamset does exist, but the requesting user shouldn't be able to see it, a 404 is returned. 
# BearerAuthentication must come first to return a 401 for unauthenticated users GET /api/team/v0/teams/ # Ensure the course exists # We need to manually exclude some potential private_managed teams from results, because # it doesn't appear that the search supports "field__in" style lookups # Non-staff users should not be able to see private_managed teams that they are not on. # Staff shouldn't have any excluded teams. # MySQL does case-insensitive order_by # hide private_managed courses from non-staff users that aren't members of those teams # Translators: 'ordering' is a string describing a way # of ordering a list. For example, {ordering} may be # 'name', indicating that the user wants to sort the # list by lower case name. POST /api/team/v0/teams/ #Handle field errors and check that the course exists # Ensure the course exists # The user has to have access to this teamset at this point, so we can return 403 # and not leak the existance of a private teamset # Course and global staff, as well as discussion "privileged" users, will not automatically # be added to a team when they create it. They are allowed to create multiple teams. # Add the creating user to the team. Returns page number specified in args, params, or defaults to 1. # This code is taken from within the GenericAPIView#paginate_queryset method. # We need need access to the page outside of that method for our paginate_search_results method Get the list of team ids that should be excluded from the response. Staff can see all private teams. Users should not be able to see teams in private teamsets that they are not a member of. Permission that checks to see if the user is enrolled in the course or is staff. Returns true if the user is enrolled or is staff. Permission that checks to see if the user is global staff, course staff, course admin, or has discussion privileges. If none of those conditions are met, only read access will be granted. Permission that checks if the user has access to a specific team. If the user doesn't have access to the team, the endpoint should behave as if the team does not exist, **Use Cases** Get, update, or delete a course team's information. Updates are supported only through merge patch. **Example Requests**: GET /api/team/v0/teams/{team_id}} PATCH /api/team/v0/teams/{team_id} "application/merge-patch+json" DELETE /api/team/v0/teams/{team_id} **Query Parameters for GET** * expand: Comma separated list of types for which to return expanded representations. Supports "user" and "team". **Response Values for GET** If the user is logged in, the response contains the following fields: * id: The team's unique identifier. * discussion_topic_id: The unique id of the comments service discussion topic associated with this team. * name: The name of the team. * course_id: The identifier for the course this team belongs to. * topic_id: Optionally specifies which topic the team is associated with. * date_created: Date and time when the team was created. * description: A description of the team. * country: Optionally specifies which country the team is associated with. * language: Optionally specifies which language the team is associated with. * membership: A list of the users that are members of the team. See membership endpoint for more detail. * last_activity_at: The date of the last activity of any team member within the team. 
* organization_protected: Whether the team consists of organization-protected learners For all text fields, clients rendering the values should take care to HTML escape them to avoid script injections, as the data is stored exactly as specified. The intention is that plain text is supported, not HTML. If the user is not logged in, a 401 error is returned. If the user is not course or global staff, a 403 error is returned. If the specified team does not exist, a 404 error is returned. **Response Values for PATCH** Only staff can patch teams. If the user is anonymous or inactive, a 401 is returned. If the user is logged in and the team does not exist, a 404 is returned. If the user is not course or global staff, does not have discussion privileges, and the team does exist, a 403 is returned. If "application/merge-patch+json" is not the specified content type, a 415 error is returned. If the update could not be completed due to validation errors, this method returns a 400 error with all error messages in the "field_errors" field of the returned JSON. **Response Values for DELETE** Only staff can delete teams. When a team is deleted, all team memberships associated with that team are also deleted. Returns 204 on successful deletion. If the user is anonymous or inactive, a 401 is returned. If the user is not course or global staff and does not have discussion privileges, a 403 is returned. If the user is logged in and the team does not exist, a 404 is returned. Returns the queryset used to access the given team. DELETE /api/team/v0/teams/{team_id} # Note: list() forces the queryset to be evualuated before delete() # Note: also deletes all team memberships associated with this team **Use Cases** Get a team's assignments **Example Requests**: GET /api/team/v0/teams/{team_id}/assignments **Response Values for GET** If the user is logged in, the response is an array of the following data strcuture: * display_name: The name of the assignment to display (currently the Unit title) * location: The jump link to a specific assignments For all text fields, clients rendering the values should take care to HTML escape them to avoid script injections, as the data is stored exactly as specified. The intention is that plain text is supported, not HTML. If team assignments are not enabled for course, a 503 is returned. If the user is not logged in, a 401 error is returned. If the user is unenrolled or does not have API access, a 403 error is returned. If the supplied course/team is bad or the user is not permitted to search in a protected team, a 404 error is returned as if the team does not exist. GET v0/teams/{team_id_pattern}/assignments # Serialize info for display Get the unit name where the ORA is located for better display naming Get the URL for jumping to a designated XBlock in a course **Use Cases** Retrieve a list of topics associated with a single course. **Example Requests** GET /api/team/v0/topics/?course_id={course_id} **Query Parameters for GET** * course_id: Filters the result to topics belonging to the given course (required). * order_by: Orders the results. Currently only 'name' and 'team_count' are supported; the default value is 'name'. If 'team_count' is specified, topics are returned first sorted by number of teams per topic (descending), with a secondary sort of 'name'. * page_size: Number of results to return per page. * page: Page number to retrieve. **Response Values for GET** If the user is not logged in, a 401 error is returned. 
If the course_id is not given or an unsupported value is passed for order_by, returns a 400 error. If the user is not logged in, is not enrolled in the course, or is not course or global staff, returns a 403 error. If the course does not exist, returns a 404 error. Otherwise, a 200 response is returned containing the following fields: * count: The total number of topics matching the request. * next: The URL to the next page of results, or null if this is the last page. * previous: The URL to the previous page of results, or null if this is the first page. * num_pages: The total number of pages in the result. * results: A list of the topics matching the request. * id: The topic's unique identifier. * name: The name of the topic. * description: A description of the topic. * team_count: Number of teams created under the topic. If the requesting user is enrolled into a degree bearing institution, the count only include the teams with organization_protected attribute true. If the requesting user is not affiliated with any institutions, the teams included in the count would only be those teams whose members are outside of institutions affliation. GET /api/team/v0/topics/?course_id={course_id} # Ensure the course exists # course is None if not found # Translators: 'ordering' is a string describing a way # of ordering a list. For example, {ordering} may be # 'name', indicating that the user wants to sort the # list by lower case name. # Always sort alphabetically, as it will be used as secondary sort # in the case of "team_count". # Use the serializer that adds team_count in a bulk operation per page. Return a filtered list of teamsets, removing any private teamsets that a user doesn't have access to. Follows the same logic as `has_specific_teamset_access` but in bulk rather than for one teamset at a time Return a list of team topics sorted alphabetically. Arguments: course_module (xmodule): the course which owns the team topics Returns: list: a list of sorted team topics **Use Cases** Retrieve a single topic from a course. **Example Requests** GET /api/team/v0/topics/{topic_id},{course_id} **Query Parameters for GET** * topic_id: The ID of the topic to retrieve (required). * course_id: The ID of the course to retrieve the topic from (required). **Response Values for GET** If the user is not logged in, a 401 error is returned. If the topic_id course_id are not given or an unsupported value is passed for order_by, returns a 400 error. If the user is not enrolled in the course, or is not course or global staff, returns a 403 error. If the course does not exist, returns a 404 error. Otherwise, a 200 response is returned containing the following fields: * id: The topic's unique identifier. * name: The name of the topic. * description: A description of the topic. * team_count: Number of teams created under the topic. If the requesting user is enrolled into a degree bearing institution, the count only include the teams with organization_protected attribute true. If the requesting user is not affiliated with any institutions, the teams included in the count would only be those teams whose members are outside of institutions affliation. GET /api/team/v0/topics/{topic_id},{course_id}/ # Ensure the course exists **Use Cases** List teamset team memberships or add a user to a teamset. **Example Requests**: GET /api/team/v0/team_membership POST /api/team/v0/team_membership **Query Parameters for GET** At least one of username and team_id must be provided. 
* username: Returns membership records only for the specified user. If the requesting user is not staff then only memberships for teams associated with courses in which the requesting user is enrolled are returned. * team_id: Returns only membership records associated with the specified team. The requesting user must be staff or enrolled in the course associated with the team. * teamset_id: Returns membership records only for the specified teamset. if teamset_id is specified, course_id must also be specified. teamset_id and team_id are mutually exclusive. For open and public_managed teamsets, the user must be staff or enrolled in the course. For private_managed teamsets, the user must be course staff, or a member of the specified teamset. * course_id: Returns membership records only for the specified course. Username must have access to this course, or else team_id must be in this course. * page_size: Number of results to return per page. * page: Page number to retrieve. * expand: Comma separated list of types for which to return expanded representations. Supports "user" and "team". **Response Values for GET** If the user is logged in and enrolled, the response contains: * count: The total number of memberships matching the request. * next: The URL to the next page of results, or null if this is the last page. * previous: The URL to the previous page of results, or null if this is the first page. * num_pages: The total number of pages in the result. * results: A list of the memberships matching the request. * user: The user associated with the membership. This field may contain an expanded or collapsed representation. * team: The team associated with the membership. This field may contain an expanded or collapsed representation. * date_joined: The date and time the membership was created. * last_activity_at: The date of the last activity of the user within the team. For all text fields, clients rendering the values should take care to HTML escape them to avoid script injections, as the data is stored exactly as specified. The intention is that plain text is supported, not HTML. If the user is not logged in and active, a 401 error is returned. If neither team_id nor username are provided, a 400 error is returned. If team_id is provided but the team does not exist, a 404 error is returned. If the specified course_id is invalid, a 404 error is returned. This endpoint uses 404 error codes to avoid leaking information about team or user existence. Specifically, a 404 error will be returned if a logged in user specifies a team_id for a course they are not enrolled in. Additionally, when username is specified the list of returned memberships will be filtered to memberships in teams associated with courses that the requesting user is enrolled in. If the course specified by course_id does not contain the team specified by team_id, a 400 error is returned. If the user is not enrolled in the course specified by course_id, and does not have staff access to the course, a 400 error is returned. **Response Values for POST** Any logged in user enrolled in a course can enroll themselves in a team in the course. Course staff, global staff, and discussion privileged users can enroll any user in a team, with a few exceptions noted below. If the user is not logged in and active, a 401 error is returned. If username and team are not provided in the posted JSON, a 400 error is returned describing the missing fields. If the specified team does not exist, a 404 error is returned. 
If the user is not staff, does not have discussion privileges, and is not enrolled in the course associated with the team they are trying to join, or if they are trying to add a user other than themselves to a team, a 404 error is returned. This is to prevent leaking information about the existence of teams and users. If the specified user does not exist, a 404 error is returned. If the user is already a member of a team in the course associated with the team they are trying to join, a 400 error is returned. This applies to both staff and students. If the user is not enrolled in the course associated with the team they are trying to join, a 400 error is returned. This can occur when a staff or discussion privileged user posts a request adding another user to a team. # lint-amnesty, pylint: disable=too-many-statements GET /api/team/v0/team_membership POST /api/team/v0/team_membership # This should use `calc_max_team_size` instead of `default_max_team_size` (TODO MST-32). **Use Cases** Gets individual course team memberships or removes a user from a course team. **Example Requests**: GET /api/team/v0/team_membership/{team_id},{username} DELETE /api/team/v0/team_membership/{team_id},{username} **Query Parameters for GET** * expand: Comma separated list of types for which to return expanded representations. Supports "user" and "team". **Response Values for GET** If the user is logged in and enrolled, or is course or global staff the response contains: * user: The user associated with the membership. This field may contain an expanded or collapsed representation. * team: The team associated with the membership. This field may contain an expanded or collapsed representation. * date_joined: The date and time the membership was created. * last_activity_at: The date of the last activity of any team member within the team. For all text fields, clients rendering the values should take care to HTML escape them to avoid script injections, as the data is stored exactly as specified. The intention is that plain text is supported, not HTML. If the user is not logged in and active, a 401 error is returned. If specified team does not exist, a 404 error is returned. If the user is logged in but is not enrolled in the course associated with the specified team, or is not staff, a 404 error is returned. This avoids leaking information about course or team existence. If the membership does not exist, a 404 error is returned. **Response Values for DELETE** Any logged in user enrolled in a course can remove themselves from a team in the course. Course staff, global staff, and discussion privileged users can remove any user from a team. Successfully deleting a membership will return a 204 response with no content. If the user is not logged in and active, a 401 error is returned. If the specified team or username does not exist, a 404 error is returned. If the user is not staff or a discussion privileged user and is attempting to remove another user from a team, a 404 error is returned. This prevents leaking information about team and user existence. If the membership does not exist, a 404 error is returned. Returns the team with team_id, or throws Http404 if it does not exist. # lint-amnesty, pylint: disable=raise-missing-from Returns the membership for the given user and team, or throws Http404 if it does not exist. 
# lint-amnesty, pylint: disable=raise-missing-from GET /api/team/v0/team_membership/{team_id},{username} DELETE /api/team/v0/team_membership/{team_id},{username} View for uploading and downloading team membership CSVs. Download CSV with team membership data for given course run. Process uploaded CSV to modify team memberships for given course run. Raises 403 if user does not have access to this endpoint. Return a CourseBlock based on the `course_id` kwarg. If invalid or not found, raise 404. # lint-amnesty, pylint: disable=raise-missing-from | 1.623611 | 2 |
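Editorial sketch (not part of the dataset record above): the comments field just shown documents the team API's REST surface — team listing, topics, and membership endpoints with their query parameters and status codes. As a rough illustration only, the snippet below shows how a client could exercise two of the documented GET endpoints; the host, bearer token, and course key are hypothetical placeholders, and the exact parameter and payload names are assumptions rather than values taken from the record.

import requests

LMS_HOST = "https://lms.example.com"                  # hypothetical host
HEADERS = {"Authorization": "Bearer <access-token>"}  # hypothetical credential

def list_teams(course_id, page=1, page_size=10):
    # GET /api/team/v0/teams/ -- paginated team listing for a course
    params = {"course_id": course_id, "page": page, "page_size": page_size}
    resp = requests.get(f"{LMS_HOST}/api/team/v0/teams/", params=params, headers=HEADERS)
    resp.raise_for_status()   # 401/403/404 semantics are described in the docstrings above
    return resp.json()["results"]

def list_topics(course_id, order_by="name"):
    # GET /api/team/v0/topics/?course_id=... -- topics with a team_count per topic
    params = {"course_id": course_id, "order_by": order_by}
    resp = requests.get(f"{LMS_HOST}/api/team/v0/topics/", params=params, headers=HEADERS)
    resp.raise_for_status()
    return resp.json()["results"]

print(list_topics("course-v1:Org+Course+Run"))   # placeholder course key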
python/ray/data/tests/test_optimize.py | dsctt/ray | 0 | 6631684 | <reponame>dsctt/ray
import pytest
import pandas as pd
import os
import ray
from ray.data.context import DatasetContext
from ray.data.datasource.csv_datasource import CSVDatasource
from ray.tests.conftest import * # noqa
def expect_stages(pipe, num_stages_expected, stage_names):
stats = pipe.stats()
for name in stage_names:
name = " " + name + ":"
assert name in stats, (name, stats)
assert len(pipe._optimized_stages) == num_stages_expected, pipe._optimized_stages
def test_optimize_fuse(ray_start_regular_shared):
context = DatasetContext.get_current()
def build_pipe():
pipe = ray.data.range(3).repeat(2)
pipe = pipe.map_batches(lambda x: x)
pipe = pipe.map_batches(lambda x: x)
pipe = pipe.random_shuffle_each_window()
results = [sorted(p.take()) for p in pipe.iter_epochs()]
assert results == [[0, 1, 2], [0, 1, 2]], results
return pipe
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = True
context.optimize_fuse_shuffle_stages = True
expect_stages(
build_pipe(),
1,
["read->map_batches->map_batches->random_shuffle_map", "random_shuffle_reduce"],
)
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = False
context.optimize_fuse_shuffle_stages = True
expect_stages(
build_pipe(),
1,
[
"read",
"map_batches->map_batches->random_shuffle_map",
"random_shuffle_reduce",
],
)
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = False
context.optimize_fuse_shuffle_stages = False
expect_stages(
build_pipe(),
2,
[
"read",
"map_batches->map_batches",
"random_shuffle_map",
"random_shuffle_reduce",
],
)
context.optimize_fuse_stages = False
context.optimize_fuse_read_stages = False
context.optimize_fuse_shuffle_stages = False
expect_stages(
build_pipe(),
3,
[
"read",
"map_batches",
"map_batches",
"random_shuffle_map",
"random_shuffle_reduce",
],
)
def test_optimize_incompatible_stages(ray_start_regular_shared):
context = DatasetContext.get_current()
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = True
context.optimize_fuse_shuffle_stages = True
pipe = ray.data.range(3).repeat(2)
pipe = pipe.map_batches(lambda x: x, compute="actors")
pipe = pipe.map_batches(lambda x: x, compute="tasks")
pipe = pipe.random_shuffle_each_window()
pipe.take()
expect_stages(
pipe,
3,
[
"read",
"map_batches",
"map_batches->random_shuffle_map",
"random_shuffle_reduce",
],
)
pipe = ray.data.range(3).repeat(2)
pipe = pipe.map_batches(lambda x: x, compute="tasks")
pipe = pipe.map_batches(lambda x: x, num_cpus=0.75)
pipe = pipe.random_shuffle_each_window()
pipe.take()
expect_stages(
pipe,
3,
[
"read->map_batches",
"map_batches",
"random_shuffle_map",
"random_shuffle_reduce",
],
)
@ray.remote
class Counter:
def __init__(self):
self.value = 0
def increment(self):
self.value += 1
return self.value
def get(self):
return self.value
def reset(self):
self.value = 0
class MySource(CSVDatasource):
def __init__(self, counter):
self.counter = counter
def _read_stream(self, f, path: str, **reader_args):
count = self.counter.increment.remote()
ray.get(count)
for block in CSVDatasource._read_stream(self, f, path, **reader_args):
yield block
def test_optimize_reread_base_data(ray_start_regular_shared, local_path):
context = DatasetContext.get_current()
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = True
context.optimize_fuse_shuffle_stages = True
# Re-read on.
N = 4
df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
path1 = os.path.join(local_path, "test1.csv")
df1.to_csv(path1, index=False, storage_options={})
counter = Counter.remote()
source = MySource(counter)
ds1 = ray.data.read_datasource(source, parallelism=1, paths=path1)
pipe = ds1.repeat(N)
pipe.take()
num_reads = ray.get(counter.get.remote())
assert num_reads == N + 1, num_reads
# Re-read off.
context.optimize_fuse_read_stages = False
ray.get(counter.reset.remote())
ds1 = ray.data.read_datasource(source, parallelism=1, paths=path1)
pipe = ds1.repeat(N)
pipe.take()
num_reads = ray.get(counter.get.remote())
assert num_reads == 1, num_reads
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| import pytest
import pandas as pd
import os
import ray
from ray.data.context import DatasetContext
from ray.data.datasource.csv_datasource import CSVDatasource
from ray.tests.conftest import * # noqa
def expect_stages(pipe, num_stages_expected, stage_names):
stats = pipe.stats()
for name in stage_names:
name = " " + name + ":"
assert name in stats, (name, stats)
assert len(pipe._optimized_stages) == num_stages_expected, pipe._optimized_stages
def test_optimize_fuse(ray_start_regular_shared):
context = DatasetContext.get_current()
def build_pipe():
pipe = ray.data.range(3).repeat(2)
pipe = pipe.map_batches(lambda x: x)
pipe = pipe.map_batches(lambda x: x)
pipe = pipe.random_shuffle_each_window()
results = [sorted(p.take()) for p in pipe.iter_epochs()]
assert results == [[0, 1, 2], [0, 1, 2]], results
return pipe
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = True
context.optimize_fuse_shuffle_stages = True
expect_stages(
build_pipe(),
1,
["read->map_batches->map_batches->random_shuffle_map", "random_shuffle_reduce"],
)
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = False
context.optimize_fuse_shuffle_stages = True
expect_stages(
build_pipe(),
1,
[
"read",
"map_batches->map_batches->random_shuffle_map",
"random_shuffle_reduce",
],
)
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = False
context.optimize_fuse_shuffle_stages = False
expect_stages(
build_pipe(),
2,
[
"read",
"map_batches->map_batches",
"random_shuffle_map",
"random_shuffle_reduce",
],
)
context.optimize_fuse_stages = False
context.optimize_fuse_read_stages = False
context.optimize_fuse_shuffle_stages = False
expect_stages(
build_pipe(),
3,
[
"read",
"map_batches",
"map_batches",
"random_shuffle_map",
"random_shuffle_reduce",
],
)
def test_optimize_incompatible_stages(ray_start_regular_shared):
context = DatasetContext.get_current()
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = True
context.optimize_fuse_shuffle_stages = True
pipe = ray.data.range(3).repeat(2)
pipe = pipe.map_batches(lambda x: x, compute="actors")
pipe = pipe.map_batches(lambda x: x, compute="tasks")
pipe = pipe.random_shuffle_each_window()
pipe.take()
expect_stages(
pipe,
3,
[
"read",
"map_batches",
"map_batches->random_shuffle_map",
"random_shuffle_reduce",
],
)
pipe = ray.data.range(3).repeat(2)
pipe = pipe.map_batches(lambda x: x, compute="tasks")
pipe = pipe.map_batches(lambda x: x, num_cpus=0.75)
pipe = pipe.random_shuffle_each_window()
pipe.take()
expect_stages(
pipe,
3,
[
"read->map_batches",
"map_batches",
"random_shuffle_map",
"random_shuffle_reduce",
],
)
@ray.remote
class Counter:
def __init__(self):
self.value = 0
def increment(self):
self.value += 1
return self.value
def get(self):
return self.value
def reset(self):
self.value = 0
class MySource(CSVDatasource):
def __init__(self, counter):
self.counter = counter
def _read_stream(self, f, path: str, **reader_args):
count = self.counter.increment.remote()
ray.get(count)
for block in CSVDatasource._read_stream(self, f, path, **reader_args):
yield block
def test_optimize_reread_base_data(ray_start_regular_shared, local_path):
context = DatasetContext.get_current()
context.optimize_fuse_stages = True
context.optimize_fuse_read_stages = True
context.optimize_fuse_shuffle_stages = True
# Re-read on.
N = 4
df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
path1 = os.path.join(local_path, "test1.csv")
df1.to_csv(path1, index=False, storage_options={})
counter = Counter.remote()
source = MySource(counter)
ds1 = ray.data.read_datasource(source, parallelism=1, paths=path1)
pipe = ds1.repeat(N)
pipe.take()
num_reads = ray.get(counter.get.remote())
assert num_reads == N + 1, num_reads
# Re-read off.
context.optimize_fuse_read_stages = False
ray.get(counter.reset.remote())
ds1 = ray.data.read_datasource(source, parallelism=1, paths=path1)
pipe = ds1.repeat(N)
pipe.take()
num_reads = ray.get(counter.get.remote())
assert num_reads == 1, num_reads
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__])) | en | 0.918434 | # noqa # Re-read on. # Re-read off. | 2.177351 | 2 |
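Editorial sketch (not part of the dataset record above): the test file just shown drives stage fusion through DatasetContext flags and then inspects DatasetPipeline.stats() for fused stage names. A condensed, standalone version of that observation loop, assuming a Ray installation of the same vintage as the test, could look like this:

import ray
from ray.data.context import DatasetContext

ray.init(ignore_reinit_error=True)

ctx = DatasetContext.get_current()
ctx.optimize_fuse_stages = True
ctx.optimize_fuse_read_stages = True
ctx.optimize_fuse_shuffle_stages = True

pipe = ray.data.range(3).repeat(2)
pipe = pipe.map_batches(lambda x: x)
pipe = pipe.random_shuffle_each_window()

for epoch in pipe.iter_epochs():
    print(sorted(epoch.take()))      # [0, 1, 2] for each of the two epochs

# With every fusion flag enabled, the fused pipeline shows up as a single
# stage entry in the report, e.g. "read->map_batches->random_shuffle_map:".
print(pipe.stats())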
run.py | leiyue/microblog | 0 | 6631685 | <gh_stars>0
#!env/Scripts/python
# -*- coding: utf-8 -*-
# @Date : 2016-01-21 12:36
# @Author : leiyue (<EMAIL>)
# @Link : https://leiyue.wordpress.com/
from app import app
app.run()
| #!env/Scripts/python
# -*- coding: utf-8 -*-
# @Date : 2016-01-21 12:36
# @Author : leiyue (<EMAIL>)
# @Link : https://leiyue.wordpress.com/
from app import app
app.run() | en | 0.32384 | #!env/Scripts/python # -*- coding: utf-8 -*- # @Date : 2016-01-21 12:36 # @Author : leiyue (<EMAIL>) # @Link : https://leiyue.wordpress.com/ | 1.076668 | 1 |
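Editorial sketch (not part of the dataset record above): run.py simply imports the Flask application object and starts the development server. A slightly more defensive variant of the same entry point — the host, port, and debug values are arbitrary examples, not taken from the microblog repository — is:

#!env/Scripts/python
# -*- coding: utf-8 -*-
from app import app

if __name__ == '__main__':
    # Guarding app.run() keeps the server from starting if run.py is imported.
    app.run(host='127.0.0.1', port=5000, debug=True)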
setup.py | markreidvfx/pyavb | 21 | 6631686 | <gh_stars>10-100
import sys
import os
from setuptools import setup
import setuptools.command.build_py
from distutils.extension import Extension
PROJECT_METADATA = {
"version": "0.1.0.dev2",
"author": '<NAME>',
"author_email": '<EMAIL>',
"license": 'MIT',
}
METADATA_TEMPLATE = """
__version__ = "{version}"
__author__ = "{author}"
__author_email__ = "{author_email}"
__license__ = "{license}"
"""
sourcefiles = [
"avb/_ext.pyx",
]
extensions =[]
try:
from Cython.Build import cythonize
if int(os.environ.get("PYAVB_BUILD_EXT", '1')):
extensions = cythonize([Extension("avb._ext",
sourcefiles,
language="c++")])
except ImportError as e:
print('unable to build optional cython extension')
class AddMetadata(setuptools.command.build_py.build_py):
"""Stamps PROJECT_METADATA into __init__ files."""
def run(self):
setuptools.command.build_py.build_py.run(self)
if self.dry_run:
return
target_file = os.path.join(self.build_lib, 'avb', "__init__.py")
source_file = os.path.join(os.path.dirname(__file__), 'avb', "__init__.py")
# get the base data from the original file
with open(source_file, 'r') as fi:
src_data = fi.read()
# write that + the suffix to the target file
with open(target_file, 'w') as fo:
fo.write(src_data)
fo.write(METADATA_TEMPLATE.format(**PROJECT_METADATA))
setup(
name='pyavb',
description='A python module for the reading and writing Avid Bin Files files.',
url='https://github.com/markreidvfx/pyavb',
project_urls={
'Source':
'https://github.com/markreidvfx/pyavb',
'Documentation':
'http://pyavb.readthedocs.io',
'Issues':
'https://github.com/markreidvfx/pyavb/issues',
},
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Multimedia :: Graphics',
'Topic :: Multimedia :: Video',
'Topic :: Multimedia :: Video :: Display',
'Topic :: Multimedia :: Video :: Non-Linear Editor',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent',
'Natural Language :: English',
],
keywords='film tv editing editorial edit non-linear edl time',
platforms='any',
packages=[
'avb',
],
cmdclass={'build_py': AddMetadata},
ext_modules = extensions,
extras_require= {'cython' : ['cython']},
**PROJECT_METADATA
)
| import sys
import os
from setuptools import setup
import setuptools.command.build_py
from distutils.extension import Extension
PROJECT_METADATA = {
"version": "0.1.0.dev2",
"author": '<NAME>',
"author_email": '<EMAIL>',
"license": 'MIT',
}
METADATA_TEMPLATE = """
__version__ = "{version}"
__author__ = "{author}"
__author_email__ = "{author_email}"
__license__ = "{license}"
"""
sourcefiles = [
"avb/_ext.pyx",
]
extensions =[]
try:
from Cython.Build import cythonize
if int(os.environ.get("PYAVB_BUILD_EXT", '1')):
extensions = cythonize([Extension("avb._ext",
sourcefiles,
language="c++")])
except ImportError as e:
print('unable to build optional cython extension')
class AddMetadata(setuptools.command.build_py.build_py):
"""Stamps PROJECT_METADATA into __init__ files."""
def run(self):
setuptools.command.build_py.build_py.run(self)
if self.dry_run:
return
target_file = os.path.join(self.build_lib, 'avb', "__init__.py")
source_file = os.path.join(os.path.dirname(__file__), 'avb', "__init__.py")
# get the base data from the original file
with open(source_file, 'r') as fi:
src_data = fi.read()
# write that + the suffix to the target file
with open(target_file, 'w') as fo:
fo.write(src_data)
fo.write(METADATA_TEMPLATE.format(**PROJECT_METADATA))
setup(
name='pyavb',
description='A python module for the reading and writing Avid Bin Files files.',
url='https://github.com/markreidvfx/pyavb',
project_urls={
'Source':
'https://github.com/markreidvfx/pyavb',
'Documentation':
'http://pyavb.readthedocs.io',
'Issues':
'https://github.com/markreidvfx/pyavb/issues',
},
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Multimedia :: Graphics',
'Topic :: Multimedia :: Video',
'Topic :: Multimedia :: Video :: Display',
'Topic :: Multimedia :: Video :: Non-Linear Editor',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent',
'Natural Language :: English',
],
keywords='film tv editing editorial edit non-linear edl time',
platforms='any',
packages=[
'avb',
],
cmdclass={'build_py': AddMetadata},
ext_modules = extensions,
extras_require= {'cython' : ['cython']},
**PROJECT_METADATA
) | en | 0.63826 | __version__ = "{version}" __author__ = "{author}" __author_email__ = "{author_email}" __license__ = "{license}" Stamps PROJECT_METADATA into __init__ files. # get the base data from the original file # write that + the suffix to the target file | 2.007316 | 2 |
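Editorial sketch (not part of the dataset record above): the setup.py just shown makes the Cython extension optional — it is compiled only when Cython can be imported and the PYAVB_BUILD_EXT environment variable is not set to 0. The snippet below isolates that gating pattern with placeholder package and module names; by the same logic, exporting the switch as 0 before running pip install . should skip the compiled extension and fall back to a pure-Python install.

import os
from setuptools import Extension, setup

ext_modules = []
try:
    from Cython.Build import cythonize
    if int(os.environ.get("MYPKG_BUILD_EXT", "1")):          # opt-out switch, placeholder name
        ext_modules = cythonize([Extension("mypkg._ext",      # placeholder module
                                           ["mypkg/_ext.pyx"],
                                           language="c++")])
except ImportError:
    print("unable to build optional cython extension")        # mirrors the message above

setup(
    name="mypkg",                 # placeholder project name
    packages=["mypkg"],
    ext_modules=ext_modules,      # empty list -> pure-Python install
)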
poky-dunfell/meta/lib/oeqa/selftest/cases/imagefeatures.py | lacie-life/YoctoPi | 0 | 6631687 | #
# SPDX-License-Identifier: MIT
#
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, runqemu
from oeqa.utils.sshcontrol import SSHControl
import os
import json
class ImageFeatures(OESelftestTestCase):
test_user = 'tester'
root_user = 'root'
def test_non_root_user_can_connect_via_ssh_without_password(self):
"""
Summary: Check if non root user can connect via ssh without password
Expected: 1. Connection to the image via ssh using root user without providing a password should be allowed.
2. Connection to the image via ssh using tester user without providing a password should be allowed.
Product: oe-core
Author: <NAME> <<EMAIL>>
AutomatedBy: <NAME> <<EMAIL>>
"""
features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh empty-root-password allow-empty-password allow-root-login"\n'
features += 'INHERIT += "extrausers"\n'
features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
self.write_config(features)
# Build a core-image-minimal
bitbake('core-image-minimal')
with runqemu("core-image-minimal") as qemu:
# Attempt to ssh with each user into qemu with empty password
for user in [self.root_user, self.test_user]:
ssh = SSHControl(ip=qemu.ip, logfile=qemu.sshlog, user=user)
status, output = ssh.run("true")
self.assertEqual(status, 0, 'ssh to user %s failed with %s' % (user, output))
def test_all_users_can_connect_via_ssh_without_password(self):
"""
Summary: Check if all users can connect via ssh without password
Expected: 1. Connection to the image via ssh using root user without providing a password should NOT be allowed.
2. Connection to the image via ssh using tester user without providing a password should be allowed.
Product: oe-core
Author: <NAME> <<EMAIL>>
AutomatedBy: <NAME> <<EMAIL>>
"""
features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh allow-empty-password allow-root-login"\n'
features += 'INHERIT += "extrausers"\n'
features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
self.write_config(features)
# Build a core-image-minimal
bitbake('core-image-minimal')
with runqemu("core-image-minimal") as qemu:
# Attempt to ssh with each user into qemu with empty password
for user in [self.root_user, self.test_user]:
ssh = SSHControl(ip=qemu.ip, logfile=qemu.sshlog, user=user)
status, output = ssh.run("true")
if user == 'root':
self.assertNotEqual(status, 0, 'ssh to user root was allowed when it should not have been')
else:
self.assertEqual(status, 0, 'ssh to user tester failed with %s' % output)
def test_clutter_image_can_be_built(self):
"""
Summary: Check if clutter image can be built
Expected: 1. core-image-clutter can be built
Product: oe-core
Author: <NAME> <<EMAIL>>
AutomatedBy: <NAME> <<EMAIL>>
"""
# Build a core-image-clutter
bitbake('core-image-clutter')
def test_wayland_support_in_image(self):
"""
Summary: Check Wayland support in image
Expected: 1. Wayland image can be built
2. Wayland feature can be installed
Product: oe-core
Author: <NAME> <<EMAIL>>
AutomatedBy: <NAME> <<EMAIL>>
"""
distro_features = get_bb_var('DISTRO_FEATURES')
if not ('opengl' in distro_features and 'wayland' in distro_features):
self.skipTest('neither opengl nor wayland present on DISTRO_FEATURES so core-image-weston cannot be built')
# Build a core-image-weston
bitbake('core-image-weston')
def test_bmap(self):
"""
Summary: Check bmap support
Expected: 1. core-image-minimal can be built with bmap support
2. core-image-minimal is sparse
Product: oe-core
Author: <NAME> <<EMAIL>>
"""
features = 'IMAGE_FSTYPES += " ext4 ext4.bmap ext4.bmap.gz"'
self.write_config(features)
image_name = 'core-image-minimal'
bitbake(image_name)
deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
image_path = os.path.join(deploy_dir_image, "%s.ext4" % link_name)
bmap_path = "%s.bmap" % image_path
gzip_path = "%s.gz" % bmap_path
# check if result image, bmap and bmap.gz files are in deploy directory
self.assertTrue(os.path.exists(image_path))
self.assertTrue(os.path.exists(bmap_path))
self.assertTrue(os.path.exists(gzip_path))
# check if result image is sparse
image_stat = os.stat(image_path)
self.assertGreater(image_stat.st_size, image_stat.st_blocks * 512)
# check if the resulting gzip is valid
self.assertTrue(runCmd('gzip -t %s' % gzip_path))
def test_hypervisor_fmts(self):
"""
Summary: Check various hypervisor formats
Expected: 1. core-image-minimal can be built with vmdk, vdi and
qcow2 support.
2. qemu-img says each image has the expected format
Product: oe-core
Author: <NAME> <<EMAIL>>
"""
img_types = [ 'vmdk', 'vdi', 'qcow2' ]
features = ""
for itype in img_types:
features += 'IMAGE_FSTYPES += "wic.%s"\n' % itype
self.write_config(features)
image_name = 'core-image-minimal'
bitbake(image_name)
deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
for itype in img_types:
image_path = os.path.join(deploy_dir_image, "%s.wic.%s" %
(link_name, itype))
# check if result image file is in deploy directory
self.assertTrue(os.path.exists(image_path))
# check if result image is vmdk
sysroot = get_bb_var('STAGING_DIR_NATIVE', 'core-image-minimal')
result = runCmd('qemu-img info --output json %s' % image_path,
native_sysroot=sysroot)
try:
data = json.loads(result.output)
self.assertEqual(data.get('format'), itype,
msg="Unexpected format in '%s'" % (result.output))
except json.decoder.JSONDecodeError:
self.fail("Could not parse '%ss'" % result.output)
def test_long_chain_conversion(self):
"""
Summary: Check for chaining many CONVERSION_CMDs together
Expected: 1. core-image-minimal can be built with
ext4.bmap.gz.bz2.lzo.xz.u-boot and also create a
sha256sum
2. The above image has a valid sha256sum
Product: oe-core
Author: <NAME> <<EMAIL>>
"""
conv = "ext4.bmap.gz.bz2.lzo.xz.u-boot"
features = 'IMAGE_FSTYPES += "%s %s.sha256sum"' % (conv, conv)
self.write_config(features)
image_name = 'core-image-minimal'
bitbake(image_name)
deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
image_path = os.path.join(deploy_dir_image, "%s.%s" %
(link_name, conv))
# check if resulting image is in the deploy directory
self.assertTrue(os.path.exists(image_path))
self.assertTrue(os.path.exists(image_path + ".sha256sum"))
# check if the resulting sha256sum agrees
self.assertTrue(runCmd('cd %s;sha256sum -c %s.%s.sha256sum' %
(deploy_dir_image, link_name, conv)))
def test_image_fstypes(self):
"""
Summary: Check if image of supported image fstypes can be built
Expected: core-image-minimal can be built for various image types
Product: oe-core
Author: <NAME> <<EMAIL>>
"""
image_name = 'core-image-minimal'
all_image_types = set(get_bb_var("IMAGE_TYPES", image_name).split())
blacklist = set(('container', 'elf', 'f2fs', 'multiubi', 'tar.zst'))
img_types = all_image_types - blacklist
config = 'IMAGE_FSTYPES += "%s"\n'\
'MKUBIFS_ARGS ?= "-m 2048 -e 129024 -c 2047"\n'\
'UBINIZE_ARGS ?= "-m 2048 -p 128KiB -s 512"' % ' '.join(img_types)
self.write_config(config)
bitbake(image_name)
deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
for itype in img_types:
image_path = os.path.join(deploy_dir_image, "%s.%s" % (link_name, itype))
# check if result image is in deploy directory
self.assertTrue(os.path.exists(image_path),
"%s image %s doesn't exist" % (itype, image_path))
def test_useradd_static(self):
config = """
USERADDEXTENSION = "useradd-staticids"
USERADD_ERROR_DYNAMIC = "skip"
USERADD_UID_TABLES += "files/static-passwd"
USERADD_GID_TABLES += "files/static-group"
"""
self.write_config(config)
bitbake("core-image-base")
def test_no_busybox_base_utils(self):
config = """
# Enable x11
DISTRO_FEATURES_append = " x11"
# Switch to systemd
DISTRO_FEATURES += "systemd"
VIRTUAL-RUNTIME_init_manager = "systemd"
VIRTUAL-RUNTIME_initscripts = ""
VIRTUAL-RUNTIME_syslog = ""
VIRTUAL-RUNTIME_login_manager = "shadow-base"
DISTRO_FEATURES_BACKFILL_CONSIDERED = "sysvinit"
# Replace busybox
PREFERRED_PROVIDER_virtual/base-utils = "packagegroup-core-base-utils"
VIRTUAL-RUNTIME_base-utils = "packagegroup-core-base-utils"
VIRTUAL-RUNTIME_base-utils-hwclock = "util-linux-hwclock"
VIRTUAL-RUNTIME_base-utils-syslog = ""
# Blacklist busybox
PNBLACKLIST[busybox] = "Don't build this"
"""
self.write_config(config)
bitbake("--graphviz core-image-sato")
def test_image_gen_debugfs(self):
"""
Summary: Check debugfs generation
Expected: 1. core-image-minimal can be built with IMAGE_GEN_DEBUGFS variable set
2. debug filesystem is created when variable set
3. debug symbols available
Product: oe-core
Author: <NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
import glob
image_name = 'core-image-minimal'
features = 'IMAGE_GEN_DEBUGFS = "1"\n'
features += 'IMAGE_FSTYPES_DEBUGFS = "tar.bz2"\n'
features += 'MACHINE = "genericx86-64"\n'
self.write_config(features)
bitbake(image_name)
deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
dbg_tar_file = os.path.join(deploy_dir_image, "*-dbg.rootfs.tar.bz2")
debug_files = glob.glob(dbg_tar_file)
self.assertNotEqual(len(debug_files), 0, 'debug filesystem not generated at %s' % dbg_tar_file)
result = runCmd('cd %s; tar xvf %s' % (deploy_dir_image, dbg_tar_file))
self.assertEqual(result.status, 0, msg='Failed to extract %s: %s' % (dbg_tar_file, result.output))
result = runCmd('find %s -name %s' % (deploy_dir_image, "udevadm"))
self.assertTrue("udevadm" in result.output, msg='Failed to find udevadm: %s' % result.output)
dbg_symbols_targets = result.output.splitlines()
self.assertTrue(dbg_symbols_targets, msg='Failed to split udevadm: %s' % dbg_symbols_targets)
for t in dbg_symbols_targets:
result = runCmd('objdump --syms %s | grep debug' % t)
self.assertTrue("debug" in result.output, msg='Failed to find debug symbol: %s' % result.output)
| #
# SPDX-License-Identifier: MIT
#
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, runqemu
from oeqa.utils.sshcontrol import SSHControl
import os
import json
class ImageFeatures(OESelftestTestCase):
test_user = 'tester'
root_user = 'root'
def test_non_root_user_can_connect_via_ssh_without_password(self):
"""
Summary: Check if non root user can connect via ssh without password
Expected: 1. Connection to the image via ssh using root user without providing a password should be allowed.
2. Connection to the image via ssh using tester user without providing a password should be allowed.
Product: oe-core
Author: <NAME> <<EMAIL>>
AutomatedBy: <NAME> <<EMAIL>>
"""
features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh empty-root-password allow-empty-password allow-root-login"\n'
features += 'INHERIT += "extrausers"\n'
features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
self.write_config(features)
# Build a core-image-minimal
bitbake('core-image-minimal')
with runqemu("core-image-minimal") as qemu:
# Attempt to ssh with each user into qemu with empty password
for user in [self.root_user, self.test_user]:
ssh = SSHControl(ip=qemu.ip, logfile=qemu.sshlog, user=user)
status, output = ssh.run("true")
self.assertEqual(status, 0, 'ssh to user %s failed with %s' % (user, output))
def test_all_users_can_connect_via_ssh_without_password(self):
"""
Summary: Check if all users can connect via ssh without password
Expected: 1. Connection to the image via ssh using root user without providing a password should NOT be allowed.
2. Connection to the image via ssh using tester user without providing a password should be allowed.
Product: oe-core
Author: <NAME> <<EMAIL>>
AutomatedBy: <NAME> <<EMAIL>>
"""
features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh allow-empty-password allow-root-login"\n'
features += 'INHERIT += "extrausers"\n'
features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
self.write_config(features)
# Build a core-image-minimal
bitbake('core-image-minimal')
with runqemu("core-image-minimal") as qemu:
# Attempt to ssh with each user into qemu with empty password
for user in [self.root_user, self.test_user]:
ssh = SSHControl(ip=qemu.ip, logfile=qemu.sshlog, user=user)
status, output = ssh.run("true")
if user == 'root':
self.assertNotEqual(status, 0, 'ssh to user root was allowed when it should not have been')
else:
self.assertEqual(status, 0, 'ssh to user tester failed with %s' % output)
def test_clutter_image_can_be_built(self):
"""
Summary: Check if clutter image can be built
Expected: 1. core-image-clutter can be built
Product: oe-core
Author: <NAME> <<EMAIL>>
AutomatedBy: <NAME> <<EMAIL>>
"""
# Build a core-image-clutter
bitbake('core-image-clutter')
def test_wayland_support_in_image(self):
"""
Summary: Check Wayland support in image
Expected: 1. Wayland image can be built
2. Wayland feature can be installed
Product: oe-core
Author: <NAME> <<EMAIL>>
AutomatedBy: <NAME> <<EMAIL>>
"""
distro_features = get_bb_var('DISTRO_FEATURES')
if not ('opengl' in distro_features and 'wayland' in distro_features):
self.skipTest('neither opengl nor wayland present on DISTRO_FEATURES so core-image-weston cannot be built')
# Build a core-image-weston
bitbake('core-image-weston')
def test_bmap(self):
"""
Summary: Check bmap support
Expected: 1. core-image-minimal can be built with bmap support
2. core-image-minimal is sparse
Product: oe-core
Author: <NAME> <<EMAIL>>
"""
features = 'IMAGE_FSTYPES += " ext4 ext4.bmap ext4.bmap.gz"'
self.write_config(features)
image_name = 'core-image-minimal'
bitbake(image_name)
deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
image_path = os.path.join(deploy_dir_image, "%s.ext4" % link_name)
bmap_path = "%s.bmap" % image_path
gzip_path = "%s.gz" % bmap_path
# check if result image, bmap and bmap.gz files are in deploy directory
self.assertTrue(os.path.exists(image_path))
self.assertTrue(os.path.exists(bmap_path))
self.assertTrue(os.path.exists(gzip_path))
# check if result image is sparse
image_stat = os.stat(image_path)
self.assertGreater(image_stat.st_size, image_stat.st_blocks * 512)
# check if the resulting gzip is valid
self.assertTrue(runCmd('gzip -t %s' % gzip_path))
def test_hypervisor_fmts(self):
"""
Summary: Check various hypervisor formats
Expected: 1. core-image-minimal can be built with vmdk, vdi and
qcow2 support.
2. qemu-img says each image has the expected format
Product: oe-core
Author: <NAME> <<EMAIL>>
"""
img_types = [ 'vmdk', 'vdi', 'qcow2' ]
features = ""
for itype in img_types:
features += 'IMAGE_FSTYPES += "wic.%s"\n' % itype
self.write_config(features)
image_name = 'core-image-minimal'
bitbake(image_name)
deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
for itype in img_types:
image_path = os.path.join(deploy_dir_image, "%s.wic.%s" %
(link_name, itype))
# check if result image file is in deploy directory
self.assertTrue(os.path.exists(image_path))
# check if result image is vmdk
sysroot = get_bb_var('STAGING_DIR_NATIVE', 'core-image-minimal')
result = runCmd('qemu-img info --output json %s' % image_path,
native_sysroot=sysroot)
try:
data = json.loads(result.output)
self.assertEqual(data.get('format'), itype,
msg="Unexpected format in '%s'" % (result.output))
except json.decoder.JSONDecodeError:
self.fail("Could not parse '%ss'" % result.output)
def test_long_chain_conversion(self):
"""
Summary: Check for chaining many CONVERSION_CMDs together
Expected: 1. core-image-minimal can be built with
ext4.bmap.gz.bz2.lzo.xz.u-boot and also create a
sha256sum
2. The above image has a valid sha256sum
Product: oe-core
Author: <NAME> <<EMAIL>>
"""
conv = "ext4.bmap.gz.bz2.lzo.xz.u-boot"
features = 'IMAGE_FSTYPES += "%s %s.sha256sum"' % (conv, conv)
self.write_config(features)
image_name = 'core-image-minimal'
bitbake(image_name)
deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
image_path = os.path.join(deploy_dir_image, "%s.%s" %
(link_name, conv))
# check if resulting image is in the deploy directory
self.assertTrue(os.path.exists(image_path))
self.assertTrue(os.path.exists(image_path + ".sha256sum"))
# check if the resulting sha256sum agrees
self.assertTrue(runCmd('cd %s;sha256sum -c %s.%s.sha256sum' %
(deploy_dir_image, link_name, conv)))
def test_image_fstypes(self):
"""
Summary: Check if image of supported image fstypes can be built
Expected: core-image-minimal can be built for various image types
Product: oe-core
Author: <NAME> <<EMAIL>>
"""
image_name = 'core-image-minimal'
all_image_types = set(get_bb_var("IMAGE_TYPES", image_name).split())
blacklist = set(('container', 'elf', 'f2fs', 'multiubi', 'tar.zst'))
img_types = all_image_types - blacklist
config = 'IMAGE_FSTYPES += "%s"\n'\
'MKUBIFS_ARGS ?= "-m 2048 -e 129024 -c 2047"\n'\
'UBINIZE_ARGS ?= "-m 2048 -p 128KiB -s 512"' % ' '.join(img_types)
self.write_config(config)
bitbake(image_name)
deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
for itype in img_types:
image_path = os.path.join(deploy_dir_image, "%s.%s" % (link_name, itype))
# check if result image is in deploy directory
self.assertTrue(os.path.exists(image_path),
"%s image %s doesn't exist" % (itype, image_path))
def test_useradd_static(self):
config = """
USERADDEXTENSION = "useradd-staticids"
USERADD_ERROR_DYNAMIC = "skip"
USERADD_UID_TABLES += "files/static-passwd"
USERADD_GID_TABLES += "files/static-group"
"""
self.write_config(config)
bitbake("core-image-base")
def test_no_busybox_base_utils(self):
config = """
# Enable x11
DISTRO_FEATURES_append = " x11"
# Switch to systemd
DISTRO_FEATURES += "systemd"
VIRTUAL-RUNTIME_init_manager = "systemd"
VIRTUAL-RUNTIME_initscripts = ""
VIRTUAL-RUNTIME_syslog = ""
VIRTUAL-RUNTIME_login_manager = "shadow-base"
DISTRO_FEATURES_BACKFILL_CONSIDERED = "sysvinit"
# Replace busybox
PREFERRED_PROVIDER_virtual/base-utils = "packagegroup-core-base-utils"
VIRTUAL-RUNTIME_base-utils = "packagegroup-core-base-utils"
VIRTUAL-RUNTIME_base-utils-hwclock = "util-linux-hwclock"
VIRTUAL-RUNTIME_base-utils-syslog = ""
# Blacklist busybox
PNBLACKLIST[busybox] = "Don't build this"
"""
self.write_config(config)
bitbake("--graphviz core-image-sato")
def test_image_gen_debugfs(self):
"""
Summary: Check debugfs generation
Expected: 1. core-image-minimal can be built with IMAGE_GEN_DEBUGFS variable set
2. debug filesystem is created when variable set
3. debug symbols available
Product: oe-core
Author: <NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
import glob
image_name = 'core-image-minimal'
features = 'IMAGE_GEN_DEBUGFS = "1"\n'
features += 'IMAGE_FSTYPES_DEBUGFS = "tar.bz2"\n'
features += 'MACHINE = "genericx86-64"\n'
self.write_config(features)
bitbake(image_name)
deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
dbg_tar_file = os.path.join(deploy_dir_image, "*-dbg.rootfs.tar.bz2")
debug_files = glob.glob(dbg_tar_file)
self.assertNotEqual(len(debug_files), 0, 'debug filesystem not generated at %s' % dbg_tar_file)
result = runCmd('cd %s; tar xvf %s' % (deploy_dir_image, dbg_tar_file))
self.assertEqual(result.status, 0, msg='Failed to extract %s: %s' % (dbg_tar_file, result.output))
result = runCmd('find %s -name %s' % (deploy_dir_image, "udevadm"))
self.assertTrue("udevadm" in result.output, msg='Failed to find udevadm: %s' % result.output)
dbg_symbols_targets = result.output.splitlines()
self.assertTrue(dbg_symbols_targets, msg='Failed to split udevadm: %s' % dbg_symbols_targets)
for t in dbg_symbols_targets:
result = runCmd('objdump --syms %s | grep debug' % t)
self.assertTrue("debug" in result.output, msg='Failed to find debug symbol: %s' % result.output)
| en | 0.665616 | # # SPDX-License-Identifier: MIT # Summary: Check if non root user can connect via ssh without password Expected: 1. Connection to the image via ssh using root user without providing a password should be allowed. 2. Connection to the image via ssh using tester user without providing a password should be allowed. Product: oe-core Author: <NAME> <<EMAIL>> AutomatedBy: <NAME> <<EMAIL>> # Build a core-image-minimal # Attempt to ssh with each user into qemu with empty password Summary: Check if all users can connect via ssh without password Expected: 1. Connection to the image via ssh using root user without providing a password should NOT be allowed. 2. Connection to the image via ssh using tester user without providing a password should be allowed. Product: oe-core Author: <NAME> <<EMAIL>> AutomatedBy: <NAME> <<EMAIL>> # Build a core-image-minimal # Attempt to ssh with each user into qemu with empty password Summary: Check if clutter image can be built Expected: 1. core-image-clutter can be built Product: oe-core Author: <NAME> <<EMAIL>> AutomatedBy: <NAME> <<EMAIL>> # Build a core-image-clutter Summary: Check Wayland support in image Expected: 1. Wayland image can be build 2. Wayland feature can be installed Product: oe-core Author: <NAME> <<EMAIL>> AutomatedBy: <NAME> <<EMAIL>> # Build a core-image-weston Summary: Check bmap support Expected: 1. core-image-minimal can be build with bmap support 2. core-image-minimal is sparse Product: oe-core Author: <NAME> <<EMAIL>> # check if result image, bmap and bmap.gz files are in deploy directory # check if result image is sparse # check if the resulting gzip is valid Summary: Check various hypervisor formats Expected: 1. core-image-minimal can be built with vmdk, vdi and qcow2 support. 2. qemu-img says each image has the expected format Product: oe-core Author: <NAME> <<EMAIL>> # check if result image file is in deploy directory # check if result image is vmdk Summary: Check for chaining many CONVERSION_CMDs together Expected: 1. core-image-minimal can be built with ext4.bmap.gz.bz2.lzo.xz.u-boot and also create a sha256sum 2. The above image has a valid sha256sum Product: oe-core Author: <NAME> <<EMAIL>> # check if resulting image is in the deploy directory # check if the resulting sha256sum agrees Summary: Check if image of supported image fstypes can be built Expected: core-image-minimal can be built for various image types Product: oe-core Author: <NAME> <<EMAIL>> # check if result image is in deploy directory USERADDEXTENSION = "useradd-staticids" USERADD_ERROR_DYNAMIC = "skip" USERADD_UID_TABLES += "files/static-passwd" USERADD_GID_TABLES += "files/static-group" # Enable x11 DISTRO_FEATURES_append = " x11" # Switch to systemd DISTRO_FEATURES += "systemd" VIRTUAL-RUNTIME_init_manager = "systemd" VIRTUAL-RUNTIME_initscripts = "" VIRTUAL-RUNTIME_syslog = "" VIRTUAL-RUNTIME_login_manager = "shadow-base" DISTRO_FEATURES_BACKFILL_CONSIDERED = "sysvinit" # Replace busybox PREFERRED_PROVIDER_virtual/base-utils = "packagegroup-core-base-utils" VIRTUAL-RUNTIME_base-utils = "packagegroup-core-base-utils" VIRTUAL-RUNTIME_base-utils-hwclock = "util-linux-hwclock" VIRTUAL-RUNTIME_base-utils-syslog = "" # Blacklist busybox PNBLACKLIST[busybox] = "Don't build this" Summary: Check debugfs generation Expected: 1. core-image-minimal can be build with IMAGE_GEN_DEBUGFS variable set 2. debug filesystem is created when variable set 3. debug symbols available Product: oe-core Author: <NAME> <<EMAIL>> <NAME> <<EMAIL>> | 2.398176 | 2 |
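Editorial sketch (not part of the dataset record above): every test in the class just shown follows the same shape — write extra configuration with self.write_config(), build an image with bitbake(), then assert on variables or deployed artifacts via get_bb_var() and runCmd(). A minimal additional case in that style, with the wic image type chosen purely for illustration, would be:

import os

from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import bitbake, get_bb_var


class ImageFeaturesSketch(OESelftestTestCase):

    def test_wic_image_is_deployed(self):
        # Request one extra image type, build, then check the deploy directory.
        self.write_config('IMAGE_FSTYPES += "wic"')
        image_name = 'core-image-minimal'
        bitbake(image_name)
        deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
        link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
        image_path = os.path.join(deploy_dir_image, "%s.wic" % link_name)
        self.assertTrue(os.path.exists(image_path),
                        "wic image %s doesn't exist" % image_path)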
pyamg/aggregation/__init__.py | Alexey-Voronin/pyamg-1 | 0 | 6631688 | <reponame>Alexey-Voronin/pyamg-1<gh_stars>0
"""Aggregation-based AMG"""
from .adaptive import *
from .aggregate import *
from .aggregation import *
from .tentative import *
from .smooth import *
from .rootnode import *
__all__ = [s for s in dir() if not s.startswith('_')]
| """Aggregation-based AMG"""
from .adaptive import *
from .aggregate import *
from .aggregation import *
from .tentative import *
from .smooth import *
from .rootnode import *
__all__ = [s for s in dir() if not s.startswith('_')] | en | 0.889414 | Aggregation-based AMG | 1.314104 | 1 |
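Editorial sketch (not part of the dataset record above): the package __init__ just shown re-exports everything its wildcard imports pulled in, using a dir()-based __all__ filter. A self-contained illustration of what that one-liner does, with module contents invented for the example:

# contents of a hypothetical mypkg/__init__.py
import math as _math            # underscore prefix keeps the helper private

tolerance = 1e-8

def solve(x):
    return _math.sqrt(x) if x > tolerance else 0.0

__all__ = [s for s in dir() if not s.startswith('_')]
# __all__ == ['solve', 'tolerance'] -- dunder names and _math are filtered out,
# so "from mypkg import *" only exposes the public symbols.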
src/azure-cli/azure/cli/command_modules/appconfig/commands.py | xaliciayang/azure-cli | 4 | 6631689 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import CliCommandType
from ._client_factory import cf_configstore, cf_configstore_operations
from ._format import (configstore_credential_format,
configstore_identity_format,
configstore_output_format,
keyvalue_entry_format,
featureflag_entry_format,
featurefilter_entry_format)
def load_command_table(self, _):
configstore_custom_util = CliCommandType(
operations_tmpl='azure.cli.command_modules.appconfig.custom#{}',
table_transformer=configstore_output_format,
client_factory=cf_configstore
)
configstore_identity_util = CliCommandType(
operations_tmpl='azure.cli.command_modules.appconfig.custom#{}',
table_transformer=configstore_identity_format,
client_factory=cf_configstore
)
configstore_credential_util = CliCommandType(
operations_tmpl='azure.cli.command_modules.appconfig.custom#{}',
table_transformer=configstore_credential_format,
client_factory=cf_configstore
)
configstore_keyvalue_util = CliCommandType(
operations_tmpl='azure.cli.command_modules.appconfig.keyvalue#{}',
table_transformer=keyvalue_entry_format,
client_factory=cf_configstore_operations
)
def get_custom_sdk(custom_module, client_factory, table_transformer):
"""Returns a CliCommandType instance with specified operation template based on the given custom module name.
This is useful when the command is not defined in the default 'custom' module but instead in a module under
'operations' package."""
return CliCommandType(
operations_tmpl='azure.cli.command_modules.appconfig.{}#'.format(custom_module) + '{}',
client_factory=client_factory,
table_transformer=table_transformer
)
# Management Plane Commands
with self.command_group('appconfig', configstore_custom_util) as g:
g.command('create', 'create_configstore')
g.command('delete', 'delete_configstore')
g.command('update', 'update_configstore')
g.command('list', 'list_configstore')
g.show_command('show', 'show_configstore')
with self.command_group('appconfig credential', configstore_credential_util) as g:
g.command('list', 'list_credential')
g.command('regenerate', 'regenerate_credential')
with self.command_group('appconfig identity', configstore_identity_util, is_preview=True) as g:
g.command('assign', 'assign_managed_identity')
g.command('remove', 'remove_managed_identity')
g.show_command('show', 'show_managed_identity')
with self.command_group('appconfig revision', configstore_keyvalue_util) as g:
g.command('list', 'list_revision')
# Data Plane Commands
with self.command_group('appconfig kv', configstore_keyvalue_util) as g:
g.command('set', 'set_key')
g.command('delete', 'delete_key')
g.show_command('show', 'show_key')
g.command('list', 'list_key')
g.command('lock', 'lock_key')
g.command('unlock', 'unlock_key')
g.command('restore', 'restore_key')
g.command('import', 'import_config')
g.command('export', 'export_config')
g.command('set-keyvault', 'set_keyvault')
# FeatureManagement Commands
with self.command_group('appconfig feature',
custom_command_type=get_custom_sdk('feature',
cf_configstore_operations,
featureflag_entry_format),
is_preview=True) as g:
g.custom_command('set', 'set_feature')
g.custom_command('delete', 'delete_feature')
g.custom_show_command('show', 'show_feature')
g.custom_command('list', 'list_feature')
g.custom_command('lock', 'lock_feature')
g.custom_command('unlock', 'unlock_feature')
g.custom_command('enable', 'enable_feature')
g.custom_command('disable', 'disable_feature')
with self.command_group('appconfig feature filter',
custom_command_type=get_custom_sdk('feature',
cf_configstore_operations,
featurefilter_entry_format)) as g:
g.custom_command('add', 'add_filter')
g.custom_command('delete', 'delete_filter')
g.custom_show_command('show', 'show_filter')
g.custom_command('list', 'list_filter')
| # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import CliCommandType
from ._client_factory import cf_configstore, cf_configstore_operations
from ._format import (configstore_credential_format,
configstore_identity_format,
configstore_output_format,
keyvalue_entry_format,
featureflag_entry_format,
featurefilter_entry_format)
def load_command_table(self, _):
configstore_custom_util = CliCommandType(
operations_tmpl='azure.cli.command_modules.appconfig.custom#{}',
table_transformer=configstore_output_format,
client_factory=cf_configstore
)
configstore_identity_util = CliCommandType(
operations_tmpl='azure.cli.command_modules.appconfig.custom#{}',
table_transformer=configstore_identity_format,
client_factory=cf_configstore
)
configstore_credential_util = CliCommandType(
operations_tmpl='azure.cli.command_modules.appconfig.custom#{}',
table_transformer=configstore_credential_format,
client_factory=cf_configstore
)
configstore_keyvalue_util = CliCommandType(
operations_tmpl='azure.cli.command_modules.appconfig.keyvalue#{}',
table_transformer=keyvalue_entry_format,
client_factory=cf_configstore_operations
)
def get_custom_sdk(custom_module, client_factory, table_transformer):
"""Returns a CliCommandType instance with specified operation template based on the given custom module name.
This is useful when the command is not defined in the default 'custom' module but instead in a module under
'operations' package."""
return CliCommandType(
operations_tmpl='azure.cli.command_modules.appconfig.{}#'.format(custom_module) + '{}',
client_factory=client_factory,
table_transformer=table_transformer
)
# Management Plane Commands
with self.command_group('appconfig', configstore_custom_util) as g:
g.command('create', 'create_configstore')
g.command('delete', 'delete_configstore')
g.command('update', 'update_configstore')
g.command('list', 'list_configstore')
g.show_command('show', 'show_configstore')
with self.command_group('appconfig credential', configstore_credential_util) as g:
g.command('list', 'list_credential')
g.command('regenerate', 'regenerate_credential')
with self.command_group('appconfig identity', configstore_identity_util, is_preview=True) as g:
g.command('assign', 'assign_managed_identity')
g.command('remove', 'remove_managed_identity')
g.show_command('show', 'show_managed_identity')
with self.command_group('appconfig revision', configstore_keyvalue_util) as g:
g.command('list', 'list_revision')
# Data Plane Commands
with self.command_group('appconfig kv', configstore_keyvalue_util) as g:
g.command('set', 'set_key')
g.command('delete', 'delete_key')
g.show_command('show', 'show_key')
g.command('list', 'list_key')
g.command('lock', 'lock_key')
g.command('unlock', 'unlock_key')
g.command('restore', 'restore_key')
g.command('import', 'import_config')
g.command('export', 'export_config')
g.command('set-keyvault', 'set_keyvault')
# FeatureManagement Commands
with self.command_group('appconfig feature',
custom_command_type=get_custom_sdk('feature',
cf_configstore_operations,
featureflag_entry_format),
is_preview=True) as g:
g.custom_command('set', 'set_feature')
g.custom_command('delete', 'delete_feature')
g.custom_show_command('show', 'show_feature')
g.custom_command('list', 'list_feature')
g.custom_command('lock', 'lock_feature')
g.custom_command('unlock', 'unlock_feature')
g.custom_command('enable', 'enable_feature')
g.custom_command('disable', 'disable_feature')
with self.command_group('appconfig feature filter',
custom_command_type=get_custom_sdk('feature',
cf_configstore_operations,
featurefilter_entry_format)) as g:
g.custom_command('add', 'add_filter')
g.custom_command('delete', 'delete_filter')
g.custom_show_command('show', 'show_filter')
g.custom_command('list', 'list_filter')
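# --------------------------------------------------------------------------------------------
# Hypothetical sketch (not part of the original file): what the operations_tmpl produced by
# get_custom_sdk above expands to for one of the feature commands registered in this table.
# Only string formatting is shown; nothing new is assumed about the azure-cli runtime itself.
def _expanded_operation(custom_module, operation_name):
    # Same template as get_custom_sdk: the module part is filled in immediately,
    # the trailing '{}' is left for the CLI core to fill with the operation name.
    tmpl = 'azure.cli.command_modules.appconfig.{}#'.format(custom_module) + '{}'
    return tmpl.format(operation_name)

assert _expanded_operation('feature', 'set_feature') == \
    'azure.cli.command_modules.appconfig.feature#set_feature'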
nw_products/app/api.py | mateifl/microdjango | 0 | 6631690 | <filename>nw_products/app/api.py
from tastypie.resources import ModelResource, ALL_WITH_RELATIONS
from tastypie import fields
from tastypie.authorization import Authorization
from app.models import Product, Category, Supplier
class CategoryResource(ModelResource):
class Meta:
queryset = Category.objects.all()
resource_name = "category"
filtering = {"category_name": ('exact', 'startswith',)}
class ProductResource(ModelResource):
category = fields.ForeignKey(CategoryResource, "category")
class Meta:
queryset = Product.objects.all()
resource_name = "product"
filtering = {"product_name": ('exact', 'startswith',), "category": ALL_WITH_RELATIONS}
authorization = Authorization()
class SupplierResource(ModelResource):
class Meta:
queryset = Supplier.objects.all()
resource_name = "supplier"
indel_prediction/model_testing/test_model.py | kaskamal/SelfTarget | 20 | 6631691 | import pandas as pd
import numpy as np
import random
from mpi4py import MPI
import io, os, sys, csv, time
from multiprocessing import Process, Pipe
from scipy.stats import pearsonr, spearmanr
from sklearn.model_selection import KFold
from sklearn import metrics
from selftarget.data import getSampleSelectors, getAllDataDirs
from selftarget.oligo import loadOldNewMapping, partitionGuides, getFileForOligoIdx, getOligoIdxFromId
from selftarget.profile import getProfileCounts
from predictor.model import writeTheta, readTheta, printAndFlush, trainModelParallel, testModelParallel, recordPredictions
comm = MPI.COMM_WORLD
mpi_rank = comm.Get_rank()
mpi_size = comm.Get_size()
NUM_OLIGO = -1
FOLD = 2
OUT_PREFIX = 'model_output'
def getModelDevGuideSet(guideset_file):
f = io.open(guideset_file)
guideset = [line[:-1] for line in f]
    f.close()
return np.array(guideset)
def loadFeatureLabels(oligo_id):
data = loadOligoFeaturesAndReadCounts(oligo_id, [], FEATURES_DIR)
return [x for x in data.columns if x not in ['Oligo ID','Indel','Frac Sample Reads','Left','Right','Inserted Seq']]
def runAnalysis(guideset_file = 'model_development_guideset.txt'):
    guideset = getModelDevGuideSet(guideset_file)
sample_names = ['ST_Feb_2018_CAS9_12NA_1600X_DPI7', 'ST_June_2017_K562_800x_LV7A_DPI7', 'ST_June_2017_K562_800x_LV7B_DPI7']
feature_columns= loadFeatureLabels([x for x in guideset][0])
if NUM_OLIGO != -1:
guideset = random.sample([x for x in guideset],NUM_OLIGO)
kf = KFold(n_splits=2)
for i,(train_idx, test_idx) in enumerate(kf.split(guideset)):
printAndFlush('Cross Validation Fold %d' % (i+1))
train_set, test_set = np.array(guideset)[train_idx], np.array(guideset)[test_idx]
outfile = OUT_THETA_FILE + '_cf%d.txt' % i
theta0 = None
tmp_file = 'tmp_%s_%d.txt' % (OUT_THETA_FILE, i)
if os.path.isfile(tmp_file):
printAndFlush('Loading from previous tmp file')
theta0, rec_train_set, feature_columns = readTheta(tmp_file)
test_set = [x for x in ([y for y in train_set] + [y for y in test_set]) if x not in rec_train_set][:int(NUM_OLIGO/2)]
train_set = rec_train_set
printAndFlush('Training')
theta = trainModelParallel(train_set, sample_names, feature_columns, theta0, cv_idx=i)
testModelParallel( theta, train_set, sample_names, feature_columns ) #Check final training result with lambda=0
writeTheta(OUT_THETA_FILE + '_cf%d.txt' % i, feature_columns, theta, train_set)
recordPredictions(OUT_PROFILE_DIR + '_train_%d' % i, theta, train_set, feature_columns )
printAndFlush('Testing')
testModelParallel( theta, test_set, sample_names, feature_columns )
recordPredictions(OUT_PROFILE_DIR + '_test_%d' % i, theta, test_set, feature_columns )
if __name__ == '__main__':
if len(sys.argv) > 1: NUM_OLIGO = eval(sys.argv[1])
if len(sys.argv) > 3: REG_CONST = eval(sys.argv[3])
if len(sys.argv) > 4: OUT_PREFIX = sys.argv[4]
else:
rand_val = np.random.normal(loc=0.0, scale=1.0)
rand_val = comm.bcast(rand_val, root=0)
OUT_PREFIX = 'model_output_%d_%.8f_%.3f' % (NUM_OLIGO, REG_CONST, rand_val )
OUT_PROFILE_DIR = OUT_PREFIX + '_predictions'
OUT_THETA_FILE = OUT_PREFIX + '_theta.txt'
runAnalysis()
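# Illustrative sketch (not part of the original script): how the 2-fold KFold split used in
# runAnalysis partitions a guide set into train/test halves. The toy guide names are made up.
def _demo_kfold_split():
    toy_guides = np.array(['Oligo_%d' % i for i in range(6)])
    for fold, (train_idx, test_idx) in enumerate(KFold(n_splits=2).split(toy_guides)):
        print('fold %d: train=%s test=%s' % (fold, toy_guides[train_idx], toy_guides[test_idx]))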
quaternion_calc/__init__.py | dhaystead/Quaternion | 0 | 6631692 | import quaternion_calc.util
from quaternion_calc.quat import Quaternion
MeshToolkit/File/Obj.py | microy/MeshToolkit | 4 | 6631693 | <reponame>microy/MeshToolkit
# -*- coding:utf-8 -*-
#
# Import OBJ files
#
# External dependencies
import os
import numpy as np
import MeshToolkit as mtk
# Import a mesh from a OBJ / SMF file
def ReadObj( filename ) :
# Initialisation
vertices = []
faces = []
normals = []
colors = []
texcoords = []
material = ""
# Read each line in the file
for line in open( filename, "r" ) :
# Empty line / Comment
if line.isspace() or line.startswith( '#' ) : continue
# Split values in the line
values = line.split()
# Vertex
if values[0] == 'v' :
vertices.append( list( map( float, values[1:4] ) ) )
# Face (index starts at 1)
elif values[0] == 'f' :
faces.append( list( map( int, [ (v.split('/'))[0] for v in values[1:4] ] ) ) )
# Normal
elif values[0] == 'vn' :
normals.append( list( map( float, values[1:4] ) ) )
# Color
elif values[0] == 'c' :
colors.append( list( map( float, values[1:4] ) ) )
# Texture
elif values[0] == 'vt' :
texcoords.append( list( map( float, values[1:3] ) ) )
# Texture filename
elif values[0] == 'mtllib' :
material = values[1]
# Remap face indices
faces = np.array( faces ) - 1
# Return the final mesh
return mtk.Mesh( os.path.splitext(os.path.basename(filename))[0], vertices, faces, colors, material, texcoords, [], normals )
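# Hypothetical usage sketch (not part of the original module): parse a tiny hand-written
# OBJ file with ReadObj. Assumes the MeshToolkit package is importable; the file name
# 'triangle.obj' is made up for the demo.
if __name__ == '__main__' :
	# One triangle: three vertices and a single face
	with open( 'triangle.obj', 'w' ) as obj_file :
		obj_file.write( 'v 0 0 0\nv 1 0 0\nv 0 1 0\nf 1 2 3\n' )
	mesh = ReadObj( 'triangle.obj' )
	print( mesh )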
pywingui/wtl_core.py | Answeror/lit | 0 | 6631694 | ## Copyright (c) 2003 <NAME>
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal in the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
## LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
## OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
## WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
from .windows import *
from ctypes import *
from .winuser import MAKEINTRESOURCE, LoadIcon, LoadCursor
import sys
import weakref
quit = False
class HandleMap(dict):
"""a special weakreference map for mapping window handles to python instances
when a python instance becomes garbage, the __dispose__ method of HandleMap
is called, deleting the handle from the map and freeing OS resources by calling
the method stored in the __dispose__ variable of the garbage python instance.
This latter method should be bound to a windows-free-routine corresponding to the
type of the handle"""
def freeze_method(self, fndisp, dbgstr):
def freeze_func(wref = 0, fndisp = fndisp, dbgstr = dbgstr):
self.__dispose__(handle, wref, fndisp, dbgstr)
return freeze_func
def __setitem__(self, handle, value):
# watch the lambda closure, freezing the binding of:
# - fndisp to the __dispose__ variable of the value object
# - handle to the provided windows-handle in the first actual parameter
lmdisp = lambda wr, fndisp = value.__dispose__, dbgstr = str(value.__class__): self.__dispose__(handle, wr, fndisp, dbgstr)
#lmdisp = self.freeze_method(value.__dispose__, str(value.__class__))
dict.__setitem__(self, handle, weakref.ref(value, lmdisp))
def __getitem__(self, handle):
return dict.__getitem__(self, handle)() # weak refs are 'called' to return the referred object
def get(self, k, d = None):
#~ if self.has_key(k):
if k in self:
return self[k]
else:
return d
def __dispose__(self, handle, wr, fndisp, dbgstr): # callback of weakref wr, called when wr() is garbage
global quit
self.__delitem__(handle)
if not quit:
try:
fndisp(handle)
except:
print(('ERROR HandleMap %d, %s, %s, %s' % (handle, repr(wr), repr(fndisp), dbgstr)))
hndlMap = HandleMap() #contains the mapping from python instances (of Window) to windows HANDLES
createHndlMap = {} #used while handling messages during CreateWindow(Ex)
def globalWndProc(hWnd, nMsg, wParam, lParam):
"""The purpose of globalWndProc is route messages coming in on the global queue
to the appropriate python Window instance for handling.
Also it establishes the mapping from python instances to window HANDLES by processing the WM_NCCREATE message
"""
try:
if nMsg == WM_NCCREATE:
            #a venster window is being created through CreateWindowEx,
#establish the mapping between the windows HANDLE and the Window instance
#the window instance is to be found in the createHndlMap by looking up
#the key that was given as a parameter to CreateWindowEx
#~ print createHndlMap
createStruct = CREATESTRUCT.from_address(int(lParam))
#~ window = createHndlMap.get(int(createStruct.lpCreateParams), None)
window = createHndlMap.get(createStruct.lpCreateParams, None)
#~ window = createHndlMap.get(cast(lParam, POINTER(CREATESTRUCT)).lpCreateParams, None)
if window:
#it is a venster window being created, establish the mapping:
WindowsObject.__init__(window, hWnd)
handled = False
result = None
window = hndlMap.get(hWnd, None)
if window:
#this is a known venster window, let the window process its own msgs
handled, result = window.WndProc(hWnd, nMsg, wParam, lParam)
if not handled and window._issubclassed_:
#its a subclassed window, try old window proc
result = CallWindowProc(window._old_wnd_proc_, hWnd, nMsg, wParam, lParam)
handled = True #always handled, either by old window proc, or old window proc called default handling
if not handled:
#still not handled, perform default processing
try:
return DefWindowProc(hWnd, nMsg, wParam, lParam) #windows default processing
except:
print(('ERROR perform default processing: DefWindowProc(%d, %d, %d, %d)' % (hWnd, nMsg, wParam, lParam)))
else:
return result
except:
try:
import traceback
traceback.print_exc()
except:
            pass #this happens when the python runtime is already exiting, but we are still registered
#as a window proc and windows keeps calling the callback
cGlobalWndProc = WNDPROC(globalWndProc)
def handle(obj):
if not obj:
return NULL
elif hasattr(obj, 'handle'):
return obj.handle
else:
return obj
def instanceFromHandle(handle):
return hndlMap.get(handle, None)
def instanceOrHandle(handle):
return hndlMap.get(handle, handle)
def windowFromHandle(handle):
"""returns None if handle = 0, the python Window instance if
handle is known, or a new pseudo window if handle != 0 and not known"""
if not handle:
return None
window = hndlMap.get(handle, None)
if not window:
window = Window(hWnd = handle)
return window
class WindowsObject(object):
m_handle = 0
def __init__(self, handle, managed = True):
"""managed objects are stored in a global map so that they can
be looked up by their handle, also this allows for calling the
appropriate destructor function (__dispose__) whenever the object
becomes garbage"""
self.m_handle = handle
if managed:
hndlMap[handle] = self
handle = property(lambda self: self.m_handle)
def __str__(self):
return '<%s handle: %d>' % (self.__class__.__name__, self.handle)
def __equals__(self, other):
return self.handle == other.handle
class Event(object):
def __init__(self, hWnd, nMsg, wParam, lParam):
self.hWnd = hWnd
self.nMsg = nMsg
self.lParam = lParam
self.wParam = wParam
self.handled = 0
def structure(self, nmStructure):
return nmStructure.from_address(int(self.lParam))
def __str__(self):
return "<event hWnd: %d, nMsg: %d, lParam: %d, wParam: %d>" % (self.hWnd, self.nMsg, self.lParam, self.wParam)
class MSG_MAP(list):
def __init__(self, entries):
list.__init__(self, entries)
self._msg_map_ = {}
for entry in self:
self.append(entry)
def append(self, entry):
entry.__install__(self)
def Handle(self, receiver, hWnd, nMsg, wParam, lParam, clazz):
handler = self._msg_map_.get(nMsg, None)
if handler:
event = Event(hWnd, nMsg, wParam, lParam)
event.handled = True #the presence of a handler means that by default we assume the event to be handled
#if the handler wants to force further processing by parent class map
#the handler will set event.handled to False
result = handler(receiver, event)
if event.handled:
if result == None:
return (True, NULL)
else:
return (True, int(result))
return (False, NULL)
def HandleBaseClasses(self, receiver, hWnd, nMsg, wParam, lParam, clazz):
for baseClass in clazz.__bases__:
if issubclass(baseClass, Window):
handled, result = baseClass._msg_map_.Dispatch(receiver, hWnd, nMsg, wParam, lParam, baseClass)
if handled:
return (True, result)
return (False, NULL)
def Dispatch(self, receiver, hWnd, nMsg, wParam, lParam, clazz = None):
clazz = clazz or receiver.__class__
handled, result = self.Handle(receiver, hWnd, nMsg, wParam, lParam, clazz)
if handled:
return (True, result)
handled, result = self.HandleBaseClasses(receiver, hWnd, nMsg, wParam, lParam, clazz)
if handled:
return (True, result)
#nobody handled msg
return (False, NULL)
def DispatchMSG(self, receiver, msg):
return self.Dispatch(receiver, msg.hWnd, msg.message, msg.wParam, msg.lParam)
def __str__(self):
return str(self._msg_map_)
class WindowType(type):
def __init__(cls, name, bases, dct):
#make sure every window class has its own msg map
#~ if not dct.has_key('_msg_map_'):
if not '_msg_map_' in dct:
cls._msg_map_ = MSG_MAP([])
super(WindowType, cls).__init__(name, bases, dct)
#see if decorators were used to map events to handlers,
#and install the handlers in the msgmap
for item in list(dct.values()):
if hasattr(item, 'handler'):
cls._msg_map_.append(item.handler)
hInstance = GetModuleHandle(None)
wndClasses = []
RCDEFAULT = RECT(top = CW_USEDEFAULT, left = CW_USEDEFAULT, right = 0, bottom = 0)
class Window(WindowsObject, metaclass=WindowType):
_window_class_ = ''
_window_title_ = ''
_window_style_ = WS_OVERLAPPEDWINDOW | WS_VISIBLE
_window_style_ex_ = 0
_window_icon_ = LoadIcon(NULL, MAKEINTRESOURCE(IDI_APPLICATION))
_window_icon_sm_ = LoadIcon(NULL, MAKEINTRESOURCE(IDI_APPLICATION))
_window_background_ = 0
_window_class_style_ = 0
_window_style_clip_children_and_siblings_ = True
_window_dbg_msg_ = False
_window_width_ = CW_USEDEFAULT
_window_height_ = CW_USEDEFAULT
__dispose__ = DestroyWindow
def __init__(self, title = '', style = None, exStyle = None, parent = None, menu = None, rcPos = RCDEFAULT, orStyle = None, orExStyle = None, nandStyle = None, nandExStyle = None, width = CW_USEDEFAULT, height = CW_USEDEFAULT, hWnd = None):
if hWnd: #wrapping instead of creating
self.m_handle = hWnd #note client is responsible for deleting
return
windowClassExists = False
cls = WNDCLASSEX()
if self._window_class_:
if GetClassInfo(hInstance, self._window_class_, byref(cls)):
windowClassExists = True
#determine whether we are going to subclass an existing window class
#or create a new windowclass
self._issubclassed_ = self._window_class_ and windowClassExists
atom = 0
if not self._issubclassed_:
#if no _window_class_ is given, generate a new one
className = self._window_class_ or "venster_wtl_%d" % id(self.__class__)
cls = WNDCLASSEX()
cls.cbSize = sizeof(cls)
cls.lpszClassName = className
cls.hInstance = hInstance
cls.lpfnWndProc = cGlobalWndProc
cls.style = self._window_class_style_
cls.hbrBackground = self._window_background_
cls.hIcon = handle(self._window_icon_)
cls.hIconSm = handle(self._window_icon_sm_)
cls.hCursor = LoadCursor(NULL, MAKEINTRESOURCE(IDC_ARROW))
#cls structure needs to stay on heap
wndClasses.append(cls)
atom = RegisterClassEx(pointer(cls))
#~ print('atom %d' % atom)
else:
#subclass existing window class.
className = self._window_class_
title = title or self._window_title_
if style is None:
style = self._window_style_
if exStyle is None:
exStyle = self._window_style_ex_
if orStyle:
style |= orStyle
if orExStyle:
exStyle |= orExStyle
if self._window_style_clip_children_and_siblings_:
style |= WS_CLIPCHILDREN
style |= WS_CLIPSIBLINGS
if nandStyle:
style &= ~nandStyle
left, right = rcPos.left, rcPos.right
top, bottom = rcPos.top, rcPos.bottom
if width == CW_USEDEFAULT:
width = self._window_width_
if left == CW_USEDEFAULT and width != CW_USEDEFAULT:
right = CW_USEDEFAULT + width
if height == CW_USEDEFAULT:
height = self._window_height_
if top == CW_USEDEFAULT and height != CW_USEDEFAULT:
bottom = CW_USEDEFAULT + height
        #for normal windows created through venster, the mapping between window handle
#and window instance will be established by processing the WM_NCCREATE msg
#and looking up the instance in the createhndlMap
createHndlMap[id(self)] = self
wm_create_param = id(self)
if className == 'msctls_trackbar32':
wm_create_param = 0
hWnd = 0
if atom:
hWnd = CreateWindowEx_atom(exStyle, atom, title, style, left, top, right - left, bottom - top, handle(parent), handle(menu), hInstance, wm_create_param)
else:
hWnd = CreateWindowEx(exStyle, className, title, style, left, top, right - left, bottom - top, handle(parent), handle(menu), hInstance, wm_create_param)
del createHndlMap[id(self)]
if self._issubclassed_:
#for subclassed windows, we establish the instance <-> handle mapping here
WindowsObject.__init__(self, hWnd)
self._old_wnd_proc_ = self.SubClass(cGlobalWndProc)
def SubClass(self, newWndProc):
return SetWindowLong(self.handle, GWL_WNDPROC, newWndProc)
class Interceptor(object):
def __init__(self, receiver, window, msg_map, nMsg = [WM_NOTIFY]):
self.nMsg = dict([(x, 1) for x in nMsg])
self.newProc = WNDPROC(self.WndProc)
if window:
self.oldProc = window.SubClass(self.newProc)
self._msg_map_ = msg_map
self.receiver = receiver
def dispose(self):
self.WndProc = lambda self, hWnd, nMsg, wParam, lParam: 0
del self.receiver
del self._msg_map_
del self.newProc
def WndProc(self, hWnd, nMsg, wParam, lParam):
if nMsg in self.nMsg and hasattr(self, 'receiver'):
handled, res = self._msg_map_.Dispatch(self.receiver, hWnd, nMsg, wParam, lParam)
else:
handled = 0
if not handled:
return CallWindowProc(self.oldProc, hWnd, nMsg, wParam, lParam)
else:
return res
def Intercept(self, receiver, msgMap, nMsg = [WM_NOTIFY]):
return Window.Interceptor(self, receiver, msgMap, nMsg = nMsg)
def InterceptParent(self, nMsg = [WM_NOTIFY]):
"""intercepts msg proc in order to reroute msgs to self"""
self._interceptParent = self.Intercept(self.GetParent(), self._msg_map_, nMsg = nMsg)
def dispose(self):
if hasattr(self, '_interceptParent'):
self._interceptParent.dispose()
del self._interceptParent
def WndProc(self, hWnd, nMsg, wParam, lParam):
if self._window_dbg_msg_:
print(('%s, %d, %d, %d, %d' % (repr(self), hWnd, nMsg, wParam, lParam)))
return self._msg_map_.Dispatch(self, hWnd, nMsg, wParam, lParam)
def IsDialogMessage(self, lpmsg):
return IsDialogMessage(self.handle, lpmsg)
def PreTranslateMessage(self, msg):
return 0
def TranslateAccelerator(self, msg):
return 0
def __repr__(self):
return '<Window hWnd: %d>' % self.handle
#this is the base class for all handlers defined in msg maps
class HANDLER(object):
    #the handler is given in the msg map as an unbound (static) method
#on some class X, to enable a derived class to override a handler method
#of a parent class Y, a lambda trick is needed to pick the correct handler
#(that of the base class)
def __init__(self, handler):
#TODO how to determine if handler is a lambda or a named function without
#looking at '__name__'?:
if not handler:
self.m_handler = None
elif handler.__name__ == '<lambda>':
self.m_handler = handler
else:#trick to make handler 'virtual' again
self.m_handler = lambda self, event: getattr(self, handler.__name__)(event)
def __call__(self, receiver, event):
return self.handler(receiver, event)
#handler = property(lambda self: self.m_handler)
def getHandler(self):
return self.m_handler
def setHandler(self, value):
self.m_handler = value
handler = property(getHandler, setHandler)
#Handler for normal window messages (e.g. WM_SIZE, WM_CLOSE, WM_PAINT etc)
class MSG_HANDLER(HANDLER):
def __init__(self, msg, handler):
HANDLER.__init__(self, handler)
self.msg = msg
def __install__(self, msgMap):
msgMap._msg_map_[self.msg] = self
class NTF_MAP(dict):
def __call__(self, receiver, event):
nmhdr = NMHDR.from_address(int(event.lParam))
handler = self.get(str(nmhdr.code), None)
if handler:
event.nmhdr = nmhdr
return handler(receiver, event)
else:
event.handled = 0
return 0
#handler for notification messages
#handles all notifications with the given code
class NTF_HANDLER(HANDLER):
def __init__(self, code, handler):
HANDLER.__init__(self, handler)
self.code = code
def __install__(self, msgMap):
notifMap = msgMap._msg_map_.setdefault(WM_NOTIFY, NTF_MAP())
notifMap[str(self.code)] = self
#support for WM_COMMAND msgs
#cmd is a map from id -> [(code, handler), ...]
#first handler in the list that matches the code is fired
#if code == -1, then handler is fired for any code
class CMD_MAP(dict):
def __call__(self, receiver, event):
code = HIWORD(event.wParam)
id = LOWORD(event.wParam)
for handlerCode, handler in self.get(id, []):
if handlerCode == -1 or handlerCode == code:
event.id = id
event.code = code
return handler(receiver, event)
#not handled
event.handled = 0
return 0
#maps command message based on control id AND notification code
class CMD_HANDLER(HANDLER):
def __init__(self, id, code, handler):
HANDLER.__init__(self, handler)
self.id, self.code = id, code
def __install__(self, msgMap):
cmdMap = msgMap._msg_map_.setdefault(WM_COMMAND, CMD_MAP())
notifList = cmdMap.setdefault(self.id, [])
notifList.append((self.code, self))
#maps command message based on control id
class CMD_ID_HANDLER(HANDLER):
def __init__(self, id, handler):
HANDLER.__init__(self, handler)
self.id = id
def __install__(self, msgMap):
cmdMap = msgMap._msg_map_.setdefault(WM_COMMAND, CMD_MAP())
notifList = cmdMap.setdefault(self.id, [])
notifList.append((-1, self))
#deprecated, will be removed before 1.0
class CHAIN_MSG_MAP (object):
def __init__(self, msgMap): pass
def __install__(self, msgMap): pass
#decorator versions of the above
def msg_handler(msg):
def decorator_func(func):
func.handler = MSG_HANDLER(msg, func)
return func
return decorator_func
def cmd_handler(id, code = None):
def decorator_func(func):
if code:
func.handler = CMD_HANDLER(id, code, func)
else:
func.handler = CMD_ID_HANDLER(id, func)
return func
return decorator_func
def ntf_handler(code):
def decorator_func(func):
func.handler = NTF_HANDLER(code, func)
return func
return decorator_func
#TODO allow the addition of more specific filters
#TODO make filters weak so that remove filter is not needed
class MessageLoop:
def __init__(self):
self.m_filters = {}
def AddFilter(self, filterFunc):
self.m_filters[filterFunc] = 1
def RemoveFilter(self, filterFunc):
del self.m_filters[filterFunc]
def Run(self):
msg = MSG()
lpmsg = byref(msg)
while GetMessage(lpmsg, 0, 0, 0):
if not self.PreTranslateMessage(msg):
TranslateMessage(lpmsg)
DispatchMessage(lpmsg)
global quit
quit = True
def PreTranslateMessage(self, msg):
for filter in list(self.m_filters.keys()):
if list(filter(msg)):
return 1
return 0
theMessageLoop = MessageLoop()
def GetMessageLoop():
return theMessageLoop
def Run():
theMessageLoop.Run()
class Application(object):
def Run(self):
return Run()
def Quit(self, nExitCode = 0):
"""quits the application by posting the WM_QUIT message with the given
exitCode"""
PostQuitMessage(nExitCode)
def Exit(self, nExitCode = 0):
self.Quit(nExitCode)
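#Hypothetical usage sketch (not part of the original module): a minimal top-level window
#built with the classes above. Assumes WM_DESTROY is exported by the sibling 'windows'
#module (pulled in by the star import at the top) and that this runs on Windows.
class DemoWindow(Window):
    _window_title_ = 'venster demo'

    @msg_handler(WM_DESTROY)
    def OnDestroy(self, event):
        #quit the message loop when the window is destroyed
        PostQuitMessage(0)

def demo():
    win = DemoWindow()
    Run()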
src/common/package/config/application.py | ammar-khan/raspberry-pi-opencv-dnn-face-detection | 3 | 6631695 | <reponame>ammar-khan/raspberry-pi-opencv-dnn-face-detection
##
# Copyright 2018, <NAME>
# Licensed under MIT.
# Since: v1.0.0
##
# Application configuration
# HTTP Port for web streaming
HTTP_PORT = 8000
# HTTP page template path
HTML_TEMPLATE_PATH = '/home/pi/code/raspberry-pi-opencv-dnn-face-detection/src/common/package/http/template/'
# Capturing device index (used for web camera)
CAPTURING_DEVICE = 0
# To user Pi Camera
USE_PI_CAMERA = True
# Capture configuration
WIDTH = 640
HEIGHT = 480
RESOLUTION = [WIDTH, HEIGHT]
FRAME_RATE = 24
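# Hypothetical usage sketch (not part of the original file): one way a capture helper could
# consume the settings above. The cv2/picamera calls are standard, but wiring them up inside
# this config module is purely illustrative.
def _open_capture_device():
    if USE_PI_CAMERA:
        from picamera import PiCamera
        camera = PiCamera()
        camera.resolution = tuple(RESOLUTION)
        camera.framerate = FRAME_RATE
        return camera
    import cv2
    capture = cv2.VideoCapture(CAPTURING_DEVICE)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)
    return capture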
adv/yurius.py | Caledor/dl | 0 | 6631696 | <gh_stars>0
from core.advbase import *
class Yurius(Adv):
def prerun(self):
self.dragondrive = self.dragonform.set_dragondrive(
ModeManager(
group="ddrive",
buffs=[
Selfbuff("dragondrive_sd", 0.35, -1, "s", "passive"),
Selfbuff("dragondrive_sp", 0.30, -1, "sp", "buff"),
],
s1=True,
s2=True,
),
drain=75,
)
variants = {None: Yurius}
utils/utility.py | itsPeetah/image-2-text-converter | 0 | 6631697 | def prompt_conversion_type_selection():
choice = int(input("What kind of convertion do you wnat to use? ( 1: ascii-art, 2: braille)\n>>> "))
while choice < 1 or choice > 2:
print("Invalid answer, please, try again.")
choice = int(input("What kind of convertion do you wnat to use? ( 1: ascii-art, 2: braille)\n>>> "))
return choice
def grayscale_value_to_palette_index(grayscale, palette_length):
'''Scale the pixel gray rgb value to fit into the palette and pick the correspondig color'''
rgb_position = grayscale / 255
palette_index = int((palette_length - 1) * rgb_position)
return palette_index
def image_resize_prompt(original_size):
'''Prompt the user to resize the image and calculate new size'''
print("Current image size is: ", original_size)
width, height = original_size
do_resize = False
if input("Do you want to scale down the image? [Y/N]") in ["Y","y"]:
new_scale_factor = int(input("Downsizing factor: 1/"))
width = int(width/new_scale_factor)
height = int(height/new_scale_factor)
do_resize = True
return do_resize, width, height
def get_braille_character(chunk_Value):
unicode_value = int(0x2800) + chunk_Value
return chr(unicode_value)
| def prompt_conversion_type_selection():
choice = int(input("What kind of convertion do you wnat to use? ( 1: ascii-art, 2: braille)\n>>> "))
while choice < 1 or choice > 2:
print("Invalid answer, please, try again.")
choice = int(input("What kind of convertion do you wnat to use? ( 1: ascii-art, 2: braille)\n>>> "))
return choice
def grayscale_value_to_palette_index(grayscale, palette_length):
'''Scale the pixel gray rgb value to fit into the palette and pick the correspondig color'''
rgb_position = grayscale / 255
palette_index = int((palette_length - 1) * rgb_position)
return palette_index
def image_resize_prompt(original_size):
'''Prompt the user to resize the image and calculate new size'''
print("Current image size is: ", original_size)
width, height = original_size
do_resize = False
if input("Do you want to scale down the image? [Y/N]") in ["Y","y"]:
new_scale_factor = int(input("Downsizing factor: 1/"))
width = int(width/new_scale_factor)
height = int(height/new_scale_factor)
do_resize = True
return do_resize, width, height
def get_braille_character(chunk_Value):
unicode_value = int(0x2800) + chunk_Value
return chr(unicode_value)
| en | 0.746873 | Scale the pixel gray rgb value to fit into the palette and pick the correspondig color Prompt the user to resize the image and calculate new size | 3.965509 | 4 |
score.py | vkesanam/CustomVision | 10 | 6631698 | # Run the following score.py from the notebook to generate the web serivce schema JSON file
# Learn more about creating score file from here: https://docs.microsoft.com/en-us/azure/machine-learning/preview/model-management-service-deploy
def init():
from sklearn.externals import joblib
global model
model = joblib.load('output/trainedModel.pkl')
def run(input_df):
import json
pred = model.predict(input_df)
return json.dumps(str(pred[0]))
def main():
from azureml.api.schema.dataTypes import DataTypes
from azureml.api.schema.sampleDefinition import SampleDefinition
from azureml.api.realtime.services import generate_schema
import pandas
df = pandas.DataFrame(data=[[380, 120, 76]], columns=['indicator1', 'NF1', 'cellprofiling'])
# Check the output of the function
init()
input1 = pandas.DataFrame([[380, 120, 76]])
print("Result: " + run(input1))
inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}
# Generate the service_schema.json
generate_schema(run_func=run, inputs=inputs, filepath='output/service_schema.json')
print("Schema generated")
if __name__ == "__main__":
main()
| # Run the following score.py from the notebook to generate the web serivce schema JSON file
# Learn more about creating score file from here: https://docs.microsoft.com/en-us/azure/machine-learning/preview/model-management-service-deploy
def init():
from sklearn.externals import joblib
global model
model = joblib.load('output/trainedModel.pkl')
def run(input_df):
import json
pred = model.predict(input_df)
return json.dumps(str(pred[0]))
def main():
from azureml.api.schema.dataTypes import DataTypes
from azureml.api.schema.sampleDefinition import SampleDefinition
from azureml.api.realtime.services import generate_schema
import pandas
df = pandas.DataFrame(data=[[380, 120, 76]], columns=['indicator1', 'NF1', 'cellprofiling'])
# Check the output of the function
init()
input1 = pandas.DataFrame([[380, 120, 76]])
print("Result: " + run(input1))
inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}
# Generate the service_schema.json
generate_schema(run_func=run, inputs=inputs, filepath='output/service_schema.json')
print("Schema generated")
if __name__ == "__main__":
main()
| en | 0.655796 | # Run the following score.py from the notebook to generate the web serivce schema JSON file # Learn more about creating score file from here: https://docs.microsoft.com/en-us/azure/machine-learning/preview/model-management-service-deploy # Check the output of the function # Generate the service_schema.json | 2.932896 | 3 |
myvenv/lib/python3.7/site-packages/paste/auth/auth_tkt.py | kevintu2/spark-se-assessment | 1 | 6631699 | <gh_stars>1-10
# (c) 2005 <NAME> and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
##########################################################################
#
# Copyright (c) 2005 Imaginary Landscape LLC and Contributors.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##########################################################################
"""
Implementation of cookie signing as done in `mod_auth_tkt
<http://www.openfusion.com.au/labs/mod_auth_tkt/>`_.
mod_auth_tkt is an Apache module that looks for these signed cookies
and sets ``REMOTE_USER``, ``REMOTE_USER_TOKENS`` (a comma-separated
list of groups) and ``REMOTE_USER_DATA`` (arbitrary string data).
This module is an alternative to the ``paste.auth.cookie`` module;
it's primary benefit is compatibility with mod_auth_tkt, which in turn
makes it possible to use the same authentication process with
non-Python code run under Apache.
"""
import six
import time as time_mod
try:
import hashlib
except ImportError:
# mimic hashlib (will work for md5, fail for secure hashes)
import md5 as hashlib
try:
from http.cookies import SimpleCookie
except ImportError:
# Python 2
from Cookie import SimpleCookie
from paste import request
try:
from urllib import quote as url_quote # Python 2.X
from urllib import unquote as url_unquote
except ImportError:
from urllib.parse import quote as url_quote # Python 3+
from urllib.parse import unquote as url_unquote
DEFAULT_DIGEST = hashlib.md5
class AuthTicket(object):
"""
This class represents an authentication token. You must pass in
the shared secret, the userid, and the IP address. Optionally you
can include tokens (a list of strings, representing role names),
'user_data', which is arbitrary data available for your own use in
later scripts. Lastly, you can override the timestamp, cookie name,
whether to secure the cookie and the digest algorithm (for details
look at ``AuthTKTMiddleware``).
Once you provide all the arguments, use .cookie_value() to
generate the appropriate authentication ticket. .cookie()
generates a Cookie object, the str() of which is the complete
cookie header to be sent.
CGI usage::
token = auth_tkt.AuthTick('sharedsecret', 'username',
os.environ['REMOTE_ADDR'], tokens=['admin'])
print('Status: 200 OK')
print('Content-type: text/html')
print(token.cookie())
print("")
... redirect HTML ...
Webware usage::
token = auth_tkt.AuthTick('sharedsecret', 'username',
self.request().environ()['REMOTE_ADDR'], tokens=['admin'])
self.response().setCookie('auth_tkt', token.cookie_value())
Be careful not to do an HTTP redirect after login; use meta
refresh or Javascript -- some browsers have bugs where cookies
aren't saved when set on a redirect.
"""
def __init__(self, secret, userid, ip, tokens=(), user_data='',
time=None, cookie_name='auth_tkt',
secure=False, digest_algo=DEFAULT_DIGEST):
self.secret = secret
self.userid = userid
self.ip = ip
if not isinstance(tokens, six.string_types):
tokens = ','.join(tokens)
self.tokens = tokens
self.user_data = user_data
if time is None:
self.time = time_mod.time()
else:
self.time = time
self.cookie_name = cookie_name
self.secure = secure
if isinstance(digest_algo, six.binary_type):
# correct specification of digest from hashlib or fail
self.digest_algo = getattr(hashlib, digest_algo)
else:
self.digest_algo = digest_algo
def digest(self):
return calculate_digest(
self.ip, self.time, self.secret, self.userid, self.tokens,
self.user_data, self.digest_algo)
def cookie_value(self):
v = b'%s%08x%s!' % (self.digest(), int(self.time), maybe_encode(url_quote(self.userid)))
if self.tokens:
v += maybe_encode(self.tokens) + b'!'
v += maybe_encode(self.user_data)
return v
def cookie(self):
c = SimpleCookie()
if six.PY3:
import base64
cookie_value = base64.b64encode(self.cookie_value())
else:
cookie_value = self.cookie_value().encode('base64').strip().replace('\n', '')
c[self.cookie_name] = cookie_value
c[self.cookie_name]['path'] = '/'
if self.secure:
c[self.cookie_name]['secure'] = 'true'
return c
class BadTicket(Exception):
"""
Exception raised when a ticket can't be parsed. If we get
far enough to determine what the expected digest should have
been, expected is set. This should not be shown by default,
but can be useful for debugging.
"""
def __init__(self, msg, expected=None):
self.expected = expected
Exception.__init__(self, msg)
def parse_ticket(secret, ticket, ip, digest_algo=DEFAULT_DIGEST):
"""
Parse the ticket, returning (timestamp, userid, tokens, user_data).
If the ticket cannot be parsed, ``BadTicket`` will be raised with
an explanation.
"""
if isinstance(digest_algo, six.binary_type):
# correct specification of digest from hashlib or fail
digest_algo = getattr(hashlib, digest_algo)
digest_hexa_size = digest_algo().digest_size * 2
ticket = ticket.strip(b'"')
digest = ticket[:digest_hexa_size]
try:
timestamp = int(ticket[digest_hexa_size:digest_hexa_size + 8], 16)
except ValueError as e:
raise BadTicket('Timestamp is not a hex integer: %s' % e)
try:
userid, data = ticket[digest_hexa_size + 8:].split(b'!', 1)
except ValueError:
raise BadTicket('userid is not followed by !')
userid = url_unquote(userid.decode())
if b'!' in data:
tokens, user_data = data.split(b'!', 1)
else:
# @@: Is this the right order?
tokens = b''
user_data = data
expected = calculate_digest(ip, timestamp, secret,
userid, tokens, user_data,
digest_algo)
if expected != digest:
raise BadTicket('Digest signature is not correct',
expected=(expected, digest))
tokens = tokens.split(b',')
return (timestamp, userid, tokens, user_data)
# @@: Digest object constructor compatible with named ones in hashlib only
def calculate_digest(ip, timestamp, secret, userid, tokens, user_data,
digest_algo):
secret = maybe_encode(secret)
userid = maybe_encode(userid)
tokens = maybe_encode(tokens)
user_data = maybe_encode(user_data)
digest0 = maybe_encode(digest_algo(
encode_ip_timestamp(ip, timestamp) + secret + userid + b'\0'
+ tokens + b'\0' + user_data).hexdigest())
digest = digest_algo(digest0 + secret).hexdigest()
return maybe_encode(digest)
def encode_ip_timestamp(ip, timestamp):
ip_chars = b''.join(map(six.int2byte, map(int, ip.split('.'))))
t = int(timestamp)
ts = ((t & 0xff000000) >> 24,
(t & 0xff0000) >> 16,
(t & 0xff00) >> 8,
t & 0xff)
ts_chars = b''.join(map(six.int2byte, ts))
return (ip_chars + ts_chars)
def maybe_encode(s, encoding='utf8'):
if isinstance(s, six.text_type):
s = s.encode(encoding)
return s
class AuthTKTMiddleware(object):
"""
Middleware that checks for signed cookies that match what
`mod_auth_tkt <http://www.openfusion.com.au/labs/mod_auth_tkt/>`_
looks for (if you have mod_auth_tkt installed, you don't need this
middleware, since Apache will set the environmental variables for
you).
Arguments:
``secret``:
A secret that should be shared by any instances of this application.
If this app is served from more than one machine, they should all
have the same secret.
``cookie_name``:
The name of the cookie to read and write from. Default ``auth_tkt``.
``secure``:
If the cookie should be set as 'secure' (only sent over SSL) and if
the login must be over SSL. (Defaults to False)
``httponly``:
If the cookie should be marked as HttpOnly, which means that it's
not accessible to JavaScript. (Defaults to False)
``include_ip``:
If the cookie should include the user's IP address. If so, then
if they change IPs their cookie will be invalid.
``logout_path``:
The path under this middleware that should signify a logout. The
page will be shown as usual, but the user will also be logged out
when they visit this page.
``digest_algo``:
Digest algorithm specified as a name of the algorithm provided by
``hashlib`` or as a compatible digest object constructor.
Defaults to ``md5``, as in mod_auth_tkt. The others currently
compatible with mod_auth_tkt are ``sha256`` and ``sha512``.
If used with mod_auth_tkt, then these settings (except logout_path) should
match the analogous Apache configuration settings.
This also adds two functions to the request:
``environ['paste.auth_tkt.set_user'](userid, tokens='', user_data='')``
This sets a cookie that logs the user in. ``tokens`` is a
string (comma-separated groups) or a list of strings.
``user_data`` is a string for your own use.
``environ['paste.auth_tkt.logout_user']()``
Logs out the user.
"""
def __init__(self, app, secret, cookie_name='auth_tkt', secure=False,
include_ip=True, logout_path=None, httponly=False,
no_domain_cookie=True, current_domain_cookie=True,
wildcard_cookie=True, digest_algo=DEFAULT_DIGEST):
self.app = app
self.secret = secret
self.cookie_name = cookie_name
self.secure = secure
self.httponly = httponly
self.include_ip = include_ip
self.logout_path = logout_path
self.no_domain_cookie = no_domain_cookie
self.current_domain_cookie = current_domain_cookie
self.wildcard_cookie = wildcard_cookie
if isinstance(digest_algo, str):
# correct specification of digest from hashlib or fail
self.digest_algo = getattr(hashlib, digest_algo)
else:
self.digest_algo = digest_algo
def __call__(self, environ, start_response):
cookies = request.get_cookies(environ)
if self.cookie_name in cookies:
cookie_value = cookies[self.cookie_name].value
else:
cookie_value = ''
if cookie_value:
if self.include_ip:
remote_addr = environ['REMOTE_ADDR']
else:
# mod_auth_tkt uses this dummy value when IP is not
# checked:
remote_addr = '0.0.0.0'
# @@: This should handle bad signatures better:
# Also, timeouts should cause cookie refresh
try:
timestamp, userid, tokens, user_data = parse_ticket(
self.secret, cookie_value, remote_addr, self.digest_algo)
tokens = ','.join(tokens)
environ['REMOTE_USER'] = userid
if environ.get('REMOTE_USER_TOKENS'):
# We want to add tokens/roles to what's there:
tokens = environ['REMOTE_USER_TOKENS'] + ',' + tokens
environ['REMOTE_USER_TOKENS'] = tokens
environ['REMOTE_USER_DATA'] = user_data
environ['AUTH_TYPE'] = 'cookie'
except BadTicket:
# bad credentials, just ignore without logging the user
# in or anything
pass
set_cookies = []
def set_user(userid, tokens='', user_data=''):
set_cookies.extend(self.set_user_cookie(
environ, userid, tokens, user_data))
def logout_user():
set_cookies.extend(self.logout_user_cookie(environ))
environ['paste.auth_tkt.set_user'] = set_user
environ['paste.auth_tkt.logout_user'] = logout_user
if self.logout_path and environ.get('PATH_INFO') == self.logout_path:
logout_user()
def cookie_setting_start_response(status, headers, exc_info=None):
headers.extend(set_cookies)
return start_response(status, headers, exc_info)
return self.app(environ, cookie_setting_start_response)
def set_user_cookie(self, environ, userid, tokens, user_data):
if not isinstance(tokens, six.string_types):
tokens = ','.join(tokens)
if self.include_ip:
remote_addr = environ['REMOTE_ADDR']
else:
remote_addr = '0.0.0.0'
ticket = AuthTicket(
self.secret,
userid,
remote_addr,
tokens=tokens,
user_data=user_data,
cookie_name=self.cookie_name,
secure=self.secure)
# @@: Should we set REMOTE_USER etc in the current
# environment right now as well?
cur_domain = environ.get('HTTP_HOST', environ.get('SERVER_NAME'))
wild_domain = '.' + cur_domain
cookie_options = ""
if self.secure:
cookie_options += "; secure"
if self.httponly:
cookie_options += "; HttpOnly"
cookies = []
if self.no_domain_cookie:
cookies.append(('Set-Cookie', '%s=%s; Path=/%s' % (
self.cookie_name, ticket.cookie_value(), cookie_options)))
if self.current_domain_cookie:
cookies.append(('Set-Cookie', '%s=%s; Path=/; Domain=%s%s' % (
self.cookie_name, ticket.cookie_value(), cur_domain,
cookie_options)))
if self.wildcard_cookie:
cookies.append(('Set-Cookie', '%s=%s; Path=/; Domain=%s%s' % (
self.cookie_name, ticket.cookie_value(), wild_domain,
cookie_options)))
return cookies
def logout_user_cookie(self, environ):
cur_domain = environ.get('HTTP_HOST', environ.get('SERVER_NAME'))
wild_domain = '.' + cur_domain
expires = 'Sat, 01-Jan-2000 12:00:00 GMT'
cookies = [
('Set-Cookie', '%s=""; Expires="%s"; Path=/' % (self.cookie_name, expires)),
('Set-Cookie', '%s=""; Expires="%s"; Path=/; Domain=%s' %
(self.cookie_name, expires, cur_domain)),
('Set-Cookie', '%s=""; Expires="%s"; Path=/; Domain=%s' %
(self.cookie_name, expires, wild_domain)),
]
return cookies
def make_auth_tkt_middleware(
app,
global_conf,
secret=None,
cookie_name='auth_tkt',
secure=False,
include_ip=True,
logout_path=None):
"""
Creates the `AuthTKTMiddleware
<class-paste.auth.auth_tkt.AuthTKTMiddleware.html>`_.
``secret`` is required, but can be set globally or locally.
"""
from paste.deploy.converters import asbool
secure = asbool(secure)
include_ip = asbool(include_ip)
if secret is None:
secret = global_conf.get('secret')
if not secret:
raise ValueError(
"You must provide a 'secret' (in global or local configuration)")
return AuthTKTMiddleware(
app, secret, cookie_name, secure, include_ip, logout_path or None)
| # (c) 2005 <NAME> and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
##########################################################################
#
# Copyright (c) 2005 Imaginary Landscape LLC and Contributors.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##########################################################################
"""
Implementation of cookie signing as done in `mod_auth_tkt
<http://www.openfusion.com.au/labs/mod_auth_tkt/>`_.
mod_auth_tkt is an Apache module that looks for these signed cookies
and sets ``REMOTE_USER``, ``REMOTE_USER_TOKENS`` (a comma-separated
list of groups) and ``REMOTE_USER_DATA`` (arbitrary string data).
This module is an alternative to the ``paste.auth.cookie`` module;
it's primary benefit is compatibility with mod_auth_tkt, which in turn
makes it possible to use the same authentication process with
non-Python code run under Apache.
"""
import six
import time as time_mod
try:
import hashlib
except ImportError:
# mimic hashlib (will work for md5, fail for secure hashes)
import md5 as hashlib
try:
from http.cookies import SimpleCookie
except ImportError:
# Python 2
from Cookie import SimpleCookie
from paste import request
try:
from urllib import quote as url_quote # Python 2.X
from urllib import unquote as url_unquote
except ImportError:
from urllib.parse import quote as url_quote # Python 3+
from urllib.parse import unquote as url_unquote
DEFAULT_DIGEST = hashlib.md5
class AuthTicket(object):
"""
This class represents an authentication token. You must pass in
the shared secret, the userid, and the IP address. Optionally you
can include tokens (a list of strings, representing role names),
'user_data', which is arbitrary data available for your own use in
later scripts. Lastly, you can override the timestamp, cookie name,
whether to secure the cookie and the digest algorithm (for details
look at ``AuthTKTMiddleware``).
Once you provide all the arguments, use .cookie_value() to
generate the appropriate authentication ticket. .cookie()
generates a Cookie object, the str() of which is the complete
cookie header to be sent.
CGI usage::
token = auth_tkt.AuthTick('sharedsecret', 'username',
os.environ['REMOTE_ADDR'], tokens=['admin'])
print('Status: 200 OK')
print('Content-type: text/html')
print(token.cookie())
print("")
... redirect HTML ...
Webware usage::
token = auth_tkt.AuthTick('sharedsecret', 'username',
self.request().environ()['REMOTE_ADDR'], tokens=['admin'])
self.response().setCookie('auth_tkt', token.cookie_value())
Be careful not to do an HTTP redirect after login; use meta
refresh or Javascript -- some browsers have bugs where cookies
aren't saved when set on a redirect.
"""
def __init__(self, secret, userid, ip, tokens=(), user_data='',
time=None, cookie_name='auth_tkt',
secure=False, digest_algo=DEFAULT_DIGEST):
self.secret = secret
self.userid = userid
self.ip = ip
if not isinstance(tokens, six.string_types):
tokens = ','.join(tokens)
self.tokens = tokens
self.user_data = user_data
if time is None:
self.time = time_mod.time()
else:
self.time = time
self.cookie_name = cookie_name
self.secure = secure
if isinstance(digest_algo, six.binary_type):
# correct specification of digest from hashlib or fail
self.digest_algo = getattr(hashlib, digest_algo)
else:
self.digest_algo = digest_algo
def digest(self):
return calculate_digest(
self.ip, self.time, self.secret, self.userid, self.tokens,
self.user_data, self.digest_algo)
def cookie_value(self):
v = b'%s%08x%s!' % (self.digest(), int(self.time), maybe_encode(url_quote(self.userid)))
if self.tokens:
v += maybe_encode(self.tokens) + b'!'
v += maybe_encode(self.user_data)
return v
def cookie(self):
c = SimpleCookie()
if six.PY3:
import base64
cookie_value = base64.b64encode(self.cookie_value())
else:
cookie_value = self.cookie_value().encode('base64').strip().replace('\n', '')
c[self.cookie_name] = cookie_value
c[self.cookie_name]['path'] = '/'
if self.secure:
c[self.cookie_name]['secure'] = 'true'
return c
class BadTicket(Exception):
"""
Exception raised when a ticket can't be parsed. If we get
far enough to determine what the expected digest should have
been, expected is set. This should not be shown by default,
but can be useful for debugging.
"""
def __init__(self, msg, expected=None):
self.expected = expected
Exception.__init__(self, msg)
def parse_ticket(secret, ticket, ip, digest_algo=DEFAULT_DIGEST):
"""
Parse the ticket, returning (timestamp, userid, tokens, user_data).
If the ticket cannot be parsed, ``BadTicket`` will be raised with
an explanation.
"""
if isinstance(digest_algo, six.binary_type):
# correct specification of digest from hashlib or fail
digest_algo = getattr(hashlib, digest_algo)
digest_hexa_size = digest_algo().digest_size * 2
ticket = ticket.strip(b'"')
digest = ticket[:digest_hexa_size]
try:
timestamp = int(ticket[digest_hexa_size:digest_hexa_size + 8], 16)
except ValueError as e:
raise BadTicket('Timestamp is not a hex integer: %s' % e)
try:
userid, data = ticket[digest_hexa_size + 8:].split(b'!', 1)
except ValueError:
raise BadTicket('userid is not followed by !')
userid = url_unquote(userid.decode())
if b'!' in data:
tokens, user_data = data.split(b'!', 1)
else:
# @@: Is this the right order?
tokens = b''
user_data = data
expected = calculate_digest(ip, timestamp, secret,
userid, tokens, user_data,
digest_algo)
if expected != digest:
raise BadTicket('Digest signature is not correct',
expected=(expected, digest))
tokens = tokens.split(b',')
return (timestamp, userid, tokens, user_data)
# @@: Digest object constructor compatible with named ones in hashlib only
def calculate_digest(ip, timestamp, secret, userid, tokens, user_data,
digest_algo):
secret = maybe_encode(secret)
userid = maybe_encode(userid)
tokens = maybe_encode(tokens)
user_data = maybe_encode(user_data)
digest0 = maybe_encode(digest_algo(
encode_ip_timestamp(ip, timestamp) + secret + userid + b'\0'
+ tokens + b'\0' + user_data).hexdigest())
digest = digest_algo(digest0 + secret).hexdigest()
return maybe_encode(digest)
def encode_ip_timestamp(ip, timestamp):
ip_chars = b''.join(map(six.int2byte, map(int, ip.split('.'))))
t = int(timestamp)
ts = ((t & 0xff000000) >> 24,
(t & 0xff0000) >> 16,
(t & 0xff00) >> 8,
t & 0xff)
ts_chars = b''.join(map(six.int2byte, ts))
return (ip_chars + ts_chars)
def maybe_encode(s, encoding='utf8'):
if isinstance(s, six.text_type):
s = s.encode(encoding)
return s
class AuthTKTMiddleware(object):
"""
Middleware that checks for signed cookies that match what
`mod_auth_tkt <http://www.openfusion.com.au/labs/mod_auth_tkt/>`_
looks for (if you have mod_auth_tkt installed, you don't need this
middleware, since Apache will set the environmental variables for
you).
Arguments:
``secret``:
A secret that should be shared by any instances of this application.
If this app is served from more than one machine, they should all
have the same secret.
``cookie_name``:
The name of the cookie to read and write from. Default ``auth_tkt``.
``secure``:
If the cookie should be set as 'secure' (only sent over SSL) and if
the login must be over SSL. (Defaults to False)
``httponly``:
If the cookie should be marked as HttpOnly, which means that it's
not accessible to JavaScript. (Defaults to False)
``include_ip``:
If the cookie should include the user's IP address. If so, then
if they change IPs their cookie will be invalid.
``logout_path``:
The path under this middleware that should signify a logout. The
page will be shown as usual, but the user will also be logged out
when they visit this page.
``digest_algo``:
Digest algorithm specified as a name of the algorithm provided by
``hashlib`` or as a compatible digest object constructor.
Defaults to ``md5``, as in mod_auth_tkt. The others currently
compatible with mod_auth_tkt are ``sha256`` and ``sha512``.
If used with mod_auth_tkt, then these settings (except logout_path) should
match the analogous Apache configuration settings.
This also adds two functions to the request:
``environ['paste.auth_tkt.set_user'](userid, tokens='', user_data='')``
This sets a cookie that logs the user in. ``tokens`` is a
string (comma-separated groups) or a list of strings.
``user_data`` is a string for your own use.
``environ['paste.auth_tkt.logout_user']()``
Logs out the user.
"""
def __init__(self, app, secret, cookie_name='auth_tkt', secure=False,
include_ip=True, logout_path=None, httponly=False,
no_domain_cookie=True, current_domain_cookie=True,
wildcard_cookie=True, digest_algo=DEFAULT_DIGEST):
self.app = app
self.secret = secret
self.cookie_name = cookie_name
self.secure = secure
self.httponly = httponly
self.include_ip = include_ip
self.logout_path = logout_path
self.no_domain_cookie = no_domain_cookie
self.current_domain_cookie = current_domain_cookie
self.wildcard_cookie = wildcard_cookie
if isinstance(digest_algo, str):
# correct specification of digest from hashlib or fail
self.digest_algo = getattr(hashlib, digest_algo)
else:
self.digest_algo = digest_algo
def __call__(self, environ, start_response):
cookies = request.get_cookies(environ)
if self.cookie_name in cookies:
cookie_value = cookies[self.cookie_name].value
else:
cookie_value = ''
if cookie_value:
if self.include_ip:
remote_addr = environ['REMOTE_ADDR']
else:
# mod_auth_tkt uses this dummy value when IP is not
# checked:
remote_addr = '0.0.0.0'
# @@: This should handle bad signatures better:
# Also, timeouts should cause cookie refresh
try:
timestamp, userid, tokens, user_data = parse_ticket(
self.secret, cookie_value, remote_addr, self.digest_algo)
tokens = ','.join(tokens)
environ['REMOTE_USER'] = userid
if environ.get('REMOTE_USER_TOKENS'):
# We want to add tokens/roles to what's there:
tokens = environ['REMOTE_USER_TOKENS'] + ',' + tokens
environ['REMOTE_USER_TOKENS'] = tokens
environ['REMOTE_USER_DATA'] = user_data
environ['AUTH_TYPE'] = 'cookie'
except BadTicket:
# bad credentials, just ignore without logging the user
# in or anything
pass
set_cookies = []
def set_user(userid, tokens='', user_data=''):
set_cookies.extend(self.set_user_cookie(
environ, userid, tokens, user_data))
def logout_user():
set_cookies.extend(self.logout_user_cookie(environ))
environ['paste.auth_tkt.set_user'] = set_user
environ['paste.auth_tkt.logout_user'] = logout_user
if self.logout_path and environ.get('PATH_INFO') == self.logout_path:
logout_user()
def cookie_setting_start_response(status, headers, exc_info=None):
headers.extend(set_cookies)
return start_response(status, headers, exc_info)
return self.app(environ, cookie_setting_start_response)
def set_user_cookie(self, environ, userid, tokens, user_data):
if not isinstance(tokens, six.string_types):
tokens = ','.join(tokens)
if self.include_ip:
remote_addr = environ['REMOTE_ADDR']
else:
remote_addr = '0.0.0.0'
ticket = AuthTicket(
self.secret,
userid,
remote_addr,
tokens=tokens,
user_data=user_data,
cookie_name=self.cookie_name,
secure=self.secure)
# @@: Should we set REMOTE_USER etc in the current
# environment right now as well?
cur_domain = environ.get('HTTP_HOST', environ.get('SERVER_NAME'))
wild_domain = '.' + cur_domain
cookie_options = ""
if self.secure:
cookie_options += "; secure"
if self.httponly:
cookie_options += "; HttpOnly"
cookies = []
if self.no_domain_cookie:
cookies.append(('Set-Cookie', '%s=%s; Path=/%s' % (
self.cookie_name, ticket.cookie_value(), cookie_options)))
if self.current_domain_cookie:
cookies.append(('Set-Cookie', '%s=%s; Path=/; Domain=%s%s' % (
self.cookie_name, ticket.cookie_value(), cur_domain,
cookie_options)))
if self.wildcard_cookie:
cookies.append(('Set-Cookie', '%s=%s; Path=/; Domain=%s%s' % (
self.cookie_name, ticket.cookie_value(), wild_domain,
cookie_options)))
return cookies
def logout_user_cookie(self, environ):
cur_domain = environ.get('HTTP_HOST', environ.get('SERVER_NAME'))
wild_domain = '.' + cur_domain
expires = 'Sat, 01-Jan-2000 12:00:00 GMT'
cookies = [
('Set-Cookie', '%s=""; Expires="%s"; Path=/' % (self.cookie_name, expires)),
('Set-Cookie', '%s=""; Expires="%s"; Path=/; Domain=%s' %
(self.cookie_name, expires, cur_domain)),
('Set-Cookie', '%s=""; Expires="%s"; Path=/; Domain=%s' %
(self.cookie_name, expires, wild_domain)),
]
return cookies
def make_auth_tkt_middleware(
app,
global_conf,
secret=None,
cookie_name='auth_tkt',
secure=False,
include_ip=True,
logout_path=None):
"""
Creates the `AuthTKTMiddleware
<class-paste.auth.auth_tkt.AuthTKTMiddleware.html>`_.
``secret`` is required, but can be set globally or locally.
"""
from paste.deploy.converters import asbool
secure = asbool(secure)
include_ip = asbool(include_ip)
if secret is None:
secret = global_conf.get('secret')
if not secret:
raise ValueError(
"You must provide a 'secret' (in global or local configuration)")
return AuthTKTMiddleware(
app, secret, cookie_name, secure, include_ip, logout_path or None) | en | 0.770651 | # (c) 2005 <NAME> and contributors; written for Paste (http://pythonpaste.org) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php ########################################################################## # # Copyright (c) 2005 Imaginary Landscape LLC and Contributors. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ########################################################################## Implementation of cookie signing as done in `mod_auth_tkt <http://www.openfusion.com.au/labs/mod_auth_tkt/>`_. mod_auth_tkt is an Apache module that looks for these signed cookies and sets ``REMOTE_USER``, ``REMOTE_USER_TOKENS`` (a comma-separated list of groups) and ``REMOTE_USER_DATA`` (arbitrary string data). This module is an alternative to the ``paste.auth.cookie`` module; it's primary benefit is compatibility with mod_auth_tkt, which in turn makes it possible to use the same authentication process with non-Python code run under Apache. # mimic hashlib (will work for md5, fail for secure hashes) # Python 2 # Python 2.X # Python 3+ This class represents an authentication token. You must pass in the shared secret, the userid, and the IP address. Optionally you can include tokens (a list of strings, representing role names), 'user_data', which is arbitrary data available for your own use in later scripts. Lastly, you can override the timestamp, cookie name, whether to secure the cookie and the digest algorithm (for details look at ``AuthTKTMiddleware``). Once you provide all the arguments, use .cookie_value() to generate the appropriate authentication ticket. .cookie() generates a Cookie object, the str() of which is the complete cookie header to be sent. CGI usage:: token = auth_tkt.AuthTick('sharedsecret', 'username', os.environ['REMOTE_ADDR'], tokens=['admin']) print('Status: 200 OK') print('Content-type: text/html') print(token.cookie()) print("") ... redirect HTML ... Webware usage:: token = auth_tkt.AuthTick('sharedsecret', 'username', self.request().environ()['REMOTE_ADDR'], tokens=['admin']) self.response().setCookie('auth_tkt', token.cookie_value()) Be careful not to do an HTTP redirect after login; use meta refresh or Javascript -- some browsers have bugs where cookies aren't saved when set on a redirect. # correct specification of digest from hashlib or fail Exception raised when a ticket can't be parsed. 
If we get far enough to determine what the expected digest should have been, expected is set. This should not be shown by default, but can be useful for debugging. Parse the ticket, returning (timestamp, userid, tokens, user_data). If the ticket cannot be parsed, ``BadTicket`` will be raised with an explanation. # correct specification of digest from hashlib or fail # @@: Is this the right order? # @@: Digest object constructor compatible with named ones in hashlib only Middleware that checks for signed cookies that match what `mod_auth_tkt <http://www.openfusion.com.au/labs/mod_auth_tkt/>`_ looks for (if you have mod_auth_tkt installed, you don't need this middleware, since Apache will set the environmental variables for you). Arguments: ``secret``: A secret that should be shared by any instances of this application. If this app is served from more than one machine, they should all have the same secret. ``cookie_name``: The name of the cookie to read and write from. Default ``auth_tkt``. ``secure``: If the cookie should be set as 'secure' (only sent over SSL) and if the login must be over SSL. (Defaults to False) ``httponly``: If the cookie should be marked as HttpOnly, which means that it's not accessible to JavaScript. (Defaults to False) ``include_ip``: If the cookie should include the user's IP address. If so, then if they change IPs their cookie will be invalid. ``logout_path``: The path under this middleware that should signify a logout. The page will be shown as usual, but the user will also be logged out when they visit this page. ``digest_algo``: Digest algorithm specified as a name of the algorithm provided by ``hashlib`` or as a compatible digest object constructor. Defaults to ``md5``, as in mod_auth_tkt. The others currently compatible with mod_auth_tkt are ``sha256`` and ``sha512``. If used with mod_auth_tkt, then these settings (except logout_path) should match the analogous Apache configuration settings. This also adds two functions to the request: ``environ['paste.auth_tkt.set_user'](userid, tokens='', user_data='')`` This sets a cookie that logs the user in. ``tokens`` is a string (comma-separated groups) or a list of strings. ``user_data`` is a string for your own use. ``environ['paste.auth_tkt.logout_user']()`` Logs out the user. # correct specification of digest from hashlib or fail # mod_auth_tkt uses this dummy value when IP is not # checked: # @@: This should handle bad signatures better: # Also, timeouts should cause cookie refresh # We want to add tokens/roles to what's there: # bad credentials, just ignore without logging the user # in or anything # @@: Should we set REMOTE_USER etc in the current # environment right now as well? Creates the `AuthTKTMiddleware <class-paste.auth.auth_tkt.AuthTKTMiddleware.html>`_. ``secret`` is required, but can be set globally or locally. | 1.564106 | 2 |
QUIC/examples/doq_server.py | Niranjanpandeshwar/Data_Transfer_With_Distributed_Index | 0 | 6631700 | <filename>QUIC/examples/doq_server.py
import argparse
import asyncio
import json
import logging
from typing import Dict, Optional
from dnslib.dns import DNSRecord
from aioquic.asyncio import QuicConnectionProtocol, serve
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.connection import QuicConnection
from aioquic.quic.events import ProtocolNegotiated, QuicEvent, StreamDataReceived
from aioquic.quic.logger import QuicLogger
from aioquic.tls import SessionTicket
try:
import uvloop
except ImportError:
uvloop = None
class DnsConnection:
def __init__(self, quic: QuicConnection):
self._quic = quic
def do_query(self, payload) -> bytes:
q = DNSRecord.parse(payload)
return q.send(self.resolver(), 53)
def resolver(self) -> str:
return args.resolver
def handle_event(self, event: QuicEvent) -> None:
if isinstance(event, StreamDataReceived):
data = self.do_query(event.data)
end_stream = False
self._quic.send_stream_data(event.stream_id, data, end_stream)
class DnsServerProtocol(QuicConnectionProtocol):
# -00 specifies 'dq', 'doq', and 'doq-h00' (the latter obviously tying to
# the version of the draft it matches). This is confusing, so we'll just
# support them all, until future drafts define conflicting behaviour.
SUPPORTED_ALPNS = ["dq", "doq", "doq-h00"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._dns: Optional[DnsConnection] = None
def quic_event_received(self, event: QuicEvent):
if isinstance(event, ProtocolNegotiated):
if event.alpn_protocol in DnsServerProtocol.SUPPORTED_ALPNS:
self._dns = DnsConnection(self._quic)
if self._dns is not None:
self._dns.handle_event(event)
class SessionTicketStore:
"""
Simple in-memory store for session tickets.
"""
def __init__(self) -> None:
self.tickets: Dict[bytes, SessionTicket] = {}
def add(self, ticket: SessionTicket) -> None:
self.tickets[ticket.ticket] = ticket
def pop(self, label: bytes) -> Optional[SessionTicket]:
return self.tickets.pop(label, None)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="DNS over QUIC server")
parser.add_argument(
"--host",
type=str,
default="::",
help="listen on the specified address (defaults to ::)",
)
parser.add_argument(
"--port",
type=int,
default=784,
help="listen on the specified port (defaults to 784)",
)
parser.add_argument(
"-k",
"--private-key",
type=str,
required=True,
help="load the TLS private key from the specified file",
)
parser.add_argument(
"-c",
"--certificate",
type=str,
required=True,
help="load the TLS certificate from the specified file",
)
parser.add_argument(
"-r",
"--resolver",
type=str,
default="8.8.8.8",
help="Upstream Classic DNS resolver to use",
)
parser.add_argument(
"-s",
"--stateless-retry",
action="store_true",
help="send a stateless retry for new connections",
)
parser.add_argument(
"-q", "--quic-log", type=str, help="log QUIC events to a file in QLOG format"
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="increase logging verbosity"
)
args = parser.parse_args()
logging.basicConfig(
format="%(asctime)s %(levelname)s %(name)s %(message)s",
level=logging.DEBUG if args.verbose else logging.INFO,
)
if args.quic_log:
quic_logger = QuicLogger()
else:
quic_logger = None
configuration = QuicConfiguration(
alpn_protocols=["dq"],
is_client=False,
max_datagram_frame_size=65536,
quic_logger=quic_logger,
)
configuration.load_cert_chain(args.certificate, args.private_key)
ticket_store = SessionTicketStore()
if uvloop is not None:
uvloop.install()
loop = asyncio.get_event_loop()
loop.run_until_complete(
serve(
args.host,
args.port,
configuration=configuration,
create_protocol=DnsServerProtocol,
session_ticket_fetcher=ticket_store.pop,
session_ticket_handler=ticket_store.add,
stateless_retry=args.stateless_retry,
)
)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
if configuration.quic_logger is not None:
with open(args.quic_log, "w") as logger_fp:
json.dump(configuration.quic_logger.to_dict(), logger_fp, indent=4)
| <filename>QUIC/examples/doq_server.py
import argparse
import asyncio
import json
import logging
from typing import Dict, Optional
from dnslib.dns import DNSRecord
from aioquic.asyncio import QuicConnectionProtocol, serve
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.connection import QuicConnection
from aioquic.quic.events import ProtocolNegotiated, QuicEvent, StreamDataReceived
from aioquic.quic.logger import QuicLogger
from aioquic.tls import SessionTicket
try:
import uvloop
except ImportError:
uvloop = None
class DnsConnection:
def __init__(self, quic: QuicConnection):
self._quic = quic
def do_query(self, payload) -> bytes:
q = DNSRecord.parse(payload)
return q.send(self.resolver(), 53)
def resolver(self) -> str:
return args.resolver
def handle_event(self, event: QuicEvent) -> None:
if isinstance(event, StreamDataReceived):
data = self.do_query(event.data)
end_stream = False
self._quic.send_stream_data(event.stream_id, data, end_stream)
class DnsServerProtocol(QuicConnectionProtocol):
# -00 specifies 'dq', 'doq', and 'doq-h00' (the latter obviously tying to
# the version of the draft it matches). This is confusing, so we'll just
# support them all, until future drafts define conflicting behaviour.
SUPPORTED_ALPNS = ["dq", "doq", "doq-h00"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._dns: Optional[DnsConnection] = None
def quic_event_received(self, event: QuicEvent):
if isinstance(event, ProtocolNegotiated):
if event.alpn_protocol in DnsServerProtocol.SUPPORTED_ALPNS:
self._dns = DnsConnection(self._quic)
if self._dns is not None:
self._dns.handle_event(event)
class SessionTicketStore:
"""
Simple in-memory store for session tickets.
"""
def __init__(self) -> None:
self.tickets: Dict[bytes, SessionTicket] = {}
def add(self, ticket: SessionTicket) -> None:
self.tickets[ticket.ticket] = ticket
def pop(self, label: bytes) -> Optional[SessionTicket]:
return self.tickets.pop(label, None)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="DNS over QUIC server")
parser.add_argument(
"--host",
type=str,
default="::",
help="listen on the specified address (defaults to ::)",
)
parser.add_argument(
"--port",
type=int,
default=784,
help="listen on the specified port (defaults to 784)",
)
parser.add_argument(
"-k",
"--private-key",
type=str,
required=True,
help="load the TLS private key from the specified file",
)
parser.add_argument(
"-c",
"--certificate",
type=str,
required=True,
help="load the TLS certificate from the specified file",
)
parser.add_argument(
"-r",
"--resolver",
type=str,
default="8.8.8.8",
help="Upstream Classic DNS resolver to use",
)
parser.add_argument(
"-s",
"--stateless-retry",
action="store_true",
help="send a stateless retry for new connections",
)
parser.add_argument(
"-q", "--quic-log", type=str, help="log QUIC events to a file in QLOG format"
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="increase logging verbosity"
)
args = parser.parse_args()
logging.basicConfig(
format="%(asctime)s %(levelname)s %(name)s %(message)s",
level=logging.DEBUG if args.verbose else logging.INFO,
)
if args.quic_log:
quic_logger = QuicLogger()
else:
quic_logger = None
configuration = QuicConfiguration(
alpn_protocols=["dq"],
is_client=False,
max_datagram_frame_size=65536,
quic_logger=quic_logger,
)
configuration.load_cert_chain(args.certificate, args.private_key)
ticket_store = SessionTicketStore()
if uvloop is not None:
uvloop.install()
loop = asyncio.get_event_loop()
loop.run_until_complete(
serve(
args.host,
args.port,
configuration=configuration,
create_protocol=DnsServerProtocol,
session_ticket_fetcher=ticket_store.pop,
session_ticket_handler=ticket_store.add,
stateless_retry=args.stateless_retry,
)
)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
if configuration.quic_logger is not None:
with open(args.quic_log, "w") as logger_fp:
json.dump(configuration.quic_logger.to_dict(), logger_fp, indent=4)
| en | 0.888178 | # -00 specifies 'dq', 'doq', and 'doq-h00' (the latter obviously tying to # the version of the draft it matches). This is confusing, so we'll just # support them all, until future drafts define conflicting behaviour. Simple in-memory store for session tickets. | 2.333119 | 2 |
demo/armor_classify/classification.py | DRL-CASIA/Perception | 39 | 6631701 | <gh_stars>10-100
import torch
from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
import numpy as np
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(3, 12, 3)
self.conv2 = nn.Conv2d(12, 24, 3)
self.conv3 = nn.Conv2d(24, 36, 5)
self.fc1 = nn.Linear(24 * 4 * 4 , 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 128)
self.fc4 = nn.Linear(128, 6)
self.pool = nn.MaxPool2d(3, 3)
# forward这个函数定义了前向传播的运算
# 流程:卷积->relu->压缩->全连接层->relu->最后再一次全连接层
def forward(self, x):
# 第一层卷积并做非线性变换
x = F.relu(self.conv1(x))
# 结果进行压缩
x = self.pool(x)
# 第二层卷积并做非线性变换
x = F.relu(self.conv2(x))
# 再压缩
x = self.pool(x)
# 把二维特征图变为一维,这样全连接层才能处理
x = x.view(-1, 24* 4 * 4)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
def classification_modelload():
net_model = torch.load('./armor_classify/data/net_armor_model.pt')
net_model.eval()
return net_model
| import torch
from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
import numpy as np
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(3, 12, 3)
self.conv2 = nn.Conv2d(12, 24, 3)
self.conv3 = nn.Conv2d(24, 36, 5)
self.fc1 = nn.Linear(24 * 4 * 4 , 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 128)
self.fc4 = nn.Linear(128, 6)
self.pool = nn.MaxPool2d(3, 3)
# forward这个函数定义了前向传播的运算
# 流程:卷积->relu->压缩->全连接层->relu->最后再一次全连接层
def forward(self, x):
# 第一层卷积并做非线性变换
x = F.relu(self.conv1(x))
# 结果进行压缩
x = self.pool(x)
# 第二层卷积并做非线性变换
x = F.relu(self.conv2(x))
# 再压缩
x = self.pool(x)
# 把二维特征图变为一维,这样全连接层才能处理
x = x.view(-1, 24* 4 * 4)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
def classification_modelload():
net_model = torch.load('./armor_classify/data/net_armor_model.pt')
net_model.eval()
return net_model | zh | 0.947154 | # forward这个函数定义了前向传播的运算 # 流程:卷积->relu->压缩->全连接层->relu->最后再一次全连接层 # 第一层卷积并做非线性变换 # 结果进行压缩 # 第二层卷积并做非线性变换 # 再压缩 # 把二维特征图变为一维,这样全连接层才能处理 | 2.907692 | 3 |
nabu/neuralnetworks/decoders/max_decoder.py | rzcwade/nabu | 3 | 6631702 | <filename>nabu/neuralnetworks/decoders/max_decoder.py
'''@file max_decoder.py
contains the MaxDecoder'''
import os
import tensorflow as tf
import decoder
from nabu.neuralnetworks.components import ops
class MaxDecoder(decoder.Decoder):
'''max Decoder'''
def __init__(self, conf, model):
'''
Decoder constructor
Args:
conf: the decoder config
model: the model that will be used for decoding
'''
super(MaxDecoder, self).__init__(conf, model)
self.alphabets = {}
for o in model.output_names:
#get the alphabet
self.alphabets[o] = self.conf['%s_alphabet' % o].split(' ')
def __call__(self, inputs, input_seq_length):
'''decode a batch of data
Args:
inputs: the inputs as a dictionary of [batch_size x time x ...]
tensors
input_seq_length: the input sequence lengths as a dictionary of
[batch_size] vectors
Returns:
- the decoded sequences as a dictionary of outputs
'''
with tf.name_scope('max_decoder'):
#create the decoding graph
logits, logits_seq_length =\
self.model(
inputs, input_seq_length, targets=[],
target_seq_length=[], is_training=False)
outputs = {}
for out in logits:
sm = tf.nn.softmax(logits[out])
outputs[out] = (
tf.cast(tf.argmax(sm, axis=2), tf.int32),
logits_seq_length[out])
return outputs
def write(self, outputs, directory, names):
'''write the output of the decoder to disk
args:
outputs: the outputs of the decoder
directory: the directory where the results should be written
names: the names of the utterances in outputs
'''
for o in outputs:
batch_size = outputs[o][0].shape[0]
with open(os.path.join(directory, o), 'a') as fid:
for i in range(batch_size):
output = outputs[o][0][i, :outputs[o][1][i]]
text = ' '.join([self.alphabets[o][j] for j in output])
fid.write('%s %s\n' % (names[i], text))
def update_evaluation_loss(self, loss, outputs, references,
reference_seq_length):
'''update the evaluation loss
args:
loss: the current evaluation loss
outputs: the outputs of the decoder as a dictionary
references: the references as a dictionary
reference_seq_length: the sequence lengths of the references
Returns:
an op to update the evalution loss
'''
#create a valiable to hold the total number of reference targets
num_targets = tf.get_variable(
name='num_targets',
shape=[],
dtype=tf.float32,
initializer=tf.zeros_initializer(),
trainable=False
)
#stack all the logits
stacked_outputs = {
t:ops.stack_seq(outputs[t][0], outputs[t][1])
for t in outputs}
#create the stacked targets
stacked_targets = {
t:tf.cast(ops.stack_seq(references[t],
reference_seq_length[t]), tf.int32)
for t in references}
#compute the number of errors
errors = [
tf.reduce_sum(tf.reduce_sum(tf.cast(tf.not_equal(
stacked_outputs[o], stacked_targets[o]), tf.float32)))
for o in outputs]
errors = tf.reduce_sum(errors)
#compute the number of targets in this batch
batch_targets = tf.reduce_sum([
tf.reduce_sum(lengths)
for lengths in reference_seq_length.values()])
new_num_targets = num_targets + tf.cast(batch_targets, tf.float32)
#an operation to update the loss
update_loss = loss.assign(
(loss*num_targets + errors)/new_num_targets).op
#add an operation to update the number of targets
with tf.control_dependencies([update_loss]):
update_loss = num_targets.assign(new_num_targets).op
return update_loss
| <filename>nabu/neuralnetworks/decoders/max_decoder.py
'''@file max_decoder.py
contains the MaxDecoder'''
import os
import tensorflow as tf
import decoder
from nabu.neuralnetworks.components import ops
class MaxDecoder(decoder.Decoder):
'''max Decoder'''
def __init__(self, conf, model):
'''
Decoder constructor
Args:
conf: the decoder config
model: the model that will be used for decoding
'''
super(MaxDecoder, self).__init__(conf, model)
self.alphabets = {}
for o in model.output_names:
#get the alphabet
self.alphabets[o] = self.conf['%s_alphabet' % o].split(' ')
def __call__(self, inputs, input_seq_length):
'''decode a batch of data
Args:
inputs: the inputs as a dictionary of [batch_size x time x ...]
tensors
input_seq_length: the input sequence lengths as a dictionary of
[batch_size] vectors
Returns:
- the decoded sequences as a dictionary of outputs
'''
with tf.name_scope('max_decoder'):
#create the decoding graph
logits, logits_seq_length =\
self.model(
inputs, input_seq_length, targets=[],
target_seq_length=[], is_training=False)
outputs = {}
for out in logits:
sm = tf.nn.softmax(logits[out])
outputs[out] = (
tf.cast(tf.argmax(sm, axis=2), tf.int32),
logits_seq_length[out])
return outputs
def write(self, outputs, directory, names):
'''write the output of the decoder to disk
args:
outputs: the outputs of the decoder
directory: the directory where the results should be written
names: the names of the utterances in outputs
'''
for o in outputs:
batch_size = outputs[o][0].shape[0]
with open(os.path.join(directory, o), 'a') as fid:
for i in range(batch_size):
output = outputs[o][0][i, :outputs[o][1][i]]
text = ' '.join([self.alphabets[o][j] for j in output])
fid.write('%s %s\n' % (names[i], text))
def update_evaluation_loss(self, loss, outputs, references,
reference_seq_length):
'''update the evaluation loss
args:
loss: the current evaluation loss
outputs: the outputs of the decoder as a dictionary
references: the references as a dictionary
reference_seq_length: the sequence lengths of the references
Returns:
an op to update the evalution loss
'''
#create a valiable to hold the total number of reference targets
num_targets = tf.get_variable(
name='num_targets',
shape=[],
dtype=tf.float32,
initializer=tf.zeros_initializer(),
trainable=False
)
#stack all the logits
stacked_outputs = {
t:ops.stack_seq(outputs[t][0], outputs[t][1])
for t in outputs}
#create the stacked targets
stacked_targets = {
t:tf.cast(ops.stack_seq(references[t],
reference_seq_length[t]), tf.int32)
for t in references}
#compute the number of errors
errors = [
tf.reduce_sum(tf.reduce_sum(tf.cast(tf.not_equal(
stacked_outputs[o], stacked_targets[o]), tf.float32)))
for o in outputs]
errors = tf.reduce_sum(errors)
#compute the number of targets in this batch
batch_targets = tf.reduce_sum([
tf.reduce_sum(lengths)
for lengths in reference_seq_length.values()])
new_num_targets = num_targets + tf.cast(batch_targets, tf.float32)
#an operation to update the loss
update_loss = loss.assign(
(loss*num_targets + errors)/new_num_targets).op
#add an operation to update the number of targets
with tf.control_dependencies([update_loss]):
update_loss = num_targets.assign(new_num_targets).op
return update_loss
| en | 0.760865 | @file max_decoder.py contains the MaxDecoder max Decoder Decoder constructor Args: conf: the decoder config model: the model that will be used for decoding #get the alphabet decode a batch of data Args: inputs: the inputs as a dictionary of [batch_size x time x ...] tensors input_seq_length: the input sequence lengths as a dictionary of [batch_size] vectors Returns: - the decoded sequences as a dictionary of outputs #create the decoding graph write the output of the decoder to disk args: outputs: the outputs of the decoder directory: the directory where the results should be written names: the names of the utterances in outputs update the evaluation loss args: loss: the current evaluation loss outputs: the outputs of the decoder as a dictionary references: the references as a dictionary reference_seq_length: the sequence lengths of the references Returns: an op to update the evalution loss #create a valiable to hold the total number of reference targets #stack all the logits #create the stacked targets #compute the number of errors #compute the number of targets in this batch #an operation to update the loss #add an operation to update the number of targets | 2.948722 | 3 |
gran/utils/data_helper.py | longland-m/wikigen | 0 | 6631703 | # From GRAN repo, with minor changes
###############################################################################
#
# Some code is adapted from https://github.com/JiaxuanYou/graph-generation
#
###############################################################################
import os
import torch
import pickle
import numpy as np
from scipy import sparse as sp
import networkx as nx
import torch.nn.functional as F
__all__ = [
'save_graph_list', 'load_graph_list', 'graph_load_batch',
'preprocess_graph_list', 'create_graphs'
]
# save a list of graphs
def save_graph_list(G_list, fname):
with open(fname, "wb") as f:
pickle.dump(G_list, f)
def pick_connected_component_new(G):
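  # scan nodes in ascending id order until hitting a node (id >= 1) whose
  # neighbours all have larger ids; keep only the nodes before it and return
  # the largest connected component of that subgraph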
adj_dict = nx.to_dict_of_lists(G)
for node_id in sorted(adj_dict.keys()):
id_min = min(adj_dict[node_id])
if node_id < id_min and node_id >= 1:
# if node_id<id_min and node_id>=4:
break
node_list = list(
      range(node_id))  # only include nodes prior to node "node_id"
G = G.subgraph(node_list)
G = max(nx.connected_component_subgraphs(G), key=len)
return G
def load_graph_list(fname, is_real=True):
with open(fname, "rb") as f:
graph_list = pickle.load(f)
for i in range(len(graph_list)):
edges_with_selfloops = list(graph_list[i].selfloop_edges())
if len(edges_with_selfloops) > 0:
graph_list[i].remove_edges_from(edges_with_selfloops)
if is_real:
graph_list[i] = max(
nx.connected_component_subgraphs(graph_list[i]), key=len)
graph_list[i] = nx.convert_node_labels_to_integers(graph_list[i])
else:
graph_list[i] = pick_connected_component_new(graph_list[i])
return graph_list
def preprocess_graph_list(graph_list, is_real=True):
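  # is_real is assumed True by default so the graphs get the same treatment
  # as in load_graph_list (keep the largest connected component)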
for i in range(len(graph_list)):
edges_with_selfloops = list(graph_list[i].selfloop_edges())
if len(edges_with_selfloops) > 0:
graph_list[i].remove_edges_from(edges_with_selfloops)
if is_real:
graph_list[i] = max(
nx.connected_component_subgraphs(graph_list[i]), key=len)
graph_list[i] = nx.convert_node_labels_to_integers(graph_list[i])
else:
graph_list[i] = pick_connected_component_new(graph_list[i])
return graph_list
def graph_load_batch(data_dir,
min_num_nodes=20,
max_num_nodes=1000,
name='ENZYMES',
node_attributes=True,
graph_labels=True):
'''
load many graphs, e.g. enzymes
:return: a list of graphs
'''
print('Loading graph dataset: ' + str(name))
G = nx.Graph()
# load data
path = os.path.join(data_dir, name)
data_adj = np.loadtxt(
os.path.join(path, '{}_A.txt'.format(name)), delimiter=',').astype(int)
if node_attributes:
data_node_att = np.loadtxt(
os.path.join(path, '{}_node_attributes.txt'.format(name)),
delimiter=',')
data_node_label = np.loadtxt(
os.path.join(path, '{}_node_labels.txt'.format(name)),
delimiter=',').astype(int)
data_graph_indicator = np.loadtxt(
os.path.join(path, '{}_graph_indicator.txt'.format(name)),
delimiter=',').astype(int)
if graph_labels:
data_graph_labels = np.loadtxt(
os.path.join(path, '{}_graph_labels.txt'.format(name)),
delimiter=',').astype(int)
data_tuple = list(map(tuple, data_adj))
# add edges
G.add_edges_from(data_tuple)
# add node attributes
for i in range(data_node_label.shape[0]):
if node_attributes:
G.add_node(i + 1, feature=data_node_att[i])
G.add_node(i + 1, label=data_node_label[i])
G.remove_nodes_from(list(nx.isolates(G)))
# remove self-loop
G.remove_edges_from(nx.selfloop_edges(G))
# split into graphs
graph_num = data_graph_indicator.max()
node_list = np.arange(data_graph_indicator.shape[0]) + 1
graphs = []
max_nodes = 0
for i in range(graph_num):
# find the nodes for each graph
nodes = node_list[data_graph_indicator == i + 1]
G_sub = G.subgraph(nodes)
if graph_labels:
G_sub.graph['label'] = data_graph_labels[i]
if G_sub.number_of_nodes() >= min_num_nodes and G_sub.number_of_nodes(
) <= max_num_nodes:
graphs.append(G_sub)
if G_sub.number_of_nodes() > max_nodes:
max_nodes = G_sub.number_of_nodes()
print('Loaded')
return graphs
def create_graphs(graph_type, data_dir='data', noise=10.0, seed=1234, fname=''):
npr = np.random.RandomState(seed)
### load datasets
graphs = []
# GRAN examples. Remove this later
if graph_type == 'grid':
graphs = []
for i in range(10, 20):
for j in range(10, 20):
graphs.append(nx.grid_2d_graph(i, j))
elif graph_type == 'lobster':
graphs = []
p1 = 0.7
p2 = 0.7
count = 0
min_node = 10
max_node = 100
max_edge = 0
mean_node = 80
num_graphs = 100
seed_tmp = seed
while count < num_graphs:
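      # resample lobsters until num_graphs of them fall within
      # [min_node, max_node] nodes, bumping the seed for each new draw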
G = nx.random_lobster(mean_node, p1, p2, seed=seed_tmp)
if len(G.nodes()) >= min_node and len(G.nodes()) <= max_node:
graphs.append(G)
if G.number_of_edges() > max_edge:
max_edge = G.number_of_edges()
count += 1
seed_tmp += 1
elif graph_type == 'DD':
graphs = graph_load_batch(
data_dir,
min_num_nodes=100,
max_num_nodes=500,
name='DD',
node_attributes=False,
graph_labels=True)
elif graph_type == 'FIRSTMM_DB':
graphs = graph_load_batch(
data_dir,
min_num_nodes=0,
max_num_nodes=10000,
name='FIRSTMM_DB',
node_attributes=False,
graph_labels=True)
elif graph_type == 'custom':
graphs = pickle.load(open(data_dir+fname, 'rb'))
num_nodes = [gg.number_of_nodes() for gg in graphs]
num_edges = [gg.number_of_edges() for gg in graphs]
print('max # nodes = {} || mean # nodes = {}'.format(max(num_nodes), np.mean(num_nodes)))
print('max # edges = {} || mean # edges = {}'.format(max(num_edges), np.mean(num_edges)))
return graphs
| # From GRAN repo, with minor changes
###############################################################################
#
# Some code is adapted from https://github.com/JiaxuanYou/graph-generation
#
###############################################################################
import os
import torch
import pickle
import numpy as np
from scipy import sparse as sp
import networkx as nx
import torch.nn.functional as F
__all__ = [
'save_graph_list', 'load_graph_list', 'graph_load_batch',
'preprocess_graph_list', 'create_graphs'
]
# save a list of graphs
def save_graph_list(G_list, fname):
with open(fname, "wb") as f:
pickle.dump(G_list, f)
def pick_connected_component_new(G):
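  # scan nodes in ascending id order until hitting a node (id >= 1) whose
  # neighbours all have larger ids; keep only the nodes before it and return
  # the largest connected component of that subgraph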
adj_dict = nx.to_dict_of_lists(G)
for node_id in sorted(adj_dict.keys()):
id_min = min(adj_dict[node_id])
if node_id < id_min and node_id >= 1:
# if node_id<id_min and node_id>=4:
break
node_list = list(
      range(node_id))  # only include nodes prior to node "node_id"
G = G.subgraph(node_list)
G = max(nx.connected_component_subgraphs(G), key=len)
return G
def load_graph_list(fname, is_real=True):
with open(fname, "rb") as f:
graph_list = pickle.load(f)
for i in range(len(graph_list)):
edges_with_selfloops = list(graph_list[i].selfloop_edges())
if len(edges_with_selfloops) > 0:
graph_list[i].remove_edges_from(edges_with_selfloops)
if is_real:
graph_list[i] = max(
nx.connected_component_subgraphs(graph_list[i]), key=len)
graph_list[i] = nx.convert_node_labels_to_integers(graph_list[i])
else:
graph_list[i] = pick_connected_component_new(graph_list[i])
return graph_list
def preprocess_graph_list(graph_list, is_real=True):
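  # is_real is assumed True by default so the graphs get the same treatment
  # as in load_graph_list (keep the largest connected component)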
for i in range(len(graph_list)):
edges_with_selfloops = list(graph_list[i].selfloop_edges())
if len(edges_with_selfloops) > 0:
graph_list[i].remove_edges_from(edges_with_selfloops)
if is_real:
graph_list[i] = max(
nx.connected_component_subgraphs(graph_list[i]), key=len)
graph_list[i] = nx.convert_node_labels_to_integers(graph_list[i])
else:
graph_list[i] = pick_connected_component_new(graph_list[i])
return graph_list
def graph_load_batch(data_dir,
min_num_nodes=20,
max_num_nodes=1000,
name='ENZYMES',
node_attributes=True,
graph_labels=True):
'''
load many graphs, e.g. enzymes
:return: a list of graphs
'''
print('Loading graph dataset: ' + str(name))
G = nx.Graph()
# load data
path = os.path.join(data_dir, name)
data_adj = np.loadtxt(
os.path.join(path, '{}_A.txt'.format(name)), delimiter=',').astype(int)
if node_attributes:
data_node_att = np.loadtxt(
os.path.join(path, '{}_node_attributes.txt'.format(name)),
delimiter=',')
data_node_label = np.loadtxt(
os.path.join(path, '{}_node_labels.txt'.format(name)),
delimiter=',').astype(int)
data_graph_indicator = np.loadtxt(
os.path.join(path, '{}_graph_indicator.txt'.format(name)),
delimiter=',').astype(int)
if graph_labels:
data_graph_labels = np.loadtxt(
os.path.join(path, '{}_graph_labels.txt'.format(name)),
delimiter=',').astype(int)
data_tuple = list(map(tuple, data_adj))
# add edges
G.add_edges_from(data_tuple)
# add node attributes
for i in range(data_node_label.shape[0]):
if node_attributes:
G.add_node(i + 1, feature=data_node_att[i])
G.add_node(i + 1, label=data_node_label[i])
G.remove_nodes_from(list(nx.isolates(G)))
# remove self-loop
G.remove_edges_from(nx.selfloop_edges(G))
# split into graphs
graph_num = data_graph_indicator.max()
node_list = np.arange(data_graph_indicator.shape[0]) + 1
graphs = []
max_nodes = 0
for i in range(graph_num):
# find the nodes for each graph
nodes = node_list[data_graph_indicator == i + 1]
G_sub = G.subgraph(nodes)
if graph_labels:
G_sub.graph['label'] = data_graph_labels[i]
if G_sub.number_of_nodes() >= min_num_nodes and G_sub.number_of_nodes(
) <= max_num_nodes:
graphs.append(G_sub)
if G_sub.number_of_nodes() > max_nodes:
max_nodes = G_sub.number_of_nodes()
print('Loaded')
return graphs
def create_graphs(graph_type, data_dir='data', noise=10.0, seed=1234, fname=''):
npr = np.random.RandomState(seed)
### load datasets
graphs = []
# GRAN examples. Remove this later
if graph_type == 'grid':
graphs = []
for i in range(10, 20):
for j in range(10, 20):
graphs.append(nx.grid_2d_graph(i, j))
elif graph_type == 'lobster':
graphs = []
p1 = 0.7
p2 = 0.7
count = 0
min_node = 10
max_node = 100
max_edge = 0
mean_node = 80
num_graphs = 100
seed_tmp = seed
while count < num_graphs:
G = nx.random_lobster(mean_node, p1, p2, seed=seed_tmp)
if len(G.nodes()) >= min_node and len(G.nodes()) <= max_node:
graphs.append(G)
if G.number_of_edges() > max_edge:
max_edge = G.number_of_edges()
count += 1
seed_tmp += 1
elif graph_type == 'DD':
graphs = graph_load_batch(
data_dir,
min_num_nodes=100,
max_num_nodes=500,
name='DD',
node_attributes=False,
graph_labels=True)
elif graph_type == 'FIRSTMM_DB':
graphs = graph_load_batch(
data_dir,
min_num_nodes=0,
max_num_nodes=10000,
name='FIRSTMM_DB',
node_attributes=False,
graph_labels=True)
elif graph_type == 'custom':
graphs = pickle.load(open(data_dir+fname, 'rb'))
num_nodes = [gg.number_of_nodes() for gg in graphs]
num_edges = [gg.number_of_edges() for gg in graphs]
print('max # nodes = {} || mean # nodes = {}'.format(max(num_nodes), np.mean(num_nodes)))
print('max # edges = {} || mean # edges = {}'.format(max(num_edges), np.mean(num_edges)))
return graphs
| en | 0.517412 | # From GRAN repo, with minor changes ############################################################################### # # Some code is adapted from https://github.com/JiaxuanYou/graph-generation # ############################################################################### # save a list of graphs # if node_id<id_min and node_id>=4: # only include node prior than node "node_id" load many graphs, e.g. enzymes :return: a list of graphs # load data # add edges # add node attributes # remove self-loop # split into graphs # find the nodes for each graph ### load datasets # GRAN examples. Remove this later # nodes = {} || mean # nodes = {}'.format(max(num_nodes), np.mean(num_nodes))) # edges = {} || mean # edges = {}'.format(max(num_edges), np.mean(num_edges))) | 2.367504 | 2 |
tools/build_r8lib.py | demon-xxi/r8 | 0 | 6631704 | <filename>tools/build_r8lib.py<gh_stars>0
#!/usr/bin/env python
# Copyright (c) 2018, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
'''
Build r8lib.jar using src/main/keep.txt and test that d8_api_usage_sample.jar
works with the minified R8.
'''
import argparse
import gradle
import os
import subprocess
import toolhelper
import utils
parser = argparse.ArgumentParser(description=__doc__.strip(),
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-e', '--exclude_deps', action='store_true',
help='Create lib jar without dependencies')
parser.add_argument('-k', '--keep', default=utils.R8LIB_KEEP_RULES,
help='Keep rules file for lib')
parser.add_argument('-n', '--no_relocate', action='store_true',
help='Create lib jar without relocating libraries')
parser.add_argument('-o', '--out', default=None,
help='Output for built library')
parser.add_argument('-t', '--target', default='r8',
help='Compile target for library')
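# Android API level and jar locations shared by the build and the smoke tests below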
API_LEVEL = 26
DEPS_JAR = os.path.join(utils.LIBS, 'deps.jar')
SAMPLE_JAR = os.path.join(utils.REPO_ROOT, 'tests', 'd8_api_usage_sample.jar')
def build_r8lib(target, exclude_deps, no_relocate, keep_rules_path,
output_path, **kwargs):
# Clean the build directory to ensure no repackaging of any existing
# lib or deps.
gradle.RunGradle(['clean'])
lib_args = [target]
deps_args = ['repackageDeps']
if exclude_deps:
lib_args.append('-Pexclude_deps')
if no_relocate:
lib_args.append('-Plib_no_relocate')
deps_args.append('-Plib_no_relocate')
# Produce the r8lib target to be processed later.
gradle.RunGradle(lib_args)
target_lib = os.path.join(utils.LIBS, target + '.jar')
temp_lib = os.path.join(utils.LIBS, target + '_to_process.jar')
os.rename(target_lib, temp_lib)
# Produce the dependencies needed for running r8 on lib.jar.
gradle.RunGradle(deps_args)
temp_deps = os.path.join(utils.LIBS, target + 'lib_deps.jar')
os.rename(DEPS_JAR, temp_deps)
# Produce R8 for compiling lib
if output_path is None:
output_path = target + 'lib.jar'
output_map_path = output_path + '.map'
toolhelper.run(
'r8',
('--release',
'--classfile',
'--lib', utils.RT_JAR,
'--lib', temp_deps,
temp_lib,
'--output', output_path,
'--pg-conf', keep_rules_path,
'--pg-map-output', output_map_path),
**kwargs)
if exclude_deps:
return [output_path, temp_deps]
else:
return [output_path]
def test_d8sample(paths):
with utils.TempDir() as path:
args = ['java', '-cp', '%s:%s' % (SAMPLE_JAR, ":".join(paths)),
'com.android.tools.apiusagesample.D8ApiUsageSample',
'--output', path,
'--min-api', str(API_LEVEL),
'--lib', utils.get_android_jar(API_LEVEL),
'--classpath', utils.R8_JAR,
'--main-dex-list', '/dev/null',
os.path.join(utils.BUILD, 'test/examples/hello.jar')]
utils.PrintCmd(args)
subprocess.check_call(args)
def test_r8command(paths):
with utils.TempDir() as path:
# SAMPLE_JAR and LIB_JAR should not have any classes in common, since e.g.
# R8CommandParser should have been minified in LIB_JAR.
# Just in case R8CommandParser is also present in LIB_JAR, we put
# SAMPLE_JAR first on the classpath to use its version of R8CommandParser.
args = ['java', '-cp', '%s:%s' % (SAMPLE_JAR, ":".join(paths)),
'com.android.tools.r8.R8CommandParser',
'--output', path + "/output.zip",
'--min-api', str(API_LEVEL),
'--lib', utils.get_android_jar(API_LEVEL),
'--main-dex-list', '/dev/null',
os.path.join(utils.BUILD, 'test/examples/hello.jar')]
utils.PrintCmd(args)
subprocess.check_call(args)
def test_r8cfcommand(paths):
with utils.TempDir() as path:
# SAMPLE_JAR and LIB_JAR should not have any classes in common, since e.g.
# R8CommandParser should have been minified in LIB_JAR.
# Just in case R8CommandParser is also present in LIB_JAR, we put
# SAMPLE_JAR first on the classpath to use its version of R8CommandParser.
args = ['java', '-cp', '%s:%s' % (SAMPLE_JAR, ":".join(paths)),
'com.android.tools.r8.R8CommandParser',
'--classfile',
'--output', path + "/output.jar",
'--lib', utils.RT_JAR,
os.path.join(utils.BUILD, 'test/examples/hello.jar')]
utils.PrintCmd(args)
subprocess.check_call(args)
def main():
# Handle --help
args = parser.parse_args()
output_paths = build_r8lib(
args.target, args.exclude_deps, args.no_relocate, args.keep, args.out)
if args.target == 'r8':
gradle.RunGradle(['buildExampleJars'])
test_r8command(output_paths)
test_r8cfcommand(output_paths)
if args.target == 'd8':
gradle.RunGradle(['buildExampleJars'])
test_d8sample(output_paths)
if __name__ == '__main__':
main()
| <filename>tools/build_r8lib.py<gh_stars>0
#!/usr/bin/env python
# Copyright (c) 2018, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
'''
Build r8lib.jar using src/main/keep.txt and test that d8_api_usage_sample.jar
works with the minified R8.
'''
import argparse
import gradle
import os
import subprocess
import toolhelper
import utils
parser = argparse.ArgumentParser(description=__doc__.strip(),
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-e', '--exclude_deps', action='store_true',
help='Create lib jar without dependencies')
parser.add_argument('-k', '--keep', default=utils.R8LIB_KEEP_RULES,
help='Keep rules file for lib')
parser.add_argument('-n', '--no_relocate', action='store_true',
help='Create lib jar without relocating libraries')
parser.add_argument('-o', '--out', default=None,
help='Output for built library')
parser.add_argument('-t', '--target', default='r8',
help='Compile target for library')
API_LEVEL = 26
DEPS_JAR = os.path.join(utils.LIBS, 'deps.jar')
SAMPLE_JAR = os.path.join(utils.REPO_ROOT, 'tests', 'd8_api_usage_sample.jar')
def build_r8lib(target, exclude_deps, no_relocate, keep_rules_path,
output_path, **kwargs):
# Clean the build directory to ensure no repackaging of any existing
# lib or deps.
gradle.RunGradle(['clean'])
lib_args = [target]
deps_args = ['repackageDeps']
if exclude_deps:
lib_args.append('-Pexclude_deps')
if no_relocate:
lib_args.append('-Plib_no_relocate')
deps_args.append('-Plib_no_relocate')
# Produce the r8lib target to be processed later.
gradle.RunGradle(lib_args)
target_lib = os.path.join(utils.LIBS, target + '.jar')
temp_lib = os.path.join(utils.LIBS, target + '_to_process.jar')
os.rename(target_lib, temp_lib)
# Produce the dependencies needed for running r8 on lib.jar.
gradle.RunGradle(deps_args)
temp_deps = os.path.join(utils.LIBS, target + 'lib_deps.jar')
os.rename(DEPS_JAR, temp_deps)
# Produce R8 for compiling lib
if output_path is None:
output_path = target + 'lib.jar'
output_map_path = output_path + '.map'
toolhelper.run(
'r8',
('--release',
'--classfile',
'--lib', utils.RT_JAR,
'--lib', temp_deps,
temp_lib,
'--output', output_path,
'--pg-conf', keep_rules_path,
'--pg-map-output', output_map_path),
**kwargs)
if exclude_deps:
return [output_path, temp_deps]
else:
return [output_path]
def test_d8sample(paths):
with utils.TempDir() as path:
args = ['java', '-cp', '%s:%s' % (SAMPLE_JAR, ":".join(paths)),
'com.android.tools.apiusagesample.D8ApiUsageSample',
'--output', path,
'--min-api', str(API_LEVEL),
'--lib', utils.get_android_jar(API_LEVEL),
'--classpath', utils.R8_JAR,
'--main-dex-list', '/dev/null',
os.path.join(utils.BUILD, 'test/examples/hello.jar')]
utils.PrintCmd(args)
subprocess.check_call(args)
def test_r8command(paths):
with utils.TempDir() as path:
# SAMPLE_JAR and LIB_JAR should not have any classes in common, since e.g.
# R8CommandParser should have been minified in LIB_JAR.
# Just in case R8CommandParser is also present in LIB_JAR, we put
# SAMPLE_JAR first on the classpath to use its version of R8CommandParser.
args = ['java', '-cp', '%s:%s' % (SAMPLE_JAR, ":".join(paths)),
'com.android.tools.r8.R8CommandParser',
'--output', path + "/output.zip",
'--min-api', str(API_LEVEL),
'--lib', utils.get_android_jar(API_LEVEL),
'--main-dex-list', '/dev/null',
os.path.join(utils.BUILD, 'test/examples/hello.jar')]
utils.PrintCmd(args)
subprocess.check_call(args)
def test_r8cfcommand(paths):
with utils.TempDir() as path:
# SAMPLE_JAR and LIB_JAR should not have any classes in common, since e.g.
# R8CommandParser should have been minified in LIB_JAR.
# Just in case R8CommandParser is also present in LIB_JAR, we put
# SAMPLE_JAR first on the classpath to use its version of R8CommandParser.
args = ['java', '-cp', '%s:%s' % (SAMPLE_JAR, ":".join(paths)),
'com.android.tools.r8.R8CommandParser',
'--classfile',
'--output', path + "/output.jar",
'--lib', utils.RT_JAR,
os.path.join(utils.BUILD, 'test/examples/hello.jar')]
utils.PrintCmd(args)
subprocess.check_call(args)
def main():
# Handle --help
args = parser.parse_args()
output_paths = build_r8lib(
args.target, args.exclude_deps, args.no_relocate, args.keep, args.out)
if args.target == 'r8':
gradle.RunGradle(['buildExampleJars'])
test_r8command(output_paths)
test_r8cfcommand(output_paths)
if args.target == 'd8':
gradle.RunGradle(['buildExampleJars'])
test_d8sample(output_paths)
if __name__ == '__main__':
main()
| en | 0.841674 | #!/usr/bin/env python # Copyright (c) 2018, the R8 project authors. Please see the AUTHORS file # for details. All rights reserved. Use of this source code is governed by a # BSD-style license that can be found in the LICENSE file. Build r8lib.jar using src/main/keep.txt and test that d8_api_usage_sample.jar works with the minified R8. # Clean the build directory to ensure no repackaging of any existing # lib or deps. # Produce the r8lib target to be processed later. # Produce the dependencies needed for running r8 on lib.jar. # Produce R8 for compiling lib # SAMPLE_JAR and LIB_JAR should not have any classes in common, since e.g. # R8CommandParser should have been minified in LIB_JAR. # Just in case R8CommandParser is also present in LIB_JAR, we put # SAMPLE_JAR first on the classpath to use its version of R8CommandParser. # SAMPLE_JAR and LIB_JAR should not have any classes in common, since e.g. # R8CommandParser should have been minified in LIB_JAR. # Just in case R8CommandParser is also present in LIB_JAR, we put # SAMPLE_JAR first on the classpath to use its version of R8CommandParser. # Handle --help | 2.030141 | 2 |
ambari-agent/src/test/python/TestPuppetExecutor.py | telefonicaid/fiware-cosmos-ambari | 0 | 6631705 | #!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from unittest import TestCase
from PuppetExecutor import PuppetExecutor
from RepoInstaller import RepoInstaller
from Grep import Grep
from pprint import pformat
import socket, threading, tempfile
import os, time
import sys
import json
from AmbariConfig import AmbariConfig
from mock.mock import patch, MagicMock, call
from threading import Thread
from shell import shellRunner
class TestPuppetExecutor(TestCase):
def test_build(self):
puppetexecutor = PuppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
command = puppetexecutor.puppetCommand("site.pp")
self.assertEquals("puppet", command[0], "puppet binary wrong")
self.assertEquals("apply", command[1], "local apply called")
self.assertEquals("--confdir=/tmp", command[2],"conf dir tmp")
self.assertEquals("--detailed-exitcodes", command[3], "make sure output \
correct")
@patch.object(shellRunner,'run')
def test_isJavaAvailable(self, cmdrun_mock):
puppetInstance = PuppetExecutor("/tmp", "/x", "/y", '/tmpdir', None)
command = {'configurations':{'global':{'java64_home':'/usr/jdk/jdk123'}}}
cmdrun_mock.return_value = {'exitCode': 1, 'output': 'Command not found', 'error': ''}
self.assertEquals(puppetInstance.isJavaAvailable(command), False)
cmdrun_mock.return_value = {'exitCode': 0, 'output': 'OK', 'error': ''}
self.assertEquals(puppetInstance.isJavaAvailable(command), True)
@patch.object(PuppetExecutor, 'isJavaAvailable')
@patch.object(PuppetExecutor, 'runPuppetFile')
def test_run_command(self, runPuppetFileMock, isJavaAvailableMock):
tmpdir = AmbariConfig().getConfig().get("stack", "installprefix")
puppetInstance = PuppetExecutor("/tmp", "/x", "/y", tmpdir, AmbariConfig().getConfig())
jsonFile = open('../../main/python/ambari_agent/test.json', 'r')
jsonStr = jsonFile.read()
parsedJson = json.loads(jsonStr)
parsedJson["taskId"] = 1
def side_effect1(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
result["exitcode"] = 0
runPuppetFileMock.side_effect = side_effect1
puppetInstance.reposInstalled = False
isJavaAvailableMock.return_value = True
res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
self.assertEquals(res["exitcode"], 0)
self.assertTrue(puppetInstance.reposInstalled)
def side_effect2(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
result["exitcode"] = 999
runPuppetFileMock.side_effect = side_effect2
puppetInstance.reposInstalled = False
isJavaAvailableMock.return_value = True
res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
self.assertEquals(res["exitcode"], 999)
self.assertFalse(puppetInstance.reposInstalled)
os.unlink(tmpdir + os.sep + 'site-' + str(parsedJson["taskId"]) + '.pp')
def side_effect2(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
result["exitcode"] = 0
runPuppetFileMock.side_effect = side_effect2
puppetInstance.reposInstalled = False
isJavaAvailableMock.return_value = False
parsedJson['roleCommand'] = "START"
parsedJson['configurations'] = {'global':{'java64_home':'/usr/jdk/jdk123'}}
res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
JAVANOTVALID_MSG = "Cannot access JDK! Make sure you have permission to execute {0}/bin/java"
errMsg = JAVANOTVALID_MSG.format('/usr/jdk/jdk123')
self.assertEquals(res["exitcode"], 1)
self.assertEquals(res["stderr"], errMsg)
self.assertFalse(puppetInstance.reposInstalled)
parsedJson['configurations'] = {'random':{'name1':'value2'}}
res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
self.assertEquals(res["exitcode"], 1)
self.assertEquals(res["stderr"], "Cannot access JDK! Make sure java64_home is specified in global config")
@patch.object(PuppetExecutor, 'isJavaAvailable')
@patch.object(RepoInstaller, 'generate_repo_manifests')
@patch.object(PuppetExecutor, 'runPuppetFile')
def test_overwrite_repos(self, runPuppetFileMock, generateRepoManifestMock, isJavaAvailableMock):
tmpdir = AmbariConfig().getConfig().get("stack", "installprefix")
puppetInstance = PuppetExecutor("/tmp", "/x", "/y", tmpdir, AmbariConfig().getConfig())
jsonFile = open('../../main/python/ambari_agent/test.json', 'r')
jsonStr = jsonFile.read()
parsedJson = json.loads(jsonStr)
parsedJson["taskId"] = 77
parsedJson['roleCommand'] = "START"
def side_effect(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
result["exitcode"] = 0
runPuppetFileMock.side_effect = side_effect
isJavaAvailableMock.return_value = True
#If ambari-agent has been just started and no any commands were executed by
# PuppetExecutor.runCommand, then no repo files were updated by
# RepoInstaller.generate_repo_manifests
self.assertEquals(0, generateRepoManifestMock.call_count)
self.assertFalse(puppetInstance.reposInstalled)
# After executing of the first command, RepoInstaller.generate_repo_manifests
# generates a .pp file for updating repo files
puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
self.assertTrue(puppetInstance.reposInstalled)
self.assertEquals(1, generateRepoManifestMock.call_count)
isJavaAvailableMock.assert_called_with("java64_home")
# After executing of the next commands, repo manifest aren't generated again
puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
self.assertTrue(puppetInstance.reposInstalled)
self.assertEquals(1, generateRepoManifestMock.call_count)
puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
self.assertTrue(puppetInstance.reposInstalled)
self.assertEquals(1, generateRepoManifestMock.call_count)
@patch("os.path.exists")
def test_configure_environ(self, osPathExistsMock):
config = AmbariConfig().getConfig()
tmpdir = config.get("stack", "installprefix")
puppetInstance = PuppetExecutor("/tmp", "/x", "/y", tmpdir, config)
environ = puppetInstance.configureEnviron({})
self.assertEquals(environ, {})
config.set('puppet','ruby_home',"test/ruby_home")
puppetInstance = PuppetExecutor("/tmp", "/x", "/y", tmpdir, config)
osPathExistsMock.return_value = True
environ = puppetInstance.configureEnviron({"PATH" : "test_path"})
self.assertEquals(environ["PATH"], "test/ruby_home/bin:test_path")
self.assertEquals(environ["MY_RUBY_HOME"], "test/ruby_home")
def test_condense_bad2(self):
puppetexecutor = PuppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
grep = Grep()
puppetexecutor.grep = grep
grep.ERROR_LAST_LINES_BEFORE = 2
grep.ERROR_LAST_LINES_AFTER = 3
string_err = open('dummy_puppet_output_error2.txt', 'r').read().replace("\n", os.linesep)
result = puppetexecutor.condenseOutput(string_err, '', 1)
stripped_string = string_err.strip()
lines = stripped_string.splitlines(True)
d = lines[1:6]
d = grep.cleanByTemplate("".join(d).strip(), "warning").splitlines(True)
result_check = True
for l in d:
result_check &= grep.filterMarkup(l) in result
    self.assertEquals(result_check, True, "Failed to condense fail log")
    self.assertEquals(('warning' in result.lower()), False, "Failed to condense fail log")
    self.assertEquals(len(result.splitlines(True)), 5, "Failed to condense fail log")
def test_condense_bad3(self):
puppetexecutor = PuppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
grep = Grep()
puppetexecutor.grep = grep
string_err = open('dummy_puppet_output_error3.txt', 'r').read().replace("\n", os.linesep)
result = puppetexecutor.condenseOutput(string_err, '', 1)
stripped_string = string_err.strip()
lines = stripped_string.splitlines(True)
#sys.stderr.write(result)
d = lines[0:31]
d = grep.cleanByTemplate("".join(d).strip(), "warning").splitlines(True)
result_check = True
for l in d:
result_check &= grep.filterMarkup(l) in result
    self.assertEquals(result_check, True, "Failed to condense fail log")
    self.assertEquals(('warning' in result.lower()), False, "Failed to condense fail log")
    self.assertEquals(len(result.splitlines(True)), 19, "Failed to condense fail log")
def test_condense_good(self):
puppetexecutor = PuppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
grep = Grep()
puppetexecutor.grep = grep
grep.OUTPUT_LAST_LINES = 2
string_good = open('dummy_puppet_output_good.txt', 'r').read().replace("\n", os.linesep)
result = puppetexecutor.condenseOutput(string_good, PuppetExecutor.NO_ERROR, 0)
stripped_string = string_good.strip()
lines = stripped_string.splitlines(True)
result_check = lines[45].strip() in result and lines[46].strip() in result
    self.assertEquals(result_check, True, "Failed to condense output log")
    self.assertEquals(len(result.splitlines(True)), 2, "Failed to condense output log")
@patch("shell.kill_process_with_children")
def test_watchdog_1(self, kill_process_with_children_mock):
"""
Tests whether watchdog works
"""
subproc_mock = self.Subprocess_mockup()
executor_mock = self.PuppetExecutor_mock("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
"/usr/",
"/root/workspace/puppet-install/facter-1.6.10/",
"/tmp", AmbariConfig().getConfig(), subproc_mock)
_, tmpoutfile = tempfile.mkstemp()
_, tmperrfile = tempfile.mkstemp()
result = { }
puppetEnv = { "RUBYLIB" : ""}
executor_mock.PUPPET_TIMEOUT_SECONDS = 0.1
kill_process_with_children_mock.side_effect = lambda pid : subproc_mock.terminate()
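    # a None returncode makes the fake process look still-running, so the
    # watchdog is expected to fire and terminate it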
subproc_mock.returncode = None
thread = Thread(target = executor_mock.runPuppetFile, args = ("fake_puppetFile", result, puppetEnv, tmpoutfile, tmperrfile))
thread.start()
time.sleep(0.1)
subproc_mock.finished_event.wait()
self.assertEquals(subproc_mock.was_terminated, True, "Subprocess should be terminated due to timeout")
def test_watchdog_2(self):
"""
Tries to catch false positive watchdog invocations
"""
subproc_mock = self.Subprocess_mockup()
executor_mock = self.PuppetExecutor_mock("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
"/usr/",
"/root/workspace/puppet-install/facter-1.6.10/",
"/tmp", AmbariConfig().getConfig(), subproc_mock)
_, tmpoutfile = tempfile.mkstemp()
_, tmperrfile = tempfile.mkstemp()
result = { }
puppetEnv = { "RUBYLIB" : ""}
executor_mock.PUPPET_TIMEOUT_SECONDS = 5
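    # generous timeout: the fake process finishes on its own first, so the
    # watchdog must not kill it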
subproc_mock.returncode = 0
thread = Thread(target = executor_mock.runPuppetFile, args = ("fake_puppetFile", result, puppetEnv, tmpoutfile, tmperrfile))
thread.start()
time.sleep(0.1)
subproc_mock.should_finish_event.set()
subproc_mock.finished_event.wait()
self.assertEquals(subproc_mock.was_terminated, False, "Subprocess should not be terminated before timeout")
self.assertEquals(subproc_mock.returncode, 0, "Subprocess should not be terminated before timeout")
class PuppetExecutor_mock(PuppetExecutor):
def __init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config, subprocess_mockup):
self.subprocess_mockup = subprocess_mockup
PuppetExecutor.__init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config)
pass
def lauch_puppet_subprocess(self, puppetcommand, tmpout, tmperr, puppetEnv):
self.subprocess_mockup.tmpout = tmpout
self.subprocess_mockup.tmperr = tmperr
return self.subprocess_mockup
def runShellKillPgrp(self, puppet):
puppet.terminate() # note: In real code, subprocess.terminate() is not called
pass
class Subprocess_mockup():
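    # Stand-in for the puppet subprocess: completion is driven by threading
    # events so each test controls exactly when the fake run starts and ends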
returncode = 0
started_event = threading.Event()
should_finish_event = threading.Event()
finished_event = threading.Event()
was_terminated = False
tmpout = None
tmperr = None
pid=-1
def communicate(self):
self.started_event.set()
self.tmpout.write("Dummy output")
self.tmpout.flush()
self.tmperr.write("Dummy err")
self.tmperr.flush()
self.should_finish_event.wait()
self.finished_event.set()
pass
def terminate(self):
self.was_terminated = True
self.returncode = 17
self.should_finish_event.set()
| #!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from unittest import TestCase
from PuppetExecutor import PuppetExecutor
from RepoInstaller import RepoInstaller
from Grep import Grep
from pprint import pformat
import socket, threading, tempfile
import os, time
import sys
import json
from AmbariConfig import AmbariConfig
from mock.mock import patch, MagicMock, call
from threading import Thread
from shell import shellRunner
class TestPuppetExecutor(TestCase):
def test_build(self):
puppetexecutor = PuppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
command = puppetexecutor.puppetCommand("site.pp")
self.assertEquals("puppet", command[0], "puppet binary wrong")
self.assertEquals("apply", command[1], "local apply called")
self.assertEquals("--confdir=/tmp", command[2],"conf dir tmp")
self.assertEquals("--detailed-exitcodes", command[3], "make sure output \
correct")
@patch.object(shellRunner,'run')
def test_isJavaAvailable(self, cmdrun_mock):
puppetInstance = PuppetExecutor("/tmp", "/x", "/y", '/tmpdir', None)
command = {'configurations':{'global':{'java64_home':'/usr/jdk/jdk123'}}}
cmdrun_mock.return_value = {'exitCode': 1, 'output': 'Command not found', 'error': ''}
self.assertEquals(puppetInstance.isJavaAvailable(command), False)
cmdrun_mock.return_value = {'exitCode': 0, 'output': 'OK', 'error': ''}
self.assertEquals(puppetInstance.isJavaAvailable(command), True)
@patch.object(PuppetExecutor, 'isJavaAvailable')
@patch.object(PuppetExecutor, 'runPuppetFile')
def test_run_command(self, runPuppetFileMock, isJavaAvailableMock):
tmpdir = AmbariConfig().getConfig().get("stack", "installprefix")
puppetInstance = PuppetExecutor("/tmp", "/x", "/y", tmpdir, AmbariConfig().getConfig())
jsonFile = open('../../main/python/ambari_agent/test.json', 'r')
jsonStr = jsonFile.read()
parsedJson = json.loads(jsonStr)
parsedJson["taskId"] = 1
def side_effect1(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
result["exitcode"] = 0
runPuppetFileMock.side_effect = side_effect1
puppetInstance.reposInstalled = False
isJavaAvailableMock.return_value = True
res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
self.assertEquals(res["exitcode"], 0)
self.assertTrue(puppetInstance.reposInstalled)
def side_effect2(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
result["exitcode"] = 999
runPuppetFileMock.side_effect = side_effect2
puppetInstance.reposInstalled = False
isJavaAvailableMock.return_value = True
res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
self.assertEquals(res["exitcode"], 999)
self.assertFalse(puppetInstance.reposInstalled)
os.unlink(tmpdir + os.sep + 'site-' + str(parsedJson["taskId"]) + '.pp')
def side_effect2(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
result["exitcode"] = 0
runPuppetFileMock.side_effect = side_effect2
puppetInstance.reposInstalled = False
isJavaAvailableMock.return_value = False
parsedJson['roleCommand'] = "START"
parsedJson['configurations'] = {'global':{'java64_home':'/usr/jdk/jdk123'}}
res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
JAVANOTVALID_MSG = "Cannot access JDK! Make sure you have permission to execute {0}/bin/java"
errMsg = JAVANOTVALID_MSG.format('/usr/jdk/jdk123')
self.assertEquals(res["exitcode"], 1)
self.assertEquals(res["stderr"], errMsg)
self.assertFalse(puppetInstance.reposInstalled)
parsedJson['configurations'] = {'random':{'name1':'value2'}}
res = puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
self.assertEquals(res["exitcode"], 1)
self.assertEquals(res["stderr"], "Cannot access JDK! Make sure java64_home is specified in global config")
@patch.object(PuppetExecutor, 'isJavaAvailable')
@patch.object(RepoInstaller, 'generate_repo_manifests')
@patch.object(PuppetExecutor, 'runPuppetFile')
def test_overwrite_repos(self, runPuppetFileMock, generateRepoManifestMock, isJavaAvailableMock):
tmpdir = AmbariConfig().getConfig().get("stack", "installprefix")
puppetInstance = PuppetExecutor("/tmp", "/x", "/y", tmpdir, AmbariConfig().getConfig())
jsonFile = open('../../main/python/ambari_agent/test.json', 'r')
jsonStr = jsonFile.read()
parsedJson = json.loads(jsonStr)
parsedJson["taskId"] = 77
parsedJson['roleCommand'] = "START"
def side_effect(puppetFile, result, puppetEnv, tmpoutfile, tmperrfile):
result["exitcode"] = 0
runPuppetFileMock.side_effect = side_effect
isJavaAvailableMock.return_value = True
#If ambari-agent has been just started and no any commands were executed by
# PuppetExecutor.runCommand, then no repo files were updated by
# RepoInstaller.generate_repo_manifests
self.assertEquals(0, generateRepoManifestMock.call_count)
self.assertFalse(puppetInstance.reposInstalled)
# After executing of the first command, RepoInstaller.generate_repo_manifests
# generates a .pp file for updating repo files
puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
self.assertTrue(puppetInstance.reposInstalled)
self.assertEquals(1, generateRepoManifestMock.call_count)
isJavaAvailableMock.assert_called_with("java64_home")
# After executing of the next commands, repo manifest aren't generated again
puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
self.assertTrue(puppetInstance.reposInstalled)
self.assertEquals(1, generateRepoManifestMock.call_count)
puppetInstance.runCommand(parsedJson, tmpdir + '/out.txt', tmpdir + '/err.txt')
self.assertTrue(puppetInstance.reposInstalled)
self.assertEquals(1, generateRepoManifestMock.call_count)
@patch("os.path.exists")
def test_configure_environ(self, osPathExistsMock):
config = AmbariConfig().getConfig()
tmpdir = config.get("stack", "installprefix")
puppetInstance = PuppetExecutor("/tmp", "/x", "/y", tmpdir, config)
environ = puppetInstance.configureEnviron({})
self.assertEquals(environ, {})
config.set('puppet','ruby_home',"test/ruby_home")
puppetInstance = PuppetExecutor("/tmp", "/x", "/y", tmpdir, config)
osPathExistsMock.return_value = True
environ = puppetInstance.configureEnviron({"PATH" : "test_path"})
self.assertEquals(environ["PATH"], "test/ruby_home/bin:test_path")
self.assertEquals(environ["MY_RUBY_HOME"], "test/ruby_home")
def test_condense_bad2(self):
puppetexecutor = PuppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
grep = Grep()
puppetexecutor.grep = grep
grep.ERROR_LAST_LINES_BEFORE = 2
grep.ERROR_LAST_LINES_AFTER = 3
string_err = open('dummy_puppet_output_error2.txt', 'r').read().replace("\n", os.linesep)
result = puppetexecutor.condenseOutput(string_err, '', 1)
stripped_string = string_err.strip()
lines = stripped_string.splitlines(True)
d = lines[1:6]
d = grep.cleanByTemplate("".join(d).strip(), "warning").splitlines(True)
result_check = True
for l in d:
result_check &= grep.filterMarkup(l) in result
    self.assertEquals(result_check, True, "Failed to condense fail log")
    self.assertEquals(('warning' in result.lower()), False, "Failed to condense fail log")
    self.assertEquals(len(result.splitlines(True)), 5, "Failed to condense fail log")
def test_condense_bad3(self):
puppetexecutor = PuppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
grep = Grep()
puppetexecutor.grep = grep
string_err = open('dummy_puppet_output_error3.txt', 'r').read().replace("\n", os.linesep)
result = puppetexecutor.condenseOutput(string_err, '', 1)
stripped_string = string_err.strip()
lines = stripped_string.splitlines(True)
#sys.stderr.write(result)
d = lines[0:31]
d = grep.cleanByTemplate("".join(d).strip(), "warning").splitlines(True)
result_check = True
for l in d:
result_check &= grep.filterMarkup(l) in result
    self.assertEquals(result_check, True, "Failed to condense fail log")
    self.assertEquals(('warning' in result.lower()), False, "Failed to condense fail log")
    self.assertEquals(len(result.splitlines(True)), 19, "Failed to condense fail log")
def test_condense_good(self):
puppetexecutor = PuppetExecutor("/tmp", "/x", "/y", "/z", AmbariConfig().getConfig())
grep = Grep()
puppetexecutor.grep = grep
grep.OUTPUT_LAST_LINES = 2
string_good = open('dummy_puppet_output_good.txt', 'r').read().replace("\n", os.linesep)
result = puppetexecutor.condenseOutput(string_good, PuppetExecutor.NO_ERROR, 0)
stripped_string = string_good.strip()
lines = stripped_string.splitlines(True)
result_check = lines[45].strip() in result and lines[46].strip() in result
    self.assertEquals(result_check, True, "Failed to condense output log")
    self.assertEquals(len(result.splitlines(True)), 2, "Failed to condense output log")
@patch("shell.kill_process_with_children")
def test_watchdog_1(self, kill_process_with_children_mock):
"""
Tests whether watchdog works
"""
subproc_mock = self.Subprocess_mockup()
executor_mock = self.PuppetExecutor_mock("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
"/usr/",
"/root/workspace/puppet-install/facter-1.6.10/",
"/tmp", AmbariConfig().getConfig(), subproc_mock)
_, tmpoutfile = tempfile.mkstemp()
_, tmperrfile = tempfile.mkstemp()
result = { }
puppetEnv = { "RUBYLIB" : ""}
executor_mock.PUPPET_TIMEOUT_SECONDS = 0.1
kill_process_with_children_mock.side_effect = lambda pid : subproc_mock.terminate()
subproc_mock.returncode = None
thread = Thread(target = executor_mock.runPuppetFile, args = ("fake_puppetFile", result, puppetEnv, tmpoutfile, tmperrfile))
thread.start()
time.sleep(0.1)
subproc_mock.finished_event.wait()
self.assertEquals(subproc_mock.was_terminated, True, "Subprocess should be terminated due to timeout")
def test_watchdog_2(self):
"""
Tries to catch false positive watchdog invocations
"""
subproc_mock = self.Subprocess_mockup()
executor_mock = self.PuppetExecutor_mock("/home/centos/ambari_repo_info/ambari-agent/src/main/puppet/",
"/usr/",
"/root/workspace/puppet-install/facter-1.6.10/",
"/tmp", AmbariConfig().getConfig(), subproc_mock)
_, tmpoutfile = tempfile.mkstemp()
_, tmperrfile = tempfile.mkstemp()
result = { }
puppetEnv = { "RUBYLIB" : ""}
executor_mock.PUPPET_TIMEOUT_SECONDS = 5
subproc_mock.returncode = 0
thread = Thread(target = executor_mock.runPuppetFile, args = ("fake_puppetFile", result, puppetEnv, tmpoutfile, tmperrfile))
thread.start()
time.sleep(0.1)
subproc_mock.should_finish_event.set()
subproc_mock.finished_event.wait()
self.assertEquals(subproc_mock.was_terminated, False, "Subprocess should not be terminated before timeout")
self.assertEquals(subproc_mock.returncode, 0, "Subprocess should not be terminated before timeout")
class PuppetExecutor_mock(PuppetExecutor):
def __init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config, subprocess_mockup):
self.subprocess_mockup = subprocess_mockup
PuppetExecutor.__init__(self, puppetModule, puppetInstall, facterInstall, tmpDir, config)
pass
def lauch_puppet_subprocess(self, puppetcommand, tmpout, tmperr, puppetEnv):
self.subprocess_mockup.tmpout = tmpout
self.subprocess_mockup.tmperr = tmperr
return self.subprocess_mockup
def runShellKillPgrp(self, puppet):
puppet.terminate() # note: In real code, subprocess.terminate() is not called
pass
class Subprocess_mockup():
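    # Stand-in for the puppet subprocess: completion is driven by threading
    # events so each test controls exactly when the fake run starts and ends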
returncode = 0
started_event = threading.Event()
should_finish_event = threading.Event()
finished_event = threading.Event()
was_terminated = False
tmpout = None
tmperr = None
pid=-1
def communicate(self):
self.started_event.set()
self.tmpout.write("Dummy output")
self.tmpout.flush()
self.tmperr.write("Dummy err")
self.tmperr.flush()
self.should_finish_event.wait()
self.finished_event.set()
pass
def terminate(self):
self.was_terminated = True
self.returncode = 17
self.should_finish_event.set()
| en | 0.870642 | #!/usr/bin/env python2.6 Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. #If ambari-agent has been just started and no any commands were executed by # PuppetExecutor.runCommand, then no repo files were updated by # RepoInstaller.generate_repo_manifests # After executing of the first command, RepoInstaller.generate_repo_manifests # generates a .pp file for updating repo files # After executing of the next commands, repo manifest aren't generated again #sys.stderr.write(result) Tests whether watchdog works Tries to catch false positive watchdog invocations # note: In real code, subprocess.terminate() is not called | 1.86964 | 2 |
emoticonvis/apps/api/tests/__init__.py | nanchenchen/emoticon-analysis | 10 | 6631706 | <gh_stars>1-10
def api_time_format(dt):
"""Convert a datetime to string according to the API settings"""
from rest_framework.fields import DateTimeField
field = DateTimeField()
return field.to_representation(dt)
def django_time_format(dtstr):
"""Convert a datetime to string according to the API settings"""
from rest_framework.fields import DateTimeField
field = DateTimeField()
return field.to_internal_value(dtstr)
| def api_time_format(dt):
"""Convert a datetime to string according to the API settings"""
from rest_framework.fields import DateTimeField
field = DateTimeField()
return field.to_representation(dt)
def django_time_format(dtstr):
"""Convert a datetime to string according to the API settings"""
from rest_framework.fields import DateTimeField
field = DateTimeField()
return field.to_internal_value(dtstr) | en | 0.714579 | Convert a datetime to string according to the API settings Convert a datetime to string according to the API settings | 3.179621 | 3 |
boilerplateapp/api/__init__.py | svenstaro/python-web-boilerplate | 20 | 6631707 | """Re-export API."""
from flask import Blueprint
api = Blueprint('api', __name__, url_prefix='')
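# route modules are imported afterwards (E402 suppressed) so the blueprint
# already exists when they load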
from boilerplateapp.api import auth # noqa E402
from boilerplateapp.api import hello # noqa E402
| """Re-export API."""
from flask import Blueprint
api = Blueprint('api', __name__, url_prefix='')
from boilerplateapp.api import auth # noqa E402
from boilerplateapp.api import hello # noqa E402
| en | 0.240144 | Re-export API. # noqa E402 # noqa E402 | 1.517123 | 2 |
tests/datastructures/arrays/test_missing_number.py | sikakente/educative-io-python | 1 | 6631708 | <reponame>sikakente/educative-io-python<gh_stars>1-10
import unittest
import pytest
from datastructures.arrays.missing_number import missing_number
@pytest.mark.parametrize("nums,expected", [
([3, 0, 1], 2),
([0, 1], 2),
([9, 6, 4, 2, 3, 5, 7, 0, 1], 8)
])
def test_missing_number(nums, expected):
assert expected == missing_number(nums)
if __name__ == '__main__':
unittest.main()
| import unittest
import pytest
from datastructures.arrays.missing_number import missing_number
@pytest.mark.parametrize("nums,expected", [
([3, 0, 1], 2),
([0, 1], 2),
([9, 6, 4, 2, 3, 5, 7, 0, 1], 8)
])
def test_missing_number(nums, expected):
assert expected == missing_number(nums)
if __name__ == '__main__':
unittest.main() | none | 1 | 3.045425 | 3 |
|
pygears/lib/drvrnd.py | bogdanvuk/pygears | 120 | 6631709 | from pygears import gear, module
from pygears.sim.extens.randomization import randomize, rand_seq
from .verif import drv
@gear
def drvrnd(*, t, cnt=None, cons=None, params=None):
return drv(
t=t, seq=randomize(t, module().basename, cnt=cnt, cons=cons, params=params))
| from pygears import gear, module
from pygears.sim.extens.randomization import randomize, rand_seq
from .verif import drv
@gear
def drvrnd(*, t, cnt=None, cons=None, params=None):
return drv(
t=t, seq=randomize(t, module().basename, cnt=cnt, cons=cons, params=params))
| none | 1 | 2.34135 | 2 |
|
app/tests/intersection_test.py | pedrolp85/homework | 0 | 6631710 | <reponame>pedrolp85/homework
from typer.testing import CliRunner
from util import app
runner = CliRunner()
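# Each test filters tests/test_files/intersection.log through the CLI and
# checks which line labels (L1, L2, ...) appear in the output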
def test_intersection_1():
result = runner.invoke(
app, ["tests/test_files/intersection.log", "--first", 11, "--last", 11]
)
assert result.exit_code == 0
assert "L" not in result.stdout
print(result.stdout)
def test_intersection_2():
result = runner.invoke(
app, ["tests/test_files/intersection.log", "--first", 11, "--last", 12]
)
assert result.exit_code == 0
assert "L11" in result.stdout
print(result.stdout)
def test_intersection_3():
result = runner.invoke(
app, ["tests/test_files/intersection.log", "--first", 5, "--ipv4", "172.16.0.1"]
)
assert result.exit_code == 0
assert "L1" in result.stdout
assert "L3" not in result.stdout
print(result.stdout)
def test_intersection_4():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--first",
5,
"--ipv6",
"2001:db8:0:0:0::1",
],
)
assert result.exit_code == 0
assert "L2" in result.stdout
assert "L4" in result.stdout
assert "L1" not in result.stdout
assert "L5" not in result.stdout
print(result.stdout)
def test_intersection_5():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--first",
20,
"--timestamps",
"11:11:11",
],
)
assert result.exit_code == 0
assert "L12" in result.stdout
assert "L17" in result.stdout
assert "L21" not in result.stdout
print(result.stdout)
def test_intersection_6():
result = runner.invoke(
app, ["tests/test_files/intersection.log", "--last", 8, "--ipv4", "192.168.1.1"]
)
assert result.exit_code == 0
assert "L20" in result.stdout
assert "L19" in result.stdout
assert "14" not in result.stdout
print(result.stdout)
def test_intersection_7():
result = runner.invoke(
app,
["tests/test_files/intersection.log", "--last", 8, "--ipv6", "fdf8:f53e:61e4::18"],
)
assert result.exit_code == 0
assert "L20" in result.stdout
assert "L22" in result.stdout
assert "21" not in result.stdout
print(result.stdout)
def test_intersection_8():
result = runner.invoke(
app,
["tests/test_files/intersection.log", "--last", 8, "--timestamps", "12:03:59"],
)
assert result.exit_code == 0
assert "L18" in result.stdout
assert "L20" in result.stdout
assert "13" not in result.stdout
print(result.stdout)
def test_intersection_9():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--ipv4",
"192.168.1.1",
"--ipv6",
"2001:db8:0:0:0::1",
],
)
assert result.exit_code == 0
assert "L8" in result.stdout
assert "L19" in result.stdout
assert "18" in result.stdout
assert "L3" not in result.stdout
print(result.stdout)
def test_intersection_10():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--ipv6",
"2001:db8:0:0:0::1",
"--timestamps",
"11:11:11",
],
)
assert result.exit_code == 0
assert "L21" in result.stdout
assert "L17" in result.stdout
assert "L12" not in result.stdout
print(result.stdout)
def test_intersection_11():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--first",
15,
"--last",
15,
"--ipv4",
"172.16.0.1",
],
)
assert result.exit_code == 0
assert "L12" in result.stdout
assert "L1 " not in result.stdout
assert "L3" not in result.stdout
print(result.stdout)
def test_intersection_12():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--last",
14,
"--ipv4",
"192.168.1.1",
"--ipv6",
"2001:db8:0::1",
],
)
assert result.exit_code == 0
assert "L18" in result.stdout
assert "L14" not in result.stdout
assert "L19" in result.stdout
print(result.stdout)
def test_intersection_13():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--ipv4",
"192.168.1.1",
"--ipv6",
"2001:db8:0::1",
"--timestamps",
"12:03:59",
],
)
assert result.exit_code == 0
assert "L18" in result.stdout
assert "L13" not in result.stdout
print(result.stdout)
def test_intersection_14():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--first",
20,
"--last",
"20",
"--ipv4",
"192.168.1.10",
"--ipv6",
"2001:db8:0::1",
],
)
assert result.exit_code == 0
assert "L3" in result.stdout
print(result.stdout)
def test_intersection_15():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--last",
5,
"--ipv4",
"192.168.1.1",
"--ipv6",
"2001:db8:0::1",
"--timestamps",
"12:03:59",
],
)
assert result.exit_code == 0
assert "L18" in result.stdout
print(result.stdout)
| from typer.testing import CliRunner
from util import app
runner = CliRunner()
def test_intersection_1():
result = runner.invoke(
app, ["tests/test_files/intersection.log", "--first", 11, "--last", 11]
)
assert result.exit_code == 0
assert "L" not in result.stdout
print(result.stdout)
def test_intersection_2():
result = runner.invoke(
app, ["tests/test_files/intersection.log", "--first", 11, "--last", 12]
)
assert result.exit_code == 0
assert "L11" in result.stdout
print(result.stdout)
def test_intersection_3():
result = runner.invoke(
app, ["tests/test_files/intersection.log", "--first", 5, "--ipv4", "172.16.0.1"]
)
assert result.exit_code == 0
assert "L1" in result.stdout
assert "L3" not in result.stdout
print(result.stdout)
def test_intersection_4():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--first",
5,
"--ipv6",
"2001:db8:0:0:0::1",
],
)
assert result.exit_code == 0
assert "L2" in result.stdout
assert "L4" in result.stdout
assert "L1" not in result.stdout
assert "L5" not in result.stdout
print(result.stdout)
def test_intersection_5():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--first",
20,
"--timestamps",
"11:11:11",
],
)
assert result.exit_code == 0
assert "L12" in result.stdout
assert "L17" in result.stdout
assert "L21" not in result.stdout
print(result.stdout)
def test_intersection_6():
result = runner.invoke(
app, ["tests/test_files/intersection.log", "--last", 8, "--ipv4", "192.168.1.1"]
)
assert result.exit_code == 0
assert "L20" in result.stdout
assert "L19" in result.stdout
assert "14" not in result.stdout
print(result.stdout)
def test_intersection_7():
result = runner.invoke(
app,
["tests/test_files/intersection.log", "--last", 8, "--ipv6", "fdf8:f53e:61e4::18"],
)
assert result.exit_code == 0
assert "L20" in result.stdout
assert "L22" in result.stdout
assert "21" not in result.stdout
print(result.stdout)
def test_intersection_8():
result = runner.invoke(
app,
["tests/test_files/intersection.log", "--last", 8, "--timestamps", "12:03:59"],
)
assert result.exit_code == 0
assert "L18" in result.stdout
assert "L20" in result.stdout
assert "13" not in result.stdout
print(result.stdout)
def test_intersection_9():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--ipv4",
"192.168.1.1",
"--ipv6",
"2001:db8:0:0:0::1",
],
)
assert result.exit_code == 0
assert "L8" in result.stdout
assert "L19" in result.stdout
assert "18" in result.stdout
assert "L3" not in result.stdout
print(result.stdout)
def test_intersection_10():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--ipv6",
"2001:db8:0:0:0::1",
"--timestamps",
"11:11:11",
],
)
assert result.exit_code == 0
assert "L21" in result.stdout
assert "L17" in result.stdout
assert "L12" not in result.stdout
print(result.stdout)
def test_intersection_11():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--first",
15,
"--last",
15,
"--ipv4",
"172.16.0.1",
],
)
assert result.exit_code == 0
assert "L12" in result.stdout
assert "L1 " not in result.stdout
assert "L3" not in result.stdout
print(result.stdout)
def test_intersection_12():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--last",
14,
"--ipv4",
"192.168.1.1",
"--ipv6",
"2001:db8:0::1",
],
)
assert result.exit_code == 0
assert "L18" in result.stdout
assert "L14" not in result.stdout
assert "L19" in result.stdout
print(result.stdout)
def test_intersection_13():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--ipv4",
"192.168.1.1",
"--ipv6",
"2001:db8:0::1",
"--timestamps",
"12:03:59",
],
)
assert result.exit_code == 0
assert "L18" in result.stdout
assert "L13" not in result.stdout
print(result.stdout)
def test_intersection_14():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--first",
20,
"--last",
"20",
"--ipv4",
"192.168.1.10",
"--ipv6",
"2001:db8:0::1",
],
)
assert result.exit_code == 0
assert "L3" in result.stdout
print(result.stdout)
def test_intersection_15():
result = runner.invoke(
app,
[
"tests/test_files/intersection.log",
"--last",
5,
"--ipv4",
"192.168.1.1",
"--ipv6",
"2001:db8:0::1",
"--timestamps",
"12:03:59",
],
)
assert result.exit_code == 0
assert "L18" in result.stdout
print(result.stdout) | none | 1 | 2.352961 | 2 |
|
src/open_sim.py | pcrumley/IseultServer | 1 | 6631711 | <filename>src/open_sim.py
from tristan_sim import TristanSim
from myCmaps import myCmaps_names
def open_sim(simType ='tristan-mp', outdir = ''):
if simType =='tristan-mp':
mySim = TristanSim(dirpath=outdir)
responseDict = mySim.get_avail_prtls()
responseDict['cmaps']= myCmaps_names
responseDict['fileArray'] = mySim.get_file_nums()
return responseDict
| <filename>src/open_sim.py
from tristan_sim import TristanSim
from myCmaps import myCmaps_names
def open_sim(simType ='tristan-mp', outdir = ''):
if simType =='tristan-mp':
mySim = TristanSim(dirpath=outdir)
responseDict = mySim.get_avail_prtls()
responseDict['cmaps']= myCmaps_names
responseDict['fileArray'] = mySim.get_file_nums()
return responseDict
| none | 1 | 2.281064 | 2 |
|
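A minimal, hypothetical usage sketch for the open_sim helper above; the output directory path is a placeholder and not part of the original entry.
# Hypothetical call to the open_sim helper defined above; it assumes real
# Tristan-MP output exists at the placeholder path.
sim_info = open_sim(simType='tristan-mp', outdir='/path/to/tristan/output')
print(sim_info['cmaps'][:3])      # available colormap names
print(sim_info['fileArray'][:3])  # first few output file numbers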
Mean var std.py | jibinmathew691993/PythonHackerrank | 0 | 6631712 | import numpy as np
n,m = input().split()
arr = np.array([input().split() for _ in range(int(n))], int)
np.set_printoptions(legacy='1.13')
print(np.mean(arr,axis=1))
print(np.var(arr,axis=0))
print(np.std(arr)) | import numpy as np
n,m = input().split()
arr = np.array([input().split() for _ in range(int(n))], int)
np.set_printoptions(legacy='1.13')
print(np.mean(arr,axis=1))
print(np.var(arr,axis=0))
print(np.std(arr)) | none | 1 | 3.232303 | 3 |
|
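The HackerRank snippet above reads its array from stdin; the sketch below reproduces the same mean/var/std axis semantics on a hardcoded 2x2 array so the expected outputs are visible. The sample values are illustrative assumptions.
import numpy as np
# Same calls as above, but on a fixed array instead of stdin input.
arr = np.array([[1, 2], [3, 4]], int)
print(np.mean(arr, axis=1))  # per-row means: [1.5 3.5]
print(np.var(arr, axis=0))   # per-column variances: [1. 1.]
print(np.std(arr))           # std over all four elements: ~1.1180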
samr/tests/geometry/test_package.py | ktchu/RESEARCH-PySAMR | 0 | 6631713 | """
Unit tests for 'geometry' package.
------------------------------------------------------------------------------
COPYRIGHT/LICENSE. This file is part of the XYZ package. It is subject
to the license terms in the LICENSE file found in the top-level directory of
this distribution. No part of the XYZ package, including this file, may be
copied, modified, propagated, or distributed except according to the terms
contained in the LICENSE file.
------------------------------------------------------------------------------
"""
# --- Imports
# XYZ
from samr import geometry
# --- Tests
def test_attributes():
"""
Test for expected package attributes.
"""
# Package information
assert geometry.Geometry
assert geometry.CartesianGeometry
| """
Unit tests for 'geometry' package.
------------------------------------------------------------------------------
COPYRIGHT/LICENSE. This file is part of the XYZ package. It is subject
to the license terms in the LICENSE file found in the top-level directory of
this distribution. No part of the XYZ package, including this file, may be
copied, modified, propagated, or distributed except according to the terms
contained in the LICENSE file.
------------------------------------------------------------------------------
"""
# --- Imports
# XYZ
from samr import geometry
# --- Tests
def test_attributes():
"""
Test for expected package attributes.
"""
# Package information
assert geometry.Geometry
assert geometry.CartesianGeometry
| en | 0.725933 | Unit tests for 'geometry' package. ------------------------------------------------------------------------------ COPYRIGHT/LICENSE. This file is part of the XYZ package. It is subject to the license terms in the LICENSE file found in the top-level directory of this distribution. No part of the XYZ package, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the LICENSE file. ------------------------------------------------------------------------------ # --- Imports # XYZ # --- Tests Test for expected package attributes. # Package information | 2.356976 | 2 |
tests/components/test_vultr.py | dauden1184/home-assistant | 37 | 6631714 | <filename>tests/components/test_vultr.py
"""The tests for the Vultr component."""
from copy import deepcopy
import json
import unittest
from unittest.mock import patch
import requests_mock
from homeassistant import setup
import homeassistant.components.vultr as vultr
from tests.common import (
get_test_home_assistant, load_fixture)
VALID_CONFIG = {
'vultr': {
'api_key': '<KEY>'
}
}
class TestVultr(unittest.TestCase):
"""Tests the Vultr component."""
def setUp(self):
"""Initialize values for this test case class."""
self.hass = get_test_home_assistant()
self.config = VALID_CONFIG
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that we started."""
self.hass.stop()
@requests_mock.Mocker()
def test_setup(self, mock):
"""Test successful setup."""
with patch(
'vultr.Vultr.server_list',
return_value=json.loads(
load_fixture('vultr_server_list.json'))):
response = vultr.setup(self.hass, self.config)
self.assertTrue(response)
def test_setup_no_api_key(self):
"""Test failed setup with missing API Key."""
conf = deepcopy(self.config)
del conf['vultr']['api_key']
assert not setup.setup_component(self.hass, vultr.DOMAIN, conf)
| <filename>tests/components/test_vultr.py
"""The tests for the Vultr component."""
from copy import deepcopy
import json
import unittest
from unittest.mock import patch
import requests_mock
from homeassistant import setup
import homeassistant.components.vultr as vultr
from tests.common import (
get_test_home_assistant, load_fixture)
VALID_CONFIG = {
'vultr': {
'api_key': '<KEY>'
}
}
class TestVultr(unittest.TestCase):
"""Tests the Vultr component."""
def setUp(self):
"""Initialize values for this test case class."""
self.hass = get_test_home_assistant()
self.config = VALID_CONFIG
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that we started."""
self.hass.stop()
@requests_mock.Mocker()
def test_setup(self, mock):
"""Test successful setup."""
with patch(
'vultr.Vultr.server_list',
return_value=json.loads(
load_fixture('vultr_server_list.json'))):
response = vultr.setup(self.hass, self.config)
self.assertTrue(response)
def test_setup_no_api_key(self):
"""Test failed setup with missing API Key."""
conf = deepcopy(self.config)
del conf['vultr']['api_key']
assert not setup.setup_component(self.hass, vultr.DOMAIN, conf)
| en | 0.737154 | The tests for the Vultr component. Tests the Vultr component. Initialize values for this test case class. # pylint: disable=invalid-name Stop everything that we started. Test successful setup. Test failed setup with missing API Key. | 2.719335 | 3 |
regfile/examples/chiprgfExample/sources/rgf2_defs.py | vhnatyk/vlsistuff | 1 | 6631715 | ADDR_MAP = {}
WIDTH_MAP = {}
ronly = 0x10000
ADDR_MAP["ronly"] = 0x10000
WIDTH_MAP["ronly"] = 4
ronly2 = 0x10004
ADDR_MAP["ronly2"] = 0x10004
WIDTH_MAP["ronly2"] = 4
wonly = 0x10008
ADDR_MAP["wonly"] = 0x10008
WIDTH_MAP["wonly"] = 4
one = 0x1000c
ADDR_MAP["one"] = 0x1000c
WIDTH_MAP["one"] = 4
rega = 0x10010
ADDR_MAP["rega"] = 0x10010
WIDTH_MAP["rega"] = 4
control0 = 0x10014
ADDR_MAP["control0"] = 0x10014
WIDTH_MAP["control0"] = 4
statusa = 0x10018
ADDR_MAP["statusa"] = 0x10018
WIDTH_MAP["statusa"] = 4
regb = 0x1001c
ADDR_MAP["regb"] = 0x1001c
WIDTH_MAP["regb"] = 4
w1cc = 0x10020
ADDR_MAP["w1cc"] = 0x10020
WIDTH_MAP["w1cc"] = 4
badfwr = 0x10024
ADDR_MAP["badfwr"] = 0x10024
WIDTH_MAP["badfwr"] = 4
badfro = 0x10028
ADDR_MAP["badfro"] = 0x10028
WIDTH_MAP["badfro"] = 4
ramx = 0x10100
ADDR_MAP["ramx"] = 0x10100
WIDTH_MAP["ramx"] = 1024
rega = 0x20000
ADDR_MAP["rega"] = 0x20000
WIDTH_MAP["rega"] = 4
control0 = 0x20004
ADDR_MAP["control0"] = 0x20004
WIDTH_MAP["control0"] = 4
statusa = 0x20008
ADDR_MAP["statusa"] = 0x20008
WIDTH_MAP["statusa"] = 4
regb = 0x2000c
ADDR_MAP["regb"] = 0x2000c
WIDTH_MAP["regb"] = 4
extern = 0x20010
ADDR_MAP["extern"] = 0x20010
WIDTH_MAP["extern"] = 4
eth0tmp0 = 0x20100
ADDR_MAP["eth0tmp0"] = 0x20100
WIDTH_MAP["eth0tmp0"] = 4
eth0tmp1 = 0x20104
ADDR_MAP["eth0tmp1"] = 0x20104
WIDTH_MAP["eth0tmp1"] = 4
eth0tmp2 = 0x20108
ADDR_MAP["eth0tmp2"] = 0x20108
WIDTH_MAP["eth0tmp2"] = 4
eth1tmp0 = 0x20200
ADDR_MAP["eth1tmp0"] = 0x20200
WIDTH_MAP["eth1tmp0"] = 4
eth1tmp1 = 0x20204
ADDR_MAP["eth1tmp1"] = 0x20204
WIDTH_MAP["eth1tmp1"] = 4
eth1tmp2 = 0x20208
ADDR_MAP["eth1tmp2"] = 0x20208
WIDTH_MAP["eth1tmp2"] = 4
eth2tmp0 = 0x20300
ADDR_MAP["eth2tmp0"] = 0x20300
WIDTH_MAP["eth2tmp0"] = 4
eth2tmp1 = 0x20304
ADDR_MAP["eth2tmp1"] = 0x20304
WIDTH_MAP["eth2tmp1"] = 4
eth2tmp2 = 0x20308
ADDR_MAP["eth2tmp2"] = 0x20308
WIDTH_MAP["eth2tmp2"] = 4
eth3tmp0 = 0x20400
ADDR_MAP["eth3tmp0"] = 0x20400
WIDTH_MAP["eth3tmp0"] = 4
eth3tmp1 = 0x20404
ADDR_MAP["eth3tmp1"] = 0x20404
WIDTH_MAP["eth3tmp1"] = 4
eth3tmp2 = 0x20408
ADDR_MAP["eth3tmp2"] = 0x20408
WIDTH_MAP["eth3tmp2"] = 4
wider = 0x2040c
ADDR_MAP["wider"] = 0x2040c
WIDTH_MAP["wider"] = 16
longer = 0x2041c
ADDR_MAP["longer"] = 0x2041c
WIDTH_MAP["longer"] = 16
ronly = 0x2042c
ADDR_MAP["ronly"] = 0x2042c
WIDTH_MAP["ronly"] = 4
ronly2 = 0x20430
ADDR_MAP["ronly2"] = 0x20430
WIDTH_MAP["ronly2"] = 4
ldst_ram = 0x20800
ADDR_MAP["ldst_ram"] = 0x20800
WIDTH_MAP["ldst_ram"] = 512
ronly = 0x30000
ADDR_MAP["ronly"] = 0x30000
WIDTH_MAP["ronly"] = 4
ronly2 = 0x30004
ADDR_MAP["ronly2"] = 0x30004
WIDTH_MAP["ronly2"] = 4
wonly = 0x30008
ADDR_MAP["wonly"] = 0x30008
WIDTH_MAP["wonly"] = 4
one = 0x3000c
ADDR_MAP["one"] = 0x3000c
WIDTH_MAP["one"] = 4
rega = 0x30010
ADDR_MAP["rega"] = 0x30010
WIDTH_MAP["rega"] = 4
control0 = 0x30014
ADDR_MAP["control0"] = 0x30014
WIDTH_MAP["control0"] = 4
statusa = 0x30018
ADDR_MAP["statusa"] = 0x30018
WIDTH_MAP["statusa"] = 4
regb = 0x3001c
ADDR_MAP["regb"] = 0x3001c
WIDTH_MAP["regb"] = 4
w1cc = 0x30020
ADDR_MAP["w1cc"] = 0x30020
WIDTH_MAP["w1cc"] = 4
badfwr = 0x30024
ADDR_MAP["badfwr"] = 0x30024
WIDTH_MAP["badfwr"] = 4
badfro = 0x30028
ADDR_MAP["badfro"] = 0x30028
WIDTH_MAP["badfro"] = 4
ramx = 0x30100
ADDR_MAP["ramx"] = 0x30100
WIDTH_MAP["ramx"] = 1024
rega = 0x40000
ADDR_MAP["rega"] = 0x40000
WIDTH_MAP["rega"] = 4
control0 = 0x40004
ADDR_MAP["control0"] = 0x40004
WIDTH_MAP["control0"] = 4
statusa = 0x40008
ADDR_MAP["statusa"] = 0x40008
WIDTH_MAP["statusa"] = 4
regb = 0x4000c
ADDR_MAP["regb"] = 0x4000c
WIDTH_MAP["regb"] = 4
extern = 0x40010
ADDR_MAP["extern"] = 0x40010
WIDTH_MAP["extern"] = 4
eth0tmp0 = 0x40100
ADDR_MAP["eth0tmp0"] = 0x40100
WIDTH_MAP["eth0tmp0"] = 4
eth0tmp1 = 0x40104
ADDR_MAP["eth0tmp1"] = 0x40104
WIDTH_MAP["eth0tmp1"] = 4
eth0tmp2 = 0x40108
ADDR_MAP["eth0tmp2"] = 0x40108
WIDTH_MAP["eth0tmp2"] = 4
eth1tmp0 = 0x40200
ADDR_MAP["eth1tmp0"] = 0x40200
WIDTH_MAP["eth1tmp0"] = 4
eth1tmp1 = 0x40204
ADDR_MAP["eth1tmp1"] = 0x40204
WIDTH_MAP["eth1tmp1"] = 4
eth1tmp2 = 0x40208
ADDR_MAP["eth1tmp2"] = 0x40208
WIDTH_MAP["eth1tmp2"] = 4
eth2tmp0 = 0x40300
ADDR_MAP["eth2tmp0"] = 0x40300
WIDTH_MAP["eth2tmp0"] = 4
eth2tmp1 = 0x40304
ADDR_MAP["eth2tmp1"] = 0x40304
WIDTH_MAP["eth2tmp1"] = 4
eth2tmp2 = 0x40308
ADDR_MAP["eth2tmp2"] = 0x40308
WIDTH_MAP["eth2tmp2"] = 4
eth3tmp0 = 0x40400
ADDR_MAP["eth3tmp0"] = 0x40400
WIDTH_MAP["eth3tmp0"] = 4
eth3tmp1 = 0x40404
ADDR_MAP["eth3tmp1"] = 0x40404
WIDTH_MAP["eth3tmp1"] = 4
eth3tmp2 = 0x40408
ADDR_MAP["eth3tmp2"] = 0x40408
WIDTH_MAP["eth3tmp2"] = 4
wider = 0x4040c
ADDR_MAP["wider"] = 0x4040c
WIDTH_MAP["wider"] = 16
longer = 0x4041c
ADDR_MAP["longer"] = 0x4041c
WIDTH_MAP["longer"] = 16
ronly = 0x4042c
ADDR_MAP["ronly"] = 0x4042c
WIDTH_MAP["ronly"] = 4
ronly2 = 0x40430
ADDR_MAP["ronly2"] = 0x40430
WIDTH_MAP["ronly2"] = 4
ldst_ram = 0x40800
ADDR_MAP["ldst_ram"] = 0x40800
WIDTH_MAP["ldst_ram"] = 512
| ADDR_MAP = {}
WIDTH_MAP = {}
ronly = 0x10000
ADDR_MAP["ronly"] = 0x10000
WIDTH_MAP["ronly"] = 4
ronly2 = 0x10004
ADDR_MAP["ronly2"] = 0x10004
WIDTH_MAP["ronly2"] = 4
wonly = 0x10008
ADDR_MAP["wonly"] = 0x10008
WIDTH_MAP["wonly"] = 4
one = 0x1000c
ADDR_MAP["one"] = 0x1000c
WIDTH_MAP["one"] = 4
rega = 0x10010
ADDR_MAP["rega"] = 0x10010
WIDTH_MAP["rega"] = 4
control0 = 0x10014
ADDR_MAP["control0"] = 0x10014
WIDTH_MAP["control0"] = 4
statusa = 0x10018
ADDR_MAP["statusa"] = 0x10018
WIDTH_MAP["statusa"] = 4
regb = 0x1001c
ADDR_MAP["regb"] = 0x1001c
WIDTH_MAP["regb"] = 4
w1cc = 0x10020
ADDR_MAP["w1cc"] = 0x10020
WIDTH_MAP["w1cc"] = 4
badfwr = 0x10024
ADDR_MAP["badfwr"] = 0x10024
WIDTH_MAP["badfwr"] = 4
badfro = 0x10028
ADDR_MAP["badfro"] = 0x10028
WIDTH_MAP["badfro"] = 4
ramx = 0x10100
ADDR_MAP["ramx"] = 0x10100
WIDTH_MAP["ramx"] = 1024
rega = 0x20000
ADDR_MAP["rega"] = 0x20000
WIDTH_MAP["rega"] = 4
control0 = 0x20004
ADDR_MAP["control0"] = 0x20004
WIDTH_MAP["control0"] = 4
statusa = 0x20008
ADDR_MAP["statusa"] = 0x20008
WIDTH_MAP["statusa"] = 4
regb = 0x2000c
ADDR_MAP["regb"] = 0x2000c
WIDTH_MAP["regb"] = 4
extern = 0x20010
ADDR_MAP["extern"] = 0x20010
WIDTH_MAP["extern"] = 4
eth0tmp0 = 0x20100
ADDR_MAP["eth0tmp0"] = 0x20100
WIDTH_MAP["eth0tmp0"] = 4
eth0tmp1 = 0x20104
ADDR_MAP["eth0tmp1"] = 0x20104
WIDTH_MAP["eth0tmp1"] = 4
eth0tmp2 = 0x20108
ADDR_MAP["eth0tmp2"] = 0x20108
WIDTH_MAP["eth0tmp2"] = 4
eth1tmp0 = 0x20200
ADDR_MAP["eth1tmp0"] = 0x20200
WIDTH_MAP["eth1tmp0"] = 4
eth1tmp1 = 0x20204
ADDR_MAP["eth1tmp1"] = 0x20204
WIDTH_MAP["eth1tmp1"] = 4
eth1tmp2 = 0x20208
ADDR_MAP["eth1tmp2"] = 0x20208
WIDTH_MAP["eth1tmp2"] = 4
eth2tmp0 = 0x20300
ADDR_MAP["eth2tmp0"] = 0x20300
WIDTH_MAP["eth2tmp0"] = 4
eth2tmp1 = 0x20304
ADDR_MAP["eth2tmp1"] = 0x20304
WIDTH_MAP["eth2tmp1"] = 4
eth2tmp2 = 0x20308
ADDR_MAP["eth2tmp2"] = 0x20308
WIDTH_MAP["eth2tmp2"] = 4
eth3tmp0 = 0x20400
ADDR_MAP["eth3tmp0"] = 0x20400
WIDTH_MAP["eth3tmp0"] = 4
eth3tmp1 = 0x20404
ADDR_MAP["eth3tmp1"] = 0x20404
WIDTH_MAP["eth3tmp1"] = 4
eth3tmp2 = 0x20408
ADDR_MAP["eth3tmp2"] = 0x20408
WIDTH_MAP["eth3tmp2"] = 4
wider = 0x2040c
ADDR_MAP["wider"] = 0x2040c
WIDTH_MAP["wider"] = 16
longer = 0x2041c
ADDR_MAP["longer"] = 0x2041c
WIDTH_MAP["longer"] = 16
ronly = 0x2042c
ADDR_MAP["ronly"] = 0x2042c
WIDTH_MAP["ronly"] = 4
ronly2 = 0x20430
ADDR_MAP["ronly2"] = 0x20430
WIDTH_MAP["ronly2"] = 4
ldst_ram = 0x20800
ADDR_MAP["ldst_ram"] = 0x20800
WIDTH_MAP["ldst_ram"] = 512
ronly = 0x30000
ADDR_MAP["ronly"] = 0x30000
WIDTH_MAP["ronly"] = 4
ronly2 = 0x30004
ADDR_MAP["ronly2"] = 0x30004
WIDTH_MAP["ronly2"] = 4
wonly = 0x30008
ADDR_MAP["wonly"] = 0x30008
WIDTH_MAP["wonly"] = 4
one = 0x3000c
ADDR_MAP["one"] = 0x3000c
WIDTH_MAP["one"] = 4
rega = 0x30010
ADDR_MAP["rega"] = 0x30010
WIDTH_MAP["rega"] = 4
control0 = 0x30014
ADDR_MAP["control0"] = 0x30014
WIDTH_MAP["control0"] = 4
statusa = 0x30018
ADDR_MAP["statusa"] = 0x30018
WIDTH_MAP["statusa"] = 4
regb = 0x3001c
ADDR_MAP["regb"] = 0x3001c
WIDTH_MAP["regb"] = 4
w1cc = 0x30020
ADDR_MAP["w1cc"] = 0x30020
WIDTH_MAP["w1cc"] = 4
badfwr = 0x30024
ADDR_MAP["badfwr"] = 0x30024
WIDTH_MAP["badfwr"] = 4
badfro = 0x30028
ADDR_MAP["badfro"] = 0x30028
WIDTH_MAP["badfro"] = 4
ramx = 0x30100
ADDR_MAP["ramx"] = 0x30100
WIDTH_MAP["ramx"] = 1024
rega = 0x40000
ADDR_MAP["rega"] = 0x40000
WIDTH_MAP["rega"] = 4
control0 = 0x40004
ADDR_MAP["control0"] = 0x40004
WIDTH_MAP["control0"] = 4
statusa = 0x40008
ADDR_MAP["statusa"] = 0x40008
WIDTH_MAP["statusa"] = 4
regb = 0x4000c
ADDR_MAP["regb"] = 0x4000c
WIDTH_MAP["regb"] = 4
extern = 0x40010
ADDR_MAP["extern"] = 0x40010
WIDTH_MAP["extern"] = 4
eth0tmp0 = 0x40100
ADDR_MAP["eth0tmp0"] = 0x40100
WIDTH_MAP["eth0tmp0"] = 4
eth0tmp1 = 0x40104
ADDR_MAP["eth0tmp1"] = 0x40104
WIDTH_MAP["eth0tmp1"] = 4
eth0tmp2 = 0x40108
ADDR_MAP["eth0tmp2"] = 0x40108
WIDTH_MAP["eth0tmp2"] = 4
eth1tmp0 = 0x40200
ADDR_MAP["eth1tmp0"] = 0x40200
WIDTH_MAP["eth1tmp0"] = 4
eth1tmp1 = 0x40204
ADDR_MAP["eth1tmp1"] = 0x40204
WIDTH_MAP["eth1tmp1"] = 4
eth1tmp2 = 0x40208
ADDR_MAP["eth1tmp2"] = 0x40208
WIDTH_MAP["eth1tmp2"] = 4
eth2tmp0 = 0x40300
ADDR_MAP["eth2tmp0"] = 0x40300
WIDTH_MAP["eth2tmp0"] = 4
eth2tmp1 = 0x40304
ADDR_MAP["eth2tmp1"] = 0x40304
WIDTH_MAP["eth2tmp1"] = 4
eth2tmp2 = 0x40308
ADDR_MAP["eth2tmp2"] = 0x40308
WIDTH_MAP["eth2tmp2"] = 4
eth3tmp0 = 0x40400
ADDR_MAP["eth3tmp0"] = 0x40400
WIDTH_MAP["eth3tmp0"] = 4
eth3tmp1 = 0x40404
ADDR_MAP["eth3tmp1"] = 0x40404
WIDTH_MAP["eth3tmp1"] = 4
eth3tmp2 = 0x40408
ADDR_MAP["eth3tmp2"] = 0x40408
WIDTH_MAP["eth3tmp2"] = 4
wider = 0x4040c
ADDR_MAP["wider"] = 0x4040c
WIDTH_MAP["wider"] = 16
longer = 0x4041c
ADDR_MAP["longer"] = 0x4041c
WIDTH_MAP["longer"] = 16
ronly = 0x4042c
ADDR_MAP["ronly"] = 0x4042c
WIDTH_MAP["ronly"] = 4
ronly2 = 0x40430
ADDR_MAP["ronly2"] = 0x40430
WIDTH_MAP["ronly2"] = 4
ldst_ram = 0x40800
ADDR_MAP["ldst_ram"] = 0x40800
WIDTH_MAP["ldst_ram"] = 512
| none | 1 | 1.575051 | 2 |
|
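The generated register map above only defines address/width lookup tables, and repeated register names (for example ronly) overwrite earlier dict entries, so only the last block's address survives. The sketch below shows one way such tables might be consumed; the rgf2_defs module name and the read_u32 bus accessor are assumptions for illustration, not part of the generated file.
# Hypothetical consumer of the generated ADDR_MAP / WIDTH_MAP tables.
from rgf2_defs import ADDR_MAP, WIDTH_MAP  # assumed module name

def dump_register(name, read_u32):
    """Read a register (or wide field) one 32-bit word at a time."""
    base = ADDR_MAP[name]
    nbytes = WIDTH_MAP[name]
    words = [read_u32(base + off) for off in range(0, nbytes, 4)]
    print("%s @ 0x%05x (%d bytes): %s" % (name, base, nbytes,
                                          [hex(w) for w in words]))

# Example with a dummy bus accessor that always returns zero.
dump_register("control0", read_u32=lambda addr: 0)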
solutions/python3/675.py | sm2774us/amazon_interview_prep_2021 | 42 | 6631716 | <reponame>sm2774us/amazon_interview_prep_2021<gh_stars>10-100
import collections
class Solution:
def cutOffTree(self, forest):
def hadlocks(forest, sr, sc, tr, tc):
R, C = len(forest), len(forest[0])
processed = set()
deque = collections.deque([(0, sr, sc)])
while deque:
detours, r, c = deque.popleft()
if (r, c) not in processed:
processed.add((r, c))
if r == tr and c == tc:
return abs(sr-tr) + abs(sc-tc) + 2*detours
for nr, nc, closer in ((r-1, c, r > tr), (r+1, c, r < tr),
(r, c-1, c > tc), (r, c+1, c < tc)):
if 0 <= nr < R and 0 <= nc < C and forest[nr][nc]:
if closer:
deque.appendleft((detours, nr, nc))
else:
deque.append((detours+1, nr, nc))
return -1
trees = sorted((v, r, c) for r, row in enumerate(forest)
for c, v in enumerate(row) if v > 1)
sr = sc = ans = 0
for _, tr, tc in trees:
d = hadlocks(forest, sr, sc, tr, tc)
if d < 0: return -1
ans += d
sr, sc = tr, tc
        return ans | import collections
class Solution:
def cutOffTree(self, forest):
def hadlocks(forest, sr, sc, tr, tc):
R, C = len(forest), len(forest[0])
processed = set()
deque = collections.deque([(0, sr, sc)])
while deque:
detours, r, c = deque.popleft()
if (r, c) not in processed:
processed.add((r, c))
if r == tr and c == tc:
return abs(sr-tr) + abs(sc-tc) + 2*detours
for nr, nc, closer in ((r-1, c, r > tr), (r+1, c, r < tr),
(r, c-1, c > tc), (r, c+1, c < tc)):
if 0 <= nr < R and 0 <= nc < C and forest[nr][nc]:
if closer:
deque.appendleft((detours, nr, nc))
else:
deque.append((detours+1, nr, nc))
return -1
trees = sorted((v, r, c) for r, row in enumerate(forest)
for c, v in enumerate(row) if v > 1)
sr = sc = ans = 0
for _, tr, tc in trees:
d = hadlocks(forest, sr, sc, tr, tc)
if d < 0: return -1
ans += d
sr, sc = tr, tc
return ans | none | 1 | 2.815334 | 3 |
|
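A minimal usage sketch for the cutOffTree solution above (with the collections import added); the small forest is an illustrative example in which every tree, taken in height order, is one step from the previous one, so the walk costs 6 steps.
# Assumes the Solution class above is defined or importable in the same module.
forest = [[1, 2, 3],
          [0, 0, 4],
          [7, 6, 5]]
print(Solution().cutOffTree(forest))  # expected output: 6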
tests/pydevtest/test_fuse.py | wtsi-hgi/irods | 0 | 6631717 | <filename>tests/pydevtest/test_fuse.py
import sys
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
from resource_suite import ResourceBase
import commands
import distutils.spawn
import os
import subprocess
import stat
import socket
import lib
class Test_Fuse(ResourceBase, unittest.TestCase):
def setUp(self):
super(Test_Fuse, self).setUp()
def tearDown(self):
super(Test_Fuse, self).tearDown()
def test_irodsFs_issue_2252(self):
# =-=-=-=-=-=-=-
# set up a fuse mount
mount_point = "fuse_mount_point"
if not os.path.isdir(mount_point):
os.mkdir(mount_point)
os.system("irodsFs " + mount_point)
largefilename = "big_file.txt"
output = commands.getstatusoutput('dd if=/dev/zero of=' + largefilename + ' bs=1M count=100')
# =-=-=-=-=-=-=-
        # use system copy to put some data into the mount point
# and verify that it shows up in the ils
cmd = "cp ./" + largefilename + " ./" + mount_point + "; ls ./" + mount_point + "/" + largefilename
output = commands.getstatusoutput(cmd)
out_str = str(output)
print("results[" + out_str + "]")
os.system("rm ./" + largefilename)
os.system("rm ./" + mount_point + "/" + largefilename)
# tear down the fuse mount
os.system("fusermount -uz " + mount_point)
if os.path.isdir(mount_point):
os.rmdir(mount_point)
assert(-1 != out_str.find(largefilename))
def test_fusermount_permissions(self):
# Check that fusermount is configured correctly for irodsFs
fusermount_path = distutils.spawn.find_executable('fusermount')
assert fusermount_path is not None, 'fusermount binary not found'
assert os.stat(fusermount_path).st_mode & stat.S_ISUID, 'fusermount setuid bit not set'
p = subprocess.Popen(['fusermount -V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdoutdata, stderrdata = p.communicate()
assert p.returncode == 0, '\n'.join(['fusermount not executable',
'return code: ' + str(p.returncode),
'stdout: ' + stdoutdata,
'stderr: ' + stderrdata])
def test_irodsFs(self):
# =-=-=-=-=-=-=-
# set up a fuse mount
mount_point = "fuse_mount_point"
if not os.path.isdir(mount_point):
os.mkdir(mount_point)
os.system("irodsFs " + mount_point)
# =-=-=-=-=-=-=-
# put some test data
test_file = "irodsfs_test_file"
lib.make_file(test_file, 10)
cmd = "iput " + test_file + " foo0"
output = commands.getstatusoutput(cmd)
# =-=-=-=-=-=-=-
# see if the data object is actually in the mount point
# using the system ls
cmd = "ls -l " + mount_point
output = commands.getstatusoutput(cmd)
out_str = str(output)
print("mount ls results [" + out_str + "]")
assert(-1 != out_str.find("foo0"))
# =-=-=-=-=-=-=-
        # use system copy to put some data into the mount point
# and verify that it shows up in the ils
cmd = "cp " + test_file + " " + mount_point + "/baz ; ils -l baz"
output = commands.getstatusoutput(cmd)
out_str = str(output)
print("results[" + out_str + "]")
assert(-1 != out_str.find("baz"))
# =-=-=-=-=-=-=-
# now irm the file and verify that it is not visible
# via the fuse mount
cmd = "irm -f baz ; ils -l baz"
output = commands.getstatusoutput(cmd)
out_str = str(output)
print("results[" + out_str + "]")
assert(-1 != out_str.find("baz does not exist"))
output = commands.getstatusoutput("ls -l " + mount_point)
out_str = str(output)
print("mount ls results [" + out_str + "]")
assert(-1 != out_str.find("foo0"))
# =-=-=-=-=-=-=-
        # now rm the foo0 file and then verify it doesn't show
# up in the ils
cmd = "rm " + mount_point + "/foo0; ils -l foo0"
print("cmd: [" + cmd + "]")
output = commands.getstatusoutput(cmd)
out_str = str(output)
print("results[" + out_str + "]")
assert(-1 != out_str.find("foo0 does not exist"))
# =-=-=-=-=-=-=-
# now run bonnie++ and then verify it reports a summary
if (os.path.isfile("/usr/sbin/bonnie++")):
# ubuntu and centos
bonniecmd = "/usr/sbin/bonnie++"
else:
# suse
bonniecmd = "/usr/bin/bonnie++"
cmd = bonniecmd + " -r 1024 -d " + mount_point
print("cmd: [" + cmd + "]")
output = commands.getstatusoutput(cmd)
out_str = str(output)
print("results[" + out_str + "]")
assert(-1 != out_str.find("-Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--"))
assert(-1 != out_str.find("-Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--"))
# tear down the fuse mount
os.system("fusermount -uz " + mount_point)
if os.path.isdir(mount_point):
os.rmdir(mount_point)
| <filename>tests/pydevtest/test_fuse.py
import sys
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
from resource_suite import ResourceBase
import commands
import distutils.spawn
import os
import subprocess
import stat
import socket
import lib
class Test_Fuse(ResourceBase, unittest.TestCase):
def setUp(self):
super(Test_Fuse, self).setUp()
def tearDown(self):
super(Test_Fuse, self).tearDown()
def test_irodsFs_issue_2252(self):
# =-=-=-=-=-=-=-
# set up a fuse mount
mount_point = "fuse_mount_point"
if not os.path.isdir(mount_point):
os.mkdir(mount_point)
os.system("irodsFs " + mount_point)
largefilename = "big_file.txt"
output = commands.getstatusoutput('dd if=/dev/zero of=' + largefilename + ' bs=1M count=100')
# =-=-=-=-=-=-=-
        # use system copy to put some data into the mount point
# and verify that it shows up in the ils
cmd = "cp ./" + largefilename + " ./" + mount_point + "; ls ./" + mount_point + "/" + largefilename
output = commands.getstatusoutput(cmd)
out_str = str(output)
print("results[" + out_str + "]")
os.system("rm ./" + largefilename)
os.system("rm ./" + mount_point + "/" + largefilename)
# tear down the fuse mount
os.system("fusermount -uz " + mount_point)
if os.path.isdir(mount_point):
os.rmdir(mount_point)
assert(-1 != out_str.find(largefilename))
def test_fusermount_permissions(self):
# Check that fusermount is configured correctly for irodsFs
fusermount_path = distutils.spawn.find_executable('fusermount')
assert fusermount_path is not None, 'fusermount binary not found'
assert os.stat(fusermount_path).st_mode & stat.S_ISUID, 'fusermount setuid bit not set'
p = subprocess.Popen(['fusermount -V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdoutdata, stderrdata = p.communicate()
assert p.returncode == 0, '\n'.join(['fusermount not executable',
'return code: ' + str(p.returncode),
'stdout: ' + stdoutdata,
'stderr: ' + stderrdata])
def test_irodsFs(self):
# =-=-=-=-=-=-=-
# set up a fuse mount
mount_point = "fuse_mount_point"
if not os.path.isdir(mount_point):
os.mkdir(mount_point)
os.system("irodsFs " + mount_point)
# =-=-=-=-=-=-=-
# put some test data
test_file = "irodsfs_test_file"
lib.make_file(test_file, 10)
cmd = "iput " + test_file + " foo0"
output = commands.getstatusoutput(cmd)
# =-=-=-=-=-=-=-
# see if the data object is actually in the mount point
# using the system ls
cmd = "ls -l " + mount_point
output = commands.getstatusoutput(cmd)
out_str = str(output)
print("mount ls results [" + out_str + "]")
assert(-1 != out_str.find("foo0"))
# =-=-=-=-=-=-=-
        # use system copy to put some data into the mount point
# and verify that it shows up in the ils
cmd = "cp " + test_file + " " + mount_point + "/baz ; ils -l baz"
output = commands.getstatusoutput(cmd)
out_str = str(output)
print("results[" + out_str + "]")
assert(-1 != out_str.find("baz"))
# =-=-=-=-=-=-=-
# now irm the file and verify that it is not visible
# via the fuse mount
cmd = "irm -f baz ; ils -l baz"
output = commands.getstatusoutput(cmd)
out_str = str(output)
print("results[" + out_str + "]")
assert(-1 != out_str.find("baz does not exist"))
output = commands.getstatusoutput("ls -l " + mount_point)
out_str = str(output)
print("mount ls results [" + out_str + "]")
assert(-1 != out_str.find("foo0"))
# =-=-=-=-=-=-=-
        # now rm the foo0 file and then verify it doesn't show
# up in the ils
cmd = "rm " + mount_point + "/foo0; ils -l foo0"
print("cmd: [" + cmd + "]")
output = commands.getstatusoutput(cmd)
out_str = str(output)
print("results[" + out_str + "]")
assert(-1 != out_str.find("foo0 does not exist"))
# =-=-=-=-=-=-=-
# now run bonnie++ and then verify it reports a summary
if (os.path.isfile("/usr/sbin/bonnie++")):
# ubuntu and centos
bonniecmd = "/usr/sbin/bonnie++"
else:
# suse
bonniecmd = "/usr/bin/bonnie++"
cmd = bonniecmd + " -r 1024 -d " + mount_point
print("cmd: [" + cmd + "]")
output = commands.getstatusoutput(cmd)
out_str = str(output)
print("results[" + out_str + "]")
assert(-1 != out_str.find("-Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--"))
assert(-1 != out_str.find("-Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--"))
# tear down the fuse mount
os.system("fusermount -uz " + mount_point)
if os.path.isdir(mount_point):
os.rmdir(mount_point)
| en | 0.727096 | # =-=-=-=-=-=-=- # set up a fuse mount # =-=-=-=-=-=-=- # use system copy to put some data into the mount mount # and verify that it shows up in the ils # tear down the fuse mount # Check that fusermount is configured correctly for irodsFs # =-=-=-=-=-=-=- # set up a fuse mount # =-=-=-=-=-=-=- # put some test data # =-=-=-=-=-=-=- # see if the data object is actually in the mount point # using the system ls # =-=-=-=-=-=-=- # use system copy to put some data into the mount mount # and verify that it shows up in the ils # =-=-=-=-=-=-=- # now irm the file and verify that it is not visible # via the fuse mount # =-=-=-=-=-=-=- # now rm the foo0 file and then verify it doesnt show # up in the ils # =-=-=-=-=-=-=- # now run bonnie++ and then verify it reports a summary # ubuntu and centos # suse # tear down the fuse mount | 2.440072 | 2 |
Twitter/DataAccess/sample.py | aslisabanci/integrations | 5 | 6631718 | import Algorithmia
input = {"INPUT": "something interesting"}
client = Algorithmia.client('YOUR_API_KEY')
algo = client.algo('OUR_ALGORITHM')
print(algo.pipe(input)) | import Algorithmia
input = {"INPUT": "something interesting"}
client = Algorithmia.client('YOUR_API_KEY')
algo = client.algo('OUR_ALGORITHM')
print(algo.pipe(input)) | none | 1 | 2.240737 | 2 |
|
mccd/grads.py | tobias-liaudat/deep_mccd | 0 | 6631719 | <reponame>tobias-liaudat/deep_mccd
# -*- coding: utf-8 -*-
r"""GRADIENTS.
Defines the gradient classes that will be used in the optimization
procedures from the ModOpt package.
: Authors: <NAME> <<EMAIL>>
<NAME> <https://github.com/MorganSchmitz>
<NAME> <https://github.com/jerome-bonnin>
"""
from __future__ import absolute_import, print_function
import numpy as np
from modopt.opt.gradient import GradParent
from modopt.math.matrix import PowerMethod
from modopt.signal.wavelet import filter_convolve
import mccd.utils as utils
class CoeffLocGrad(GradParent, PowerMethod):
r"""Gradient class for the local coefficient update.
Local Alpha, :math:`\\alpha_{k}`.
Parameters
----------
data: numpy.ndarray
Observed data.
weights: numpy.ndarray
Corresponding pixel-wise weights.
S: numpy.ndarray
Current eigenPSFs :math:`S`.
VT: numpy.ndarray
Matrix of spatial constraint enforcement (in the MCCD-RCA case will
be the matrix of concatenated graph Laplacians.)
H_glob: numpy.ndarray
Current estimation of the global model.
flux: numpy.ndarray
Per-object flux value.
sig: numpy.ndarray
Noise levels.
ker: numpy.ndarray
Shifting kernels.
ker_rot: numpy.ndarray
Inverted shifting kernels.
SNR_weights: numpy.ndarray
Array of per star weights.
D: float
Upsampling factor.
save_iter_cost: bool
To save iteration diagnostic data.
Default is ``False``.
data_type: str
Data type to be used.
Default is ``float``.
"""
def __init__(self, data, weights, S, VT, H_glob, flux, sig, ker,
ker_rot, SNR_weights, D, save_iter_cost=False,
data_type='float', verbose=True):
r"""Initialize class attributes."""
self.verbose = verbose
self._grad_data_type = data_type
self.obs_data = data
self.obs_weights = weights
self.op = self.MX
self.trans_op = self.MtX
self.VT = VT
self.H_glob = H_glob
self.flux = flux
self.sig = sig
self.normfacs = self.flux / (np.median(self.flux) * self.sig)
self.ker = ker
self.ker_rot = ker_rot
self.D = D
self.SNR_weights = SNR_weights
self.iter_cost = []
self.save_iter_cost = save_iter_cost
self.S = None
self.FdS = None
self.FdH_glob = None
PowerMethod.__init__(self, self.trans_op_op,
(S.shape[-1], VT.shape[0]), auto_run=False)
self.update_S(np.copy(S), update_spectral_radius=False)
self._current_rec = None
def reset_iter_cost(self):
r"""Reset iteration cost."""
self.iter_cost = []
def get_iter_cost(self):
r"""Get current iteration cost."""
return self.iter_cost
def update_S(self, new_S, update_spectral_radius=True):
r"""Update current eigenPSFs."""
self.S = new_S
self.FdS = np.array([[nf * utils.degradation_op(S_j, shift_ker, self.D)
for nf, shift_ker in
zip(self.normfacs, utils.reg_format(self.ker))]
for S_j in utils.reg_format(self.S)])
if update_spectral_radius:
PowerMethod.get_spec_rad(self)
def update_H_glob(self, new_H_glob):
r"""Update current global model."""
self.H_glob = new_H_glob
dec_H_glob = np.array(
[nf * utils.degradation_op(H_i, shift_ker, self.D)
for nf, shift_ker, H_i in
zip(self.normfacs,
utils.reg_format(self.ker),
utils.reg_format(self.H_glob))])
self.FdH_glob = utils.rca_format(dec_H_glob)
def MX(self, alpha):
r"""Apply degradation operator and renormalize.
Parameters
----------
alpha: numpy.ndarray
Current coefficients (after factorization by :math:`V^{\\top}`).
"""
A = alpha.dot(self.VT)
dec_rec = np.empty(self.obs_data.shape)
for j in range(dec_rec.shape[-1]):
dec_rec[:, :, j] = np.sum(A[:, j].reshape(-1, 1, 1) *
self.FdS[:, j], axis=0)
self._current_rec = dec_rec
return self._current_rec
def MtX(self, x):
r"""Adjoint to degradation operator :func:`MX`.
Parameters
----------
x : numpy.ndarray
Set of finer-grid images.
"""
x = utils.reg_format(x * self.SNR_weights) # [TL] CHECK
STx = np.array([np.sum(FdS_i * x, axis=(1, 2)) for FdS_i in self.FdS])
return STx.dot(self.VT.T)
def cost(self, x, y=None, verbose=False):
r"""Compute data fidelity term.
Notes
-----
``y`` is unused (it's just so ``modopt.opt.algorithms.Condat``
can feed the dual variable.)
"""
if isinstance(self._current_rec, type(None)):
self._current_rec = self.MX(x)
cost_val = 0.5 * np.linalg.norm(
self.obs_weights * (self._current_rec + self.FdH_glob -
self.obs_data) * self.SNR_weights) ** 2
return cost_val
def get_grad(self, x):
r"""Compute current iteration's gradient."""
self.grad = self.MtX(self.obs_weights ** 2 *
(self.MX(x) + self.FdH_glob - self.obs_data))
if self.save_iter_cost:
self.iter_cost.append(self.cost(x))
class CoeffGlobGrad(GradParent, PowerMethod):
    r"""Gradient class for the global coefficient update.
    Global Alpha, :math:`\\tilde{\\alpha}`.
Parameters
----------
data: numpy.ndarray
Observed data.
weights: numpy.ndarray
Corresponding pixel-wise weights.
S: numpy.ndarray
Current eigenPSFs :math:`S`.
Pi: numpy.ndarray
Matrix of positions polynomials.
H_loc: numpy.ndarray
Current estimation of the local model
flux: numpy.ndarray
Per-object flux value.
sig: numpy.ndarray
Noise levels.
ker: numpy.ndarray
Shifting kernels.
ker_rot: numpy.ndarray
Inverted shifting kernels.
SNR_weights: numpy.ndarray
Array of per star weights.
D: float
Upsampling factor.
save_iter_cost: bool
To save iteration diagnostic data.
Default is ``False``.
data_type: str
Data type to be used.
Default is ``float``.
"""
def __init__(self, data, weights, S, Pi, H_loc, flux, sig, ker,
ker_rot, D, SNR_weights, save_iter_cost=False,
data_type='float', verbose=True):
r"""Initialize class attributes."""
self.verbose = verbose
self._grad_data_type = data_type
self.obs_data = data
self.obs_weights = weights
self.op = self.MX
self.trans_op = self.MtX
self.Pi = Pi
self.H_loc = H_loc
self.flux = flux
self.sig = sig
self.normfacs = self.flux / (np.median(self.flux) * self.sig)
self.ker = ker
self.ker_rot = ker_rot
self.D = D
self.SNR_weights = SNR_weights
self.iter_cost = []
self.save_iter_cost = save_iter_cost
self.S = None
self.FdS = None
self.FdH_loc = None
PowerMethod.__init__(self, self.trans_op_op,
(S.shape[-1], Pi.shape[0]), auto_run=False)
self.update_S(np.copy(S), update_spectral_radius=False)
self._current_rec = None
def reset_iter_cost(self):
r"""Reset iteration cost."""
self.iter_cost = []
def get_iter_cost(self):
r"""Get current iteration cost."""
return self.iter_cost
def update_S(self, new_S, update_spectral_radius=True):
r"""Update current eigenPSFs."""
self.S = new_S
self.FdS = np.array([[nf * utils.degradation_op(S_j, shift_ker, self.D)
for nf, shift_ker in
zip(self.normfacs, utils.reg_format(self.ker))]
for S_j in utils.reg_format(self.S)])
if update_spectral_radius:
PowerMethod.get_spec_rad(self)
def update_H_loc(self, new_H_loc):
r"""Update current local models."""
self.H_loc = new_H_loc
dec_H_loc = np.array([nf * utils.degradation_op(H_i, shift_ker, self.D)
for nf, shift_ker, H_i in
zip(self.normfacs,
utils.reg_format(self.ker),
utils.reg_format(self.H_loc))])
self.FdH_loc = utils.rca_format(dec_H_loc)
def MX(self, alpha):
r"""Apply degradation operator and renormalize.
Parameters
----------
alpha: numpy.ndarray
Current coefficients (after factorization by :math:`\\Pi`).
"""
A = alpha.dot(self.Pi)
dec_rec = np.empty(self.obs_data.shape)
for j in range(dec_rec.shape[-1]):
dec_rec[:, :, j] = np.sum(A[:, j].reshape(-1, 1, 1) *
self.FdS[:, j], axis=0)
self._current_rec = dec_rec
return self._current_rec
def MtX(self, x):
r"""Adjoint to degradation operator :func:`MX`.
Parameters
----------
x : numpy.ndarray
Set of finer-grid images.
"""
x = utils.reg_format(x * self.SNR_weights) # [TL] CHECK
STx = np.array([np.sum(FdS_i * x, axis=(1, 2)) for FdS_i in self.FdS])
return STx.dot(self.Pi.T)
def cost(self, x, y=None, verbose=False):
r"""Compute data fidelity term.
Notes
-----
``y`` is unused (it's just so ``modopt.opt.algorithms.Condat``
can feed the dual variable.)
"""
if isinstance(self._current_rec, type(None)):
self._current_rec = self.MX(x)
cost_val = 0.5 * np.linalg.norm(
self.obs_weights * (self._current_rec + self.FdH_loc -
self.obs_data) * self.SNR_weights) ** 2
return cost_val
def get_grad(self, x):
r"""Compute current iteration's gradient."""
self.grad = self.MtX(self.obs_weights ** 2 *
(self.MX(x) + self.FdH_loc - self.obs_data))
if self.save_iter_cost:
self.iter_cost.append(self.cost(x))
class SourceLocGrad(GradParent, PowerMethod):
r"""Gradient class for the local eigenPSF update.
Local S, :math:`S_{k}`.
Parameters
----------
data: numpy.ndarray
        Input data array, an array of 2D observed images (i.e. with noise).
weights: numpy.ndarray
Corresponding pixel-wise weights.
A: numpy.ndarray
Current estimation of corresponding coefficients.
H_glob: numpy.ndarray
Current estimation of the global model
flux: numpy.ndarray
Per-object flux value.
sig: numpy.ndarray
Noise levels.
ker: numpy.ndarray
Shifting kernels.
ker_rot: numpy.ndarray
Inverted shifting kernels.
D: float
Upsampling factor.
filters: numpy.ndarray
Set of filters.
save_iter_cost: bool
To save iteration diagnostic data.
Default is ``False``.
data_type: str
Data type to be used.
Default is ``float``.
"""
def __init__(self, data, weights, A, H_glob, flux, sig, ker, ker_rot,
SNR_weights, D, filters, save_iter_cost=False,
data_type='float', verbose=True):
r"""Initialize class attributes."""
self.verbose = verbose
self._grad_data_type = data_type
self.obs_data = data
self.obs_weights = weights
self.op = self.MX
self.trans_op = self.MtX
self.A = np.copy(A)
self.H_glob = np.copy(H_glob)
self.flux = flux
self.sig = sig
self.normfacs = self.flux / (np.median(self.flux) * self.sig)
self.ker = ker
self.ker_rot = ker_rot
self.D = D
self.filters = filters
self.SNR_weights = SNR_weights
self.iter_cost = []
self.save_iter_cost = save_iter_cost
self.FdH_glob = None
hr_shape = np.array(self.obs_data.shape[:2]) * self.D
power_method_shape = tuple([hr_shape[0], hr_shape[1], self.A.shape[0]])
PowerMethod.__init__(self, self.trans_op_op,
power_method_shape,
auto_run=False)
self._current_rec = None
def reset_iter_cost(self):
r"""Reset iteration cost."""
self.iter_cost = []
def get_iter_cost(self):
r"""Get current iteration cost."""
return self.iter_cost
def update_A(self, new_A, update_spectral_radius=True):
r"""Update current coefficients."""
self.A = new_A
if update_spectral_radius:
PowerMethod.get_spec_rad(self)
def update_H_glob(self, new_H_glob):
r"""Update current global model."""
self.H_glob = new_H_glob
dec_H_glob = np.array(
[nf * utils.degradation_op(H_i, shift_ker, self.D)
for nf, shift_ker, H_i in zip(self.normfacs,
utils.reg_format(self.ker),
utils.reg_format(self.H_glob))])
self.FdH_glob = utils.rca_format(dec_H_glob)
def MX(self, S):
r"""Apply degradation operator and renormalize.
Parameters
----------
S : numpy.ndarray
Current eigenPSFs in direct space.
Returns
-------
numpy.ndarray result
"""
#S = utils.rca_format(
# np.array([filter_convolve(transf_Sj, self.filters, filter_rot=True)
# for transf_Sj in transf_S]))
# S = utils.rca_format(transf_S)
dec_rec = np.array(
[nf * utils.degradation_op(S.dot(A_i), shift_ker, self.D)
for nf, A_i, shift_ker in zip(self.normfacs,
#[nf * utils.degradation_op(Sj, shift_ker, self.D)
#for nf, Sj, shift_ker in zip(self.normfacs,
self.A.T,
utils.reg_format(self.ker))])
self._current_rec = utils.rca_format(dec_rec)
return self._current_rec
def MtX(self, x):
r"""Adjoint to degradation operator :func:`MX`."""
x = utils.reg_format(x * self.SNR_weights)
upsamp_x = np.array(
[nf * utils.adjoint_degradation_op(x_i, shift_ker, self.D)
for nf, x_i, shift_ker in zip(self.normfacs,
x,
utils.reg_format(self.ker_rot))])
x = utils.rca_format(x)
upsamp_x = utils.rca_format(upsamp_x)
#return utils.apply_transform(upsamp_x.dot(self.A.T), self.filters)
return upsamp_x.dot(self.A.T)
def cost(self, x, y=None, verbose=False):
r"""Compute data fidelity term.
Notes
-----
``y`` is unused (it's just so ``modopt.opt.algorithms.Condat``
can feed the dual variable.)
"""
if isinstance(self._current_rec, type(None)):
self._current_rec = self.MX(x)
cost_val = 0.5 * np.linalg.norm(
self.obs_weights * (self._current_rec + self.FdH_glob -
self.obs_data) * self.SNR_weights) ** 2
return cost_val
def get_grad(self, x):
r"""Compute current iteration's gradient."""
self.grad = self.MtX(self.obs_weights ** 2 *
(self.MX(x) + self.FdH_glob - self.obs_data))
if self.save_iter_cost:
self.iter_cost.append(self.cost(x))
class SourceGlobGrad(GradParent, PowerMethod):
r"""Gradient class for the global eigenPSF update.
Global S, :math:`\\tilde{S}`.
Parameters
----------
data: numpy.ndarray
        Input data array, an array of 2D observed images (i.e. with noise).
weights: numpy.ndarray
Corresponding pixel-wise weights.
A: numpy.ndarray
Current estimation of corresponding coefficients.
H_loc: numpy.ndarray
Current estimation of the local models
flux: numpy.ndarray
Per-object flux value.
sig: numpy.ndarray
Noise levels.
ker: numpy.ndarray
Shifting kernels.
ker_rot: numpy.ndarray
Inverted shifting kernels.
D: float
Upsampling factor.
filters: numpy.ndarray
Set of filters.
save_iter_cost: bool
To save iteration diagnostic data.
Default is ``False``.
data_type: str
Data type to be used.
Default is ``float``.
"""
def __init__(self, data, weights, A, H_loc, flux, sig,
ker, ker_rot, SNR_weights, D, filters, save_iter_cost=False,
data_type='float', verbose=True):
r"""Initialize class attributes."""
self.verbose = verbose
self._grad_data_type = data_type
self.obs_data = data
self.obs_weights = weights
self.op = self.MX
self.trans_op = self.MtX
self.A = np.copy(A)
self.H_loc = np.copy(H_loc)
self.flux = flux
self.sig = sig
self.normfacs = self.flux / (np.median(self.flux) * self.sig)
self.ker = ker
self.ker_rot = ker_rot
self.D = D
self.filters = filters
self.SNR_weights = SNR_weights
self.iter_cost = []
self.save_iter_cost = save_iter_cost
self.FdH_loc = None
hr_shape = np.array(self.obs_data.shape[:2]) * self.D
power_method_shape = tuple([hr_shape[0], hr_shape[1], self.A.shape[0]])
PowerMethod.__init__(self, self.trans_op_op,
power_method_shape,
auto_run=False)
self._current_rec = None
def reset_iter_cost(self):
r"""Reset iteration cost."""
self.iter_cost = []
def get_iter_cost(self):
r"""Get current iteration cost."""
return self.iter_cost
def update_A(self, new_A, update_spectral_radius=True):
r"""Update current coefficients."""
self.A = new_A
if update_spectral_radius:
PowerMethod.get_spec_rad(self)
def update_H_loc(self, new_H_loc):
r"""Update current local models."""
self.H_loc = new_H_loc
dec_H_loc = np.array(
[nf * utils.degradation_op(H_i, shift_ker, self.D)
for nf, shift_ker, H_i in
zip(self.normfacs,
utils.reg_format(self.ker),
utils.reg_format(self.H_loc))])
self.FdH_loc = utils.rca_format(dec_H_loc)
def MX(self, S):
r"""Apply degradation operator and renormalize.
Parameters
----------
S : numpy.ndarray (rca_format)
Current eigenPSFs in direct space.
Returns
-------
numpy.ndarray result
"""
#S = utils.rca_format(
# np.array([filter_convolve(transf_Sj, self.filters, filter_rot=True)
# for transf_Sj in transf_S]))
# S = utils.rca_format(transf_S)
# print(np.shape(S))
dec_rec = np.array(
[nf * utils.degradation_op(S.dot(A_i), shift_ker, self.D)
for nf, A_i, shift_ker in zip(self.normfacs,
#[nf * utils.degradation_op(Sj, shift_ker, self.D)
#for nf, Sj, shift_ker in zip(self.normfacs,
self.A.T,
utils.reg_format(self.ker))])
self._current_rec = utils.rca_format(dec_rec)
return self._current_rec
def MtX(self, x):
r"""Adjoint to degradation operator :func:`MX`."""
x = utils.reg_format(x * self.SNR_weights)
upsamp_x = np.array(
[nf * utils.adjoint_degradation_op(x_i, shift_ker, self.D) for
nf, x_i, shift_ker
in zip(self.normfacs, x, utils.reg_format(self.ker_rot))])
x = utils.rca_format(x)
upsamp_x = utils.rca_format(upsamp_x)
#return utils.apply_transform(upsamp_x.dot(self.A.T), self.filters)
return upsamp_x.dot(self.A.T)
def cost(self, x, y=None, verbose=False):
r"""Compute data fidelity term.
Notes
-----
``y`` is unused (it's just so ``modopt.opt.algorithms.Condat``
can feed the dual variable.)
"""
if isinstance(self._current_rec, type(None)):
self._current_rec = self.MX(x)
cost_val = 0.5 * np.linalg.norm(
self.obs_weights * (
self._current_rec + self.FdH_loc - self.obs_data) *
self.SNR_weights) ** 2
return cost_val
def get_grad(self, x):
r"""Compute current iteration's gradient."""
self.grad = self.MtX(self.obs_weights ** 2 * (
self.MX(x) + self.FdH_loc - self.obs_data))
if self.save_iter_cost:
self.iter_cost.append(self.cost(x))
| # -*- coding: utf-8 -*-
r"""GRADIENTS.
Defines the gradient classes that will be used in the optimization
procedures from the ModOpt package.
: Authors: <NAME> <<EMAIL>>
<NAME> <https://github.com/MorganSchmitz>
<NAME> <https://github.com/jerome-bonnin>
"""
from __future__ import absolute_import, print_function
import numpy as np
from modopt.opt.gradient import GradParent
from modopt.math.matrix import PowerMethod
from modopt.signal.wavelet import filter_convolve
import mccd.utils as utils
class CoeffLocGrad(GradParent, PowerMethod):
r"""Gradient class for the local coefficient update.
Local Alpha, :math:`\\alpha_{k}`.
Parameters
----------
data: numpy.ndarray
Observed data.
weights: numpy.ndarray
Corresponding pixel-wise weights.
S: numpy.ndarray
Current eigenPSFs :math:`S`.
VT: numpy.ndarray
Matrix of spatial constraint enforcement (in the MCCD-RCA case will
be the matrix of concatenated graph Laplacians.)
H_glob: numpy.ndarray
Current estimation of the global model.
flux: numpy.ndarray
Per-object flux value.
sig: numpy.ndarray
Noise levels.
ker: numpy.ndarray
Shifting kernels.
ker_rot: numpy.ndarray
Inverted shifting kernels.
SNR_weights: numpy.ndarray
Array of per star weights.
D: float
Upsampling factor.
save_iter_cost: bool
To save iteration diagnostic data.
Default is ``False``.
data_type: str
Data type to be used.
Default is ``float``.
"""
def __init__(self, data, weights, S, VT, H_glob, flux, sig, ker,
ker_rot, SNR_weights, D, save_iter_cost=False,
data_type='float', verbose=True):
r"""Initialize class attributes."""
self.verbose = verbose
self._grad_data_type = data_type
self.obs_data = data
self.obs_weights = weights
self.op = self.MX
self.trans_op = self.MtX
self.VT = VT
self.H_glob = H_glob
self.flux = flux
self.sig = sig
self.normfacs = self.flux / (np.median(self.flux) * self.sig)
self.ker = ker
self.ker_rot = ker_rot
self.D = D
self.SNR_weights = SNR_weights
self.iter_cost = []
self.save_iter_cost = save_iter_cost
self.S = None
self.FdS = None
self.FdH_glob = None
PowerMethod.__init__(self, self.trans_op_op,
(S.shape[-1], VT.shape[0]), auto_run=False)
self.update_S(np.copy(S), update_spectral_radius=False)
self._current_rec = None
def reset_iter_cost(self):
r"""Reset iteration cost."""
self.iter_cost = []
def get_iter_cost(self):
r"""Get current iteration cost."""
return self.iter_cost
def update_S(self, new_S, update_spectral_radius=True):
r"""Update current eigenPSFs."""
self.S = new_S
self.FdS = np.array([[nf * utils.degradation_op(S_j, shift_ker, self.D)
for nf, shift_ker in
zip(self.normfacs, utils.reg_format(self.ker))]
for S_j in utils.reg_format(self.S)])
if update_spectral_radius:
PowerMethod.get_spec_rad(self)
def update_H_glob(self, new_H_glob):
r"""Update current global model."""
self.H_glob = new_H_glob
dec_H_glob = np.array(
[nf * utils.degradation_op(H_i, shift_ker, self.D)
for nf, shift_ker, H_i in
zip(self.normfacs,
utils.reg_format(self.ker),
utils.reg_format(self.H_glob))])
self.FdH_glob = utils.rca_format(dec_H_glob)
def MX(self, alpha):
r"""Apply degradation operator and renormalize.
Parameters
----------
alpha: numpy.ndarray
Current coefficients (after factorization by :math:`V^{\\top}`).
"""
A = alpha.dot(self.VT)
dec_rec = np.empty(self.obs_data.shape)
for j in range(dec_rec.shape[-1]):
dec_rec[:, :, j] = np.sum(A[:, j].reshape(-1, 1, 1) *
self.FdS[:, j], axis=0)
self._current_rec = dec_rec
return self._current_rec
def MtX(self, x):
r"""Adjoint to degradation operator :func:`MX`.
Parameters
----------
x : numpy.ndarray
Set of finer-grid images.
"""
x = utils.reg_format(x * self.SNR_weights) # [TL] CHECK
STx = np.array([np.sum(FdS_i * x, axis=(1, 2)) for FdS_i in self.FdS])
return STx.dot(self.VT.T)
def cost(self, x, y=None, verbose=False):
r"""Compute data fidelity term.
Notes
-----
``y`` is unused (it's just so ``modopt.opt.algorithms.Condat``
can feed the dual variable.)
"""
if isinstance(self._current_rec, type(None)):
self._current_rec = self.MX(x)
cost_val = 0.5 * np.linalg.norm(
self.obs_weights * (self._current_rec + self.FdH_glob -
self.obs_data) * self.SNR_weights) ** 2
return cost_val
def get_grad(self, x):
r"""Compute current iteration's gradient."""
self.grad = self.MtX(self.obs_weights ** 2 *
(self.MX(x) + self.FdH_glob - self.obs_data))
if self.save_iter_cost:
self.iter_cost.append(self.cost(x))
class CoeffGlobGrad(GradParent, PowerMethod):
    r"""Gradient class for the global coefficient update.
    Global Alpha, :math:`\\tilde{\\alpha}`.
Parameters
----------
data: numpy.ndarray
Observed data.
weights: numpy.ndarray
Corresponding pixel-wise weights.
S: numpy.ndarray
Current eigenPSFs :math:`S`.
Pi: numpy.ndarray
Matrix of positions polynomials.
H_loc: numpy.ndarray
Current estimation of the local model
flux: numpy.ndarray
Per-object flux value.
sig: numpy.ndarray
Noise levels.
ker: numpy.ndarray
Shifting kernels.
ker_rot: numpy.ndarray
Inverted shifting kernels.
SNR_weights: numpy.ndarray
Array of per star weights.
D: float
Upsampling factor.
save_iter_cost: bool
To save iteration diagnostic data.
Default is ``False``.
data_type: str
Data type to be used.
Default is ``float``.
"""
def __init__(self, data, weights, S, Pi, H_loc, flux, sig, ker,
ker_rot, D, SNR_weights, save_iter_cost=False,
data_type='float', verbose=True):
r"""Initialize class attributes."""
self.verbose = verbose
self._grad_data_type = data_type
self.obs_data = data
self.obs_weights = weights
self.op = self.MX
self.trans_op = self.MtX
self.Pi = Pi
self.H_loc = H_loc
self.flux = flux
self.sig = sig
self.normfacs = self.flux / (np.median(self.flux) * self.sig)
self.ker = ker
self.ker_rot = ker_rot
self.D = D
self.SNR_weights = SNR_weights
self.iter_cost = []
self.save_iter_cost = save_iter_cost
self.S = None
self.FdS = None
self.FdH_loc = None
PowerMethod.__init__(self, self.trans_op_op,
(S.shape[-1], Pi.shape[0]), auto_run=False)
self.update_S(np.copy(S), update_spectral_radius=False)
self._current_rec = None
def reset_iter_cost(self):
r"""Reset iteration cost."""
self.iter_cost = []
def get_iter_cost(self):
r"""Get current iteration cost."""
return self.iter_cost
def update_S(self, new_S, update_spectral_radius=True):
r"""Update current eigenPSFs."""
self.S = new_S
self.FdS = np.array([[nf * utils.degradation_op(S_j, shift_ker, self.D)
for nf, shift_ker in
zip(self.normfacs, utils.reg_format(self.ker))]
for S_j in utils.reg_format(self.S)])
if update_spectral_radius:
PowerMethod.get_spec_rad(self)
def update_H_loc(self, new_H_loc):
r"""Update current local models."""
self.H_loc = new_H_loc
dec_H_loc = np.array([nf * utils.degradation_op(H_i, shift_ker, self.D)
for nf, shift_ker, H_i in
zip(self.normfacs,
utils.reg_format(self.ker),
utils.reg_format(self.H_loc))])
self.FdH_loc = utils.rca_format(dec_H_loc)
def MX(self, alpha):
r"""Apply degradation operator and renormalize.
Parameters
----------
alpha: numpy.ndarray
Current coefficients (after factorization by :math:`\\Pi`).
"""
A = alpha.dot(self.Pi)
dec_rec = np.empty(self.obs_data.shape)
for j in range(dec_rec.shape[-1]):
dec_rec[:, :, j] = np.sum(A[:, j].reshape(-1, 1, 1) *
self.FdS[:, j], axis=0)
self._current_rec = dec_rec
return self._current_rec
def MtX(self, x):
r"""Adjoint to degradation operator :func:`MX`.
Parameters
----------
x : numpy.ndarray
Set of finer-grid images.
"""
x = utils.reg_format(x * self.SNR_weights) # [TL] CHECK
STx = np.array([np.sum(FdS_i * x, axis=(1, 2)) for FdS_i in self.FdS])
return STx.dot(self.Pi.T)
def cost(self, x, y=None, verbose=False):
r"""Compute data fidelity term.
Notes
-----
``y`` is unused (it's just so ``modopt.opt.algorithms.Condat``
can feed the dual variable.)
"""
if isinstance(self._current_rec, type(None)):
self._current_rec = self.MX(x)
cost_val = 0.5 * np.linalg.norm(
self.obs_weights * (self._current_rec + self.FdH_loc -
self.obs_data) * self.SNR_weights) ** 2
return cost_val
def get_grad(self, x):
r"""Compute current iteration's gradient."""
self.grad = self.MtX(self.obs_weights ** 2 *
(self.MX(x) + self.FdH_loc - self.obs_data))
if self.save_iter_cost:
self.iter_cost.append(self.cost(x))
class SourceLocGrad(GradParent, PowerMethod):
r"""Gradient class for the local eigenPSF update.
Local S, :math:`S_{k}`.
Parameters
----------
data: numpy.ndarray
        Input data array, an array of 2D observed images (i.e. with noise).
weights: numpy.ndarray
Corresponding pixel-wise weights.
A: numpy.ndarray
Current estimation of corresponding coefficients.
H_glob: numpy.ndarray
Current estimation of the global model
flux: numpy.ndarray
Per-object flux value.
sig: numpy.ndarray
Noise levels.
ker: numpy.ndarray
Shifting kernels.
ker_rot: numpy.ndarray
Inverted shifting kernels.
D: float
Upsampling factor.
filters: numpy.ndarray
Set of filters.
save_iter_cost: bool
To save iteration diagnostic data.
Default is ``False``.
data_type: str
Data type to be used.
Default is ``float``.
"""
def __init__(self, data, weights, A, H_glob, flux, sig, ker, ker_rot,
SNR_weights, D, filters, save_iter_cost=False,
data_type='float', verbose=True):
r"""Initialize class attributes."""
self.verbose = verbose
self._grad_data_type = data_type
self.obs_data = data
self.obs_weights = weights
self.op = self.MX
self.trans_op = self.MtX
self.A = np.copy(A)
self.H_glob = np.copy(H_glob)
self.flux = flux
self.sig = sig
self.normfacs = self.flux / (np.median(self.flux) * self.sig)
self.ker = ker
self.ker_rot = ker_rot
self.D = D
self.filters = filters
self.SNR_weights = SNR_weights
self.iter_cost = []
self.save_iter_cost = save_iter_cost
self.FdH_glob = None
hr_shape = np.array(self.obs_data.shape[:2]) * self.D
power_method_shape = tuple([hr_shape[0], hr_shape[1], self.A.shape[0]])
PowerMethod.__init__(self, self.trans_op_op,
power_method_shape,
auto_run=False)
self._current_rec = None
def reset_iter_cost(self):
r"""Reset iteration cost."""
self.iter_cost = []
def get_iter_cost(self):
r"""Get current iteration cost."""
return self.iter_cost
def update_A(self, new_A, update_spectral_radius=True):
r"""Update current coefficients."""
self.A = new_A
if update_spectral_radius:
PowerMethod.get_spec_rad(self)
def update_H_glob(self, new_H_glob):
r"""Update current global model."""
self.H_glob = new_H_glob
dec_H_glob = np.array(
[nf * utils.degradation_op(H_i, shift_ker, self.D)
for nf, shift_ker, H_i in zip(self.normfacs,
utils.reg_format(self.ker),
utils.reg_format(self.H_glob))])
self.FdH_glob = utils.rca_format(dec_H_glob)
def MX(self, S):
r"""Apply degradation operator and renormalize.
Parameters
----------
S : numpy.ndarray
Current eigenPSFs in direct space.
Returns
-------
numpy.ndarray result
"""
#S = utils.rca_format(
# np.array([filter_convolve(transf_Sj, self.filters, filter_rot=True)
# for transf_Sj in transf_S]))
# S = utils.rca_format(transf_S)
        # Variant kept from the original source (commented out): degrade each
        # eigenPSF Sj directly instead of the coefficient-weighted sum:
        # [nf * utils.degradation_op(Sj, shift_ker, self.D)
        #  for nf, Sj, shift_ker in zip(self.normfacs, ...)]
        dec_rec = np.array(
            [nf * utils.degradation_op(S.dot(A_i), shift_ker, self.D)
             for nf, A_i, shift_ker in zip(self.normfacs,
                                           self.A.T,
                                           utils.reg_format(self.ker))])
self._current_rec = utils.rca_format(dec_rec)
return self._current_rec
def MtX(self, x):
r"""Adjoint to degradation operator :func:`MX`."""
x = utils.reg_format(x * self.SNR_weights)
upsamp_x = np.array(
[nf * utils.adjoint_degradation_op(x_i, shift_ker, self.D)
for nf, x_i, shift_ker in zip(self.normfacs,
x,
utils.reg_format(self.ker_rot))])
x = utils.rca_format(x)
upsamp_x = utils.rca_format(upsamp_x)
#return utils.apply_transform(upsamp_x.dot(self.A.T), self.filters)
return upsamp_x.dot(self.A.T)
def cost(self, x, y=None, verbose=False):
r"""Compute data fidelity term.
Notes
-----
``y`` is unused (it's just so ``modopt.opt.algorithms.Condat``
can feed the dual variable.)
"""
        if self._current_rec is None:
self._current_rec = self.MX(x)
cost_val = 0.5 * np.linalg.norm(
self.obs_weights * (self._current_rec + self.FdH_glob -
self.obs_data) * self.SNR_weights) ** 2
return cost_val
def get_grad(self, x):
r"""Compute current iteration's gradient."""
self.grad = self.MtX(self.obs_weights ** 2 *
(self.MX(x) + self.FdH_glob - self.obs_data))
if self.save_iter_cost:
self.iter_cost.append(self.cost(x))
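# ---------------------------------------------------------------------------
# Editor's note (added): `MtX` is intended to be the adjoint of `MX`, i.e. a
# linear operator M and its transpose satisfy <M x, y> == <x, M^T y>. That
# property is what makes `trans_op_op` (the composition handed to
# `PowerMethod` above) suitable for estimating the spectral radius used as a
# step size. A minimal numeric sketch with a plain matrix as a stand-in
# (hypothetical helper, never called at import time; `np` is the module's
# existing numpy import):
# ---------------------------------------------------------------------------
def _editor_adjoint_check(n_obs=5, n_var=3, seed=1):
    rng = np.random.RandomState(seed)
    M = rng.randn(n_obs, n_var)
    x = rng.randn(n_var)
    y = rng.randn(n_obs)
    return np.isclose(np.dot(M.dot(x), y), np.dot(x, M.T.dot(y)))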
class SourceGlobGrad(GradParent, PowerMethod):
r"""Gradient class for the global eigenPSF update.
Global S, :math:`\\tilde{S}`.
Parameters
----------
data: numpy.ndarray
        Input data array, an array of 2D observed images (i.e. with noise).
weights: numpy.ndarray
Corresponding pixel-wise weights.
A: numpy.ndarray
Current estimation of corresponding coefficients.
H_loc: numpy.ndarray
Current estimation of the local models
flux: numpy.ndarray
Per-object flux value.
sig: numpy.ndarray
Noise levels.
ker: numpy.ndarray
Shifting kernels.
ker_rot: numpy.ndarray
Inverted shifting kernels.
D: float
Upsampling factor.
filters: numpy.ndarray
Set of filters.
save_iter_cost: bool
To save iteration diagnostic data.
Default is ``False``.
data_type: str
Data type to be used.
Default is ``float``.
"""
def __init__(self, data, weights, A, H_loc, flux, sig,
ker, ker_rot, SNR_weights, D, filters, save_iter_cost=False,
data_type='float', verbose=True):
r"""Initialize class attributes."""
self.verbose = verbose
self._grad_data_type = data_type
self.obs_data = data
self.obs_weights = weights
self.op = self.MX
self.trans_op = self.MtX
self.A = np.copy(A)
self.H_loc = np.copy(H_loc)
self.flux = flux
self.sig = sig
self.normfacs = self.flux / (np.median(self.flux) * self.sig)
self.ker = ker
self.ker_rot = ker_rot
self.D = D
self.filters = filters
self.SNR_weights = SNR_weights
self.iter_cost = []
self.save_iter_cost = save_iter_cost
self.FdH_loc = None
hr_shape = np.array(self.obs_data.shape[:2]) * self.D
power_method_shape = tuple([hr_shape[0], hr_shape[1], self.A.shape[0]])
PowerMethod.__init__(self, self.trans_op_op,
power_method_shape,
auto_run=False)
self._current_rec = None
def reset_iter_cost(self):
r"""Reset iteration cost."""
self.iter_cost = []
def get_iter_cost(self):
r"""Get current iteration cost."""
return self.iter_cost
def update_A(self, new_A, update_spectral_radius=True):
r"""Update current coefficients."""
self.A = new_A
if update_spectral_radius:
PowerMethod.get_spec_rad(self)
def update_H_loc(self, new_H_loc):
r"""Update current local models."""
self.H_loc = new_H_loc
dec_H_loc = np.array(
[nf * utils.degradation_op(H_i, shift_ker, self.D)
for nf, shift_ker, H_i in
zip(self.normfacs,
utils.reg_format(self.ker),
utils.reg_format(self.H_loc))])
self.FdH_loc = utils.rca_format(dec_H_loc)
def MX(self, S):
r"""Apply degradation operator and renormalize.
Parameters
----------
S : numpy.ndarray (rca_format)
Current eigenPSFs in direct space.
Returns
-------
numpy.ndarray result
"""
#S = utils.rca_format(
# np.array([filter_convolve(transf_Sj, self.filters, filter_rot=True)
# for transf_Sj in transf_S]))
# S = utils.rca_format(transf_S)
# print(np.shape(S))
        # Variant kept from the original source (commented out): degrade each
        # eigenPSF Sj directly instead of the coefficient-weighted sum:
        # [nf * utils.degradation_op(Sj, shift_ker, self.D)
        #  for nf, Sj, shift_ker in zip(self.normfacs, ...)]
        dec_rec = np.array(
            [nf * utils.degradation_op(S.dot(A_i), shift_ker, self.D)
             for nf, A_i, shift_ker in zip(self.normfacs,
                                           self.A.T,
                                           utils.reg_format(self.ker))])
self._current_rec = utils.rca_format(dec_rec)
return self._current_rec
def MtX(self, x):
r"""Adjoint to degradation operator :func:`MX`."""
x = utils.reg_format(x * self.SNR_weights)
upsamp_x = np.array(
[nf * utils.adjoint_degradation_op(x_i, shift_ker, self.D) for
nf, x_i, shift_ker
in zip(self.normfacs, x, utils.reg_format(self.ker_rot))])
x = utils.rca_format(x)
upsamp_x = utils.rca_format(upsamp_x)
#return utils.apply_transform(upsamp_x.dot(self.A.T), self.filters)
return upsamp_x.dot(self.A.T)
def cost(self, x, y=None, verbose=False):
r"""Compute data fidelity term.
Notes
-----
``y`` is unused (it's just so ``modopt.opt.algorithms.Condat``
can feed the dual variable.)
"""
        if self._current_rec is None:
self._current_rec = self.MX(x)
cost_val = 0.5 * np.linalg.norm(
self.obs_weights * (
self._current_rec + self.FdH_loc - self.obs_data) *
self.SNR_weights) ** 2
return cost_val
def get_grad(self, x):
r"""Compute current iteration's gradient."""
self.grad = self.MtX(self.obs_weights ** 2 * (
self.MX(x) + self.FdH_loc - self.obs_data))
if self.save_iter_cost:
            self.iter_cost.append(self.cost(x))
iota/multisig/crypto/__init__.py | JakeSCahill/iota.py | 2 | 6631720 | # coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
rl_algorithms/fd/dqn_agent.py | MrSyee/rl_algorithms | 1 | 6631721 |
# -*- coding: utf-8 -*-
"""DQfD agent using demo agent for episodic tasks in OpenAI Gym.
- Author: <NAME>, <NAME>
- Contact: <EMAIL>, <EMAIL>
- Paper: https://arxiv.org/pdf/1704.03732.pdf (DQfD)
"""
import pickle
import time
from typing import Tuple
import numpy as np
import torch
from torch.nn.utils import clip_grad_norm_
import wandb
from rl_algorithms.common.buffer.priortized_replay_buffer import PrioritizedReplayBuffer
from rl_algorithms.common.buffer.replay_buffer import ReplayBuffer
import rl_algorithms.common.helper_functions as common_utils
from rl_algorithms.dqn.agent import DQNAgent
from rl_algorithms.registry import AGENTS
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@AGENTS.register_module
class DQfDAgent(DQNAgent):
"""DQN interacting with environment.
Attribute:
memory (PrioritizedReplayBuffer): replay memory
"""
# pylint: disable=attribute-defined-outside-init
def _initialize(self):
"""Initialize non-common things."""
if not self.args.test:
# load demo replay memory
demos = self._load_demos()
if self.use_n_step:
demos, demos_n_step = common_utils.get_n_step_info_from_demo(
demos, self.hyper_params.n_step, self.hyper_params.gamma
)
self.memory_n = ReplayBuffer(
buffer_size=self.hyper_params.buffer_size,
n_step=self.hyper_params.n_step,
gamma=self.hyper_params.gamma,
demo=demos_n_step,
)
# replay memory
self.memory = PrioritizedReplayBuffer(
self.hyper_params.buffer_size,
self.hyper_params.batch_size,
demo=demos,
alpha=self.hyper_params.per_alpha,
epsilon_d=self.hyper_params.per_eps_demo,
)
def _load_demos(self) -> list:
"""Load expert's demonstrations."""
# load demo replay memory
with open(self.args.demo_path, "rb") as f:
demos = pickle.load(f)
return demos
def update_model(self) -> Tuple[torch.Tensor, ...]:
"""Train the model after each episode."""
experiences_1 = self.memory.sample()
weights, indices, eps_d = experiences_1[-3:]
actions = experiences_1[1]
# 1 step loss
gamma = self.hyper_params.gamma
dq_loss_element_wise, q_values = self._get_dqn_loss(experiences_1, gamma)
dq_loss = torch.mean(dq_loss_element_wise * weights)
# n step loss
if self.use_n_step:
experiences_n = self.memory_n.sample(indices)
gamma = self.hyper_params.gamma ** self.hyper_params.n_step
dq_loss_n_element_wise, q_values_n = self._get_dqn_loss(
experiences_n, gamma
)
# to update loss and priorities
q_values = 0.5 * (q_values + q_values_n)
dq_loss_element_wise += dq_loss_n_element_wise * self.hyper_params.lambda1
dq_loss = torch.mean(dq_loss_element_wise * weights)
# supervised loss using demo for only demo transitions
demo_idxs = np.where(eps_d != 0.0)
n_demo = demo_idxs[0].size
if n_demo != 0: # if 1 or more demos are sampled
# get margin for each demo transition
action_idxs = actions[demo_idxs].long()
margin = torch.ones(q_values.size()) * self.hyper_params.margin
margin[demo_idxs, action_idxs] = 0.0 # demo actions have 0 margins
margin = margin.to(device)
# calculate supervised loss
demo_q_values = q_values[demo_idxs, action_idxs].squeeze()
supervised_loss = torch.max(q_values + margin, dim=-1)[0]
supervised_loss = supervised_loss[demo_idxs] - demo_q_values
supervised_loss = torch.mean(supervised_loss) * self.hyper_params.lambda2
else: # no demo sampled
supervised_loss = torch.zeros(1, device=device)
# q_value regularization
q_regular = torch.norm(q_values, 2).mean() * self.hyper_params.w_q_reg
# total loss
loss = dq_loss + supervised_loss + q_regular
# train dqn
self.dqn_optim.zero_grad()
loss.backward()
clip_grad_norm_(self.dqn.parameters(), self.hyper_params.gradient_clip)
self.dqn_optim.step()
# update target networks
common_utils.soft_update(self.dqn, self.dqn_target, self.hyper_params.tau)
# update priorities in PER
loss_for_prior = dq_loss_element_wise.detach().cpu().numpy().squeeze()
new_priorities = loss_for_prior + self.hyper_params.per_eps
new_priorities += eps_d
self.memory.update_priorities(indices, new_priorities)
# increase beta
fraction = min(float(self.i_episode) / self.args.episode_num, 1.0)
self.per_beta: float = self.per_beta + fraction * (1.0 - self.per_beta)
if self.hyper_params.use_noisy_net:
self.dqn.reset_noise()
self.dqn_target.reset_noise()
return (
loss.item(),
dq_loss.item(),
supervised_loss.item(),
q_values.mean().item(),
n_demo,
)
def write_log(self, log_value: tuple):
"""Write log about loss and score"""
i, avg_loss, score, avg_time_cost = log_value
print(
"[INFO] episode %d, episode step: %d, total step: %d, total score: %f\n"
"epsilon: %f, total loss: %f, dq loss: %f, supervised loss: %f\n"
"avg q values: %f, demo num in minibatch: %d (spent %.6f sec/step)\n"
% (
i,
self.episode_step,
self.total_step,
score,
self.epsilon,
avg_loss[0],
avg_loss[1],
avg_loss[2],
avg_loss[3],
avg_loss[4],
avg_time_cost,
)
)
if self.args.log:
wandb.log(
{
"score": score,
"epsilon": self.epsilon,
"total loss": avg_loss[0],
"dq loss": avg_loss[1],
"supervised loss": avg_loss[2],
"avg q values": avg_loss[3],
"demo num in minibatch": avg_loss[4],
"time per each step": avg_time_cost,
}
)
def pretrain(self):
"""Pretraining steps."""
pretrain_loss = list()
pretrain_step = self.hyper_params.pretrain_step
print("[INFO] Pre-Train %d step." % pretrain_step)
for i_step in range(1, pretrain_step + 1):
t_begin = time.time()
loss = self.update_model()
t_end = time.time()
pretrain_loss.append(loss) # for logging
# logging
if i_step == 1 or i_step % 100 == 0:
avg_loss = np.vstack(pretrain_loss).mean(axis=0)
pretrain_loss.clear()
log_value = (0, avg_loss, 0.0, t_end - t_begin)
self.write_log(log_value)
print("[INFO] Pre-Train Complete!\n")
copm_spider/se/se/spiders/sogou.py | tzattack/public_opinion_analysis | 0 | 6631722 |
import scrapy
import datetime
import urllib
import codecs
import re
from se.items import SogouItem
import pytz
class SogouSpider(scrapy.Spider):
name = "sogou"
count = 0
mediaName = "搜狗"
def __init__(self, page='20', *args, **kwargs):
super(SogouSpider, self).__init__(*args, **kwargs)
self.keyword = []
self.page = int(page)
self.count = 0
@classmethod
def _search_query(cls, query):
"""
        The Sogou search query is built as follows:
        query: the search term to submit
"""
url = "https://www.sogou.com/web?query=%s"
params = query
url = url % params
return url
@classmethod
def _extract_title(cls, titles):
if titles is None: return
results = []
for item in titles:
if item is None:
item = ''
else:
item = item.replace('\n', '')
item = item[item.index('>') + 1:item.rindex('<')]
results.append(item)
return results
@classmethod
def _extract_date(cls, dates):
if dates is None: return
dates = [
item.replace('\n', '')
for item in dates
]
res = []
for date in dates:
if date is None or date == '':
res.append('')
continue
date = date.split()[-1]
m = re.match(r'^((19|20)\d\d)-(0?[1-9]|1[012])-(0?[1-9]|[12][0-9]|3[01])$',
date)
if m is not None:
res.append(date)
else:
res.append(date)
return res
@classmethod
def _extract_abstract(cls, abstracts):
if abstracts is None: return
results = []
for item in abstracts:
if item is None:
item = ''
else:
item = item.replace('\n', '')
item = item[item.index('>') + 1:item.rindex('<')]
results.append(item)
return results
@classmethod
def _filter_tags(cls, htmlstr):
        # First strip CDATA sections
        re_cdata = re.compile('//<!\[CDATA\[[^>]*//\]\]>', re.I)  # CDATA blocks
        re_script = re.compile('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I)  # <script> blocks
        re_style = re.compile('<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I)  # <style> blocks
        re_br = re.compile('<br\s*?/?>')  # line breaks
        re_h = re.compile('</?\w+[^>]*>')  # HTML tags
        re_comment = re.compile('<!--[^>]*-->')  # HTML comments
        s = re_cdata.sub('', htmlstr)  # drop CDATA
        s = re_script.sub('', s)  # drop scripts
        s = re_style.sub('', s)  # drop styles
        s = re_br.sub('\n', s)  # turn <br> into newlines
        s = re_h.sub('', s)  # drop HTML tags
        s = re_comment.sub('', s)  # drop HTML comments
        # collapse redundant blank lines
blank_line = re.compile('\n+')
s = blank_line.sub('\n', s)
return s
def start_requests(self):
# q3 = ['雄安市民服务中心', '雄安市民中心', '雄安中海物业', '雄安中海']
# q4 = ['指数']
# q4 = ['指数', '股份', 'A股', 'H股', '板块', '开盘', '楼市', '成交', '售楼', '公寓', '住宅', '首付', '置业', '户型', '在售', '销售', '标书', '中海油']
self.tz = pytz.timezone('Asia/Shanghai')
for word in self.keyword:
query = f'"{word}"' + '+垃圾+恶心+投诉+举报+可恶+差劲+烂'
url = SogouSpider._search_query(query=query)
yield scrapy.Request(url=url, meta={'keyword': word}, callback=self.parse)
# pass
# urls = [
# 'http://quotes.toscrape.com/page/1/',
# 'http://quotes.toscrape.com/page/2/',
# ]
# for url in urls:
# yield scrapy.Request(url=url, callback=self.parse)
def get_ctime(self, timestr):
if timestr == '' or timestr == '-':
return ''
if timestr.find('-') != -1:
return timestr
else:
            # current time
d = datetime.datetime.now(self.tz)
            # "N hours ago"
if '小时' in timestr:
hour = int(timestr[0:timestr.index('小时')])
delta = datetime.timedelta(hours=hour)
d = d - delta
            # "N days ago"
elif '天' in timestr:
day = int(timestr[0:timestr.index('天')])
delta = datetime.timedelta(days=day)
d = d - delta
else:
d = d
timestr = d.strftime('%Y-%m-%d')
return timestr
def parse(self, response):
urls = response.css('h3.vrTitle').css('a::attr(href)').extract()
titles = response.css('h3.vrTitle').css('a').extract()
dates = response.css('div.fb').css('cite::text').extract()
abstracts = response.css('p.str_info').extract()
# titles = [title.replace('\n\t', '') for title in titles]
# dates = [date.replace('\n\t', '') for date in dates]
# abstracts = [abstract.replace('\n\t', '') for abstract in abstracts]
titles = SogouSpider._extract_title(titles)
titles = [
SogouSpider._filter_tags(title)
for title in titles
]
dates = SogouSpider._extract_date(dates)
abstracts = SogouSpider._extract_abstract(abstracts)
abstracts = [
SogouSpider._filter_tags(abstract)
for abstract in abstracts
]
for url, title, date, abstract in zip(urls, titles, dates, abstracts):
item = SogouItem()
item["url"] = "http://www.sogou.com" + url
item["title"] = title
item["date"] = self.get_ctime(date)
item["abstract"] = abstract
item["keyword"] = response.meta['keyword']
item["mediaName"] = self.mediaName
content = item['title'] + ' ' + item['abstract']
if content.strip() == '' or content.find(item['keyword']) == -1:
pass
else:
filtered = False
for word in self.filterword:
if content.find(word) != -1:
filtered = True
break
if not filtered:
yield scrapy.Request(url=item["url"], meta={"item": item}, callback=self.parse_url)
# next page
# next_page = response.css('div#page a::attr(href)').extract()[-1]
next_page = response.css('a#sogou_next::attr(href)').extract_first()
if next_page is None:
return
else:
self.count += 1
if self.count >= self.page:
self.log("Crawled %s pages, stopped" % self.count)
return
else:
yield scrapy.Request(url=response.urljoin(next_page), meta=response.meta, callback=self.parse)
# yield scrapy.Request(url=response.urljoin(next_page), callback=self.parse)
# self.count += 1
# if self.count == 10:
# return
# else:
# yield scrapy.Request(url=response.urljoin(next_page), callback=self.parse)
def parse_url(self, response):
item = response.meta["item"]
item["url"] = response.url
yield item
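# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original spider): a quick,
# offline look at the two pure helpers above. The HTML snippet is made up.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    print(SogouSpider._search_query('"example keyword"'))
    # -> https://www.sogou.com/web?query="example keyword"
    sample_html = '<p class="str_info">first line<br/>second <b>line</b></p>'
    print(SogouSpider._filter_tags(sample_html))
    # -> first line
    #    second line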
examples/scaling-out/strategy.py | simongarisch/pinkfish | 1 | 6631723 | """
strategy
---------
"""
# use future imports for python 3.x forward compatibility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# other imports
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from talib.abstract import *
# project imports
import pinkfish as pf
class Strategy():
def __init__(self, symbol, capital, start, end, use_adj=False,
period=7, max_positions=4, sp500_filter=True,
slippage_per_trade=0, commissions_per_trade=0):
self._symbol = symbol
self._capital = capital
self._start = start
self._end = end
self._use_adj = use_adj
self._period = period
self._max_positions = max_positions
self._sp500_filter = sp500_filter
self._slippage_per_trade = slippage_per_trade
self._commissions_per_trade = commissions_per_trade
self._positions = 0
def _algo(self):
""" Algo:
1. The SPY is above its 200-day moving average
            2. If the SPY closes at an X-day low, buy with full capital.
            3. If the SPY closes at an X-day high, sell some.
If it sets further highs, sell some more, etc...
4. If you have free cash, use it all when fresh lows are set.
"""
self._tlog.cash = self._capital
start_flag = True
end_flag = False
stop_loss = 0
for i, row in enumerate(self._ts.itertuples()):
date = row.Index.to_pydatetime()
high = row.high
low = row.low
close = row.close
sma200 = row.sma200
period_high = row.period_high
period_low = row.period_low
sp500_close = row.sp500_close
sp500_sma = row.sp500_sma
end_flag = True if (i == len(self._ts) - 1) else False
trade_state = None
if pd.isnull(sma200) or date < self._start:
continue
elif start_flag:
start_flag = False
# set start and end
self._start = date
self._end = self._ts.index[-1]
# buy
if ((sp500_close > sp500_sma or not self._sp500_filter)
and close > sma200
and close == period_low
and not end_flag):
# calc number of shares
shares = self._tlog.calc_shares(price=close, cash=self._tlog.cash)
# if we have enough cash to buy any shares, then buy them
if shares > 0:
# enter buy in trade log
self._tlog.enter_trade(date, close, shares)
trade_state = pf.TradeState.OPEN
#print("{0} BUY {1} {2} @ {3:.2f}".format(
# date, shares, self._symbol, close))
# set stop loss
stop_loss = 0*close
self._positions = self._max_positions
else:
trade_state = pf.TradeState.HOLD
# sell
#elif (self._tlog.num_open_trades() > 0
# and ((self._sp500_filter and sp500_close < sp500_sma)
# or close == period_high
# or low < stop_loss
# or end_flag)):
elif (self._tlog.num_open_trades() > 0
and (close == period_high
or low < stop_loss
or end_flag)):
if end_flag:
shares = self._tlog.shares
else:
shares = int(self._tlog.shares / (self._positions))
self._positions -= 1
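            # Worked example of the scale-out arithmetic above (added note):
            # with max_positions=4 and 100 shares held, successive X-day highs
            # sell int(100/4)=25, then int(75/3)=25, then int(50/2)=25, and
            # finally int(25/1)=25 shares, i.e. roughly equal quarters.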
# enter sell in trade log
shares = self._tlog.exit_trade(date, close, shares)
trade_state = pf.TradeState.CLOSE
#print("{0} SELL {1} {2} @ {3:.2f}".format(
# date, shares, self._symbol, close))
# hold
else:
trade_state = pf.TradeState.HOLD
# record daily balance
self._dbal.append(date, high, low, close,
self._tlog.shares, self._tlog.cash,
trade_state)
def run(self):
self._ts = pf.fetch_timeseries(self._symbol)
self._ts = pf.select_tradeperiod(self._ts, self._start,
self._end, use_adj=False)
# Add technical indicator: 200 day sma
sma200 = SMA(self._ts, timeperiod=200)
self._ts['sma200'] = sma200
# Add technical indicator: X day high, and X day low
period_high = pd.Series(self._ts.close).rolling(self._period).max()
period_low = pd.Series(self._ts.close).rolling(self._period).min()
self._ts['period_high'] = period_high
self._ts['period_low'] = period_low
self._tlog = pf.TradeLog()
self._dbal = pf.DailyBal()
# add S&P500 200 sma
sp500 = pf.fetch_timeseries('^GSPC')
sp500 = pf.select_tradeperiod(sp500, self._start,
self._end, False)
self._ts['sp500_close'] = sp500['close']
sp500_sma = SMA(sp500, timeperiod=200)
self._ts['sp500_sma'] = sp500_sma
self._algo()
def get_logs(self):
""" return DataFrames """
tlog = self._tlog.get_log()
dbal = self._dbal.get_log()
return tlog, dbal
def stats(self):
tlog, dbal = self.get_logs()
stats = pf.stats(self._ts, tlog, dbal,
self._start, self._end, self._capital)
return stats
def summary(strategies, *metrics):
""" Stores stats summary in a DataFrame.
stats() must be called before calling this function """
index = []
columns = strategies.index
data = []
# add metrics
for metric in metrics:
index.append(metric)
data.append([strategy.stats[metric] for strategy in strategies])
df = pd.DataFrame(data, columns=columns, index=index)
return df
def plot_bar_graph(df, metric):
""" Plot Bar Graph: Strategy
stats() must be called before calling this function """
df = df.loc[[metric]]
df = df.transpose()
fig = plt.figure()
axes = fig.add_subplot(111, ylabel=metric)
df.plot(kind='bar', ax=axes, legend=False)
axes.set_xticklabels(df.index, rotation=0)
| """
stategy
---------
"""
# use future imports for python 3.x forward compatibility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# other imports
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from talib.abstract import *
# project imports
import pinkfish as pf
class Strategy():
def __init__(self, symbol, capital, start, end, use_adj=False,
period=7, max_positions=4, sp500_filter=True,
slippage_per_trade=0, commissions_per_trade=0):
self._symbol = symbol
self._capital = capital
self._start = start
self._end = end
self._use_adj = use_adj
self._period = period
self._max_positions = max_positions
self._sp500_filter = sp500_filter
self._slippage_per_trade = slippage_per_trade
self._commissions_per_trade = commissions_per_trade
self._positions = 0
def _algo(self):
""" Algo:
1. The SPY is above its 200-day moving average
2. The SPY closes at a X-day low, buy with full capital.
3. If the SPY closes at a X-day high, sell some.
If it sets further highs, sell some more, etc...
4. If you have free cash, use it all when fresh lows are set.
"""
self._tlog.cash = self._capital
start_flag = True
end_flag = False
stop_loss = 0
for i, row in enumerate(self._ts.itertuples()):
date = row.Index.to_pydatetime()
high = row.high
low = row.low
close = row.close
sma200 = row.sma200
period_high = row.period_high
period_low = row.period_low
sp500_close = row.sp500_close
sp500_sma = row.sp500_sma
end_flag = True if (i == len(self._ts) - 1) else False
trade_state = None
if pd.isnull(sma200) or date < self._start:
continue
elif start_flag:
start_flag = False
# set start and end
self._start = date
self._end = self._ts.index[-1]
# buy
if ((sp500_close > sp500_sma or not self._sp500_filter)
and close > sma200
and close == period_low
and not end_flag):
# calc number of shares
shares = self._tlog.calc_shares(price=close, cash=self._tlog.cash)
# if we have enough cash to buy any shares, then buy them
if shares > 0:
# enter buy in trade log
self._tlog.enter_trade(date, close, shares)
trade_state = pf.TradeState.OPEN
#print("{0} BUY {1} {2} @ {3:.2f}".format(
# date, shares, self._symbol, close))
# set stop loss
stop_loss = 0*close
self._positions = self._max_positions
else:
trade_state = pf.TradeState.HOLD
# sell
#elif (self._tlog.num_open_trades() > 0
# and ((self._sp500_filter and sp500_close < sp500_sma)
# or close == period_high
# or low < stop_loss
# or end_flag)):
elif (self._tlog.num_open_trades() > 0
and (close == period_high
or low < stop_loss
or end_flag)):
if end_flag:
shares = self._tlog.shares
else:
shares = int(self._tlog.shares / (self._positions))
self._positions -= 1
# enter sell in trade log
shares = self._tlog.exit_trade(date, close, shares)
trade_state = pf.TradeState.CLOSE
#print("{0} SELL {1} {2} @ {3:.2f}".format(
# date, shares, self._symbol, close))
# hold
else:
trade_state = pf.TradeState.HOLD
# record daily balance
self._dbal.append(date, high, low, close,
self._tlog.shares, self._tlog.cash,
trade_state)
def run(self):
self._ts = pf.fetch_timeseries(self._symbol)
self._ts = pf.select_tradeperiod(self._ts, self._start,
self._end, use_adj=False)
# Add technical indicator: 200 day sma
sma200 = SMA(self._ts, timeperiod=200)
self._ts['sma200'] = sma200
# Add technical indicator: X day high, and X day low
period_high = pd.Series(self._ts.close).rolling(self._period).max()
period_low = pd.Series(self._ts.close).rolling(self._period).min()
self._ts['period_high'] = period_high
self._ts['period_low'] = period_low
self._tlog = pf.TradeLog()
self._dbal = pf.DailyBal()
# add S&P500 200 sma
sp500 = pf.fetch_timeseries('^GSPC')
sp500 = pf.select_tradeperiod(sp500, self._start,
self._end, False)
self._ts['sp500_close'] = sp500['close']
sp500_sma = SMA(sp500, timeperiod=200)
self._ts['sp500_sma'] = sp500_sma
self._algo()
def get_logs(self):
""" return DataFrames """
tlog = self._tlog.get_log()
dbal = self._dbal.get_log()
return tlog, dbal
def stats(self):
tlog, dbal = self.get_logs()
stats = pf.stats(self._ts, tlog, dbal,
self._start, self._end, self._capital)
return stats
def summary(strategies, *metrics):
""" Stores stats summary in a DataFrame.
stats() must be called before calling this function """
index = []
columns = strategies.index
data = []
# add metrics
for metric in metrics:
index.append(metric)
data.append([strategy.stats[metric] for strategy in strategies])
df = pd.DataFrame(data, columns=columns, index=index)
return df
def plot_bar_graph(df, metric):
""" Plot Bar Graph: Strategy
stats() must be called before calling this function """
df = df.loc[[metric]]
df = df.transpose()
fig = plt.figure()
axes = fig.add_subplot(111, ylabel=metric)
df.plot(kind='bar', ax=axes, legend=False)
axes.set_xticklabels(df.index, rotation=0)
| en | 0.74717 | stategy --------- # use future imports for python 3.x forward compatibility # other imports # project imports Algo: 1. The SPY is above its 200-day moving average 2. The SPY closes at a X-day low, buy with full capital. 3. If the SPY closes at a X-day high, sell some. If it sets further highs, sell some more, etc... 4. If you have free cash, use it all when fresh lows are set. # set start and end # buy # calc number of shares # if we have enough cash to buy any shares, then buy them # enter buy in trade log #print("{0} BUY {1} {2} @ {3:.2f}".format( # date, shares, self._symbol, close)) # set stop loss # sell #elif (self._tlog.num_open_trades() > 0 # and ((self._sp500_filter and sp500_close < sp500_sma) # or close == period_high # or low < stop_loss # or end_flag)): # enter sell in trade log #print("{0} SELL {1} {2} @ {3:.2f}".format( # date, shares, self._symbol, close)) # hold # record daily balance # Add technical indicator: 200 day sma # Add technical indicator: X day high, and X day low # add S&P500 200 sma return DataFrames Stores stats summary in a DataFrame. stats() must be called before calling this function # add metrics Plot Bar Graph: Strategy stats() must be called before calling this function | 2.492517 | 2 |
vespene/plugins/organizations/github.py | Conan-Kudo/vespene | 11 | 6631724 | # Copyright 2018, <NAME> LLC
# License: Apache License Version 2.0
# -------------------------------------------------------------------------
# github.py - plumbing for supporting GitHub within the Vespene
# organizational imports feature
# --------------------------------------------------------------------------
import os
import shlex
from django.db.models import Q
from github import Github
from vespene.common.logger import Logger
from vespene.workers import commands
LOG = Logger()
class Plugin(object):
def __init__(self, parameters=None):
self.parameters = parameters
if parameters is None:
self.parameters = {}
def get_handle(self, organization):
scm_login = organization.scm_login
if organization.api_endpoint:
            g = Github(scm_login.username, scm_login.get_password(), base_url=organization.api_endpoint)
else:
g = Github(scm_login.username, scm_login.get_password())
return g
def find_all_repos(self, organization, build):
handle = self.get_handle(organization)
org = handle.get_organization(organization.organization_identifier)
repos = org.get_repos(type='all')
results = []
for repo in repos:
results.append(repo.clone_url)
return results
def clone_repo(self, organization, build, repo, count):
# much of code is borrowed from plugins.scm.git - but adapted enough that
# sharing is probably not worthwhile. For instance, this doesn't have
# to deal with SSH checkouts.
build.append_message("cloning repo...")
repo = self.fix_scm_url(repo, organization.scm_login.username)
answer_file = commands.answer_file(organization.scm_login.get_password())
        ask_pass = " --config core.askpass=\"%s\"" % answer_file
# TODO: add --depth 1 to git.py checkouts as well
branch_spec = "--depth 1 --single-branch "
clone_path = os.path.join(build.working_dir, str(count))
try:
# run it
cmd = "git clone %s %s %s %s" % (shlex.quote(repo), clone_path, ask_pass, branch_spec)
output = commands.execute_command(build, cmd, output_log=False, message_log=True)
finally:
# delete the answer file if we had one
os.remove(answer_file)
return clone_path
def fix_scm_url(self, repo, username):
# Adds the username and password into the repo URL before checkout, if possible
# This isn't needed if we are using SSH keys, and that's already handled by SshManager
for prefix in [ 'https://', 'http://' ]:
if repo.startswith(prefix):
repo = repo.replace(prefix, "")
return "%s%s@%s" % (prefix, username, repo)
return repo
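# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original plugin): what
# `fix_scm_url` does to an HTTPS clone URL. The repo URL and username are
# made up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo = Plugin()
    print(demo.fix_scm_url("https://github.com/example-org/example-repo.git", "alice"))
    # -> https://alice@github.com/example-org/example-repo.git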
pydbticket/kci.py | em0lar/pydbticket | 2 | 6631725 | import json
from typing import Union
import jwt
import pytz
import requests
from pydbticket.order import Leg, Ticket
def checkin(ticket: Ticket, leg: Leg,
coach: Union[int, str], seat: Union[int, str]):
"""
Makes a SelfCheckIn-Request
:param ticket: A valid ticket for the leg
    :param leg: The leg of the order which should be checked in
    :param coach: The coach (Waggon) number
    :param seat: The seat number
    :return: The JSON body of the response
"""
url = 'https://kanalbackend-navigator-prd-default-kci-tck.dbv.noncd.db.de/sci_sci'
request_headers = {
'User-Agent': 'DB Navigator Beta/20.08.s22.30 (Android REL 28)',
'Content-Type': 'application/json; charset=utf-8'
}
jwt_message = {
"zug": {
"nr": leg.number,
"gat": leg.kind
},
"ticket": {
"tkey": ticket.key,
"issuer": ticket.issuer
},
"version": 1
}
token = gen_token(jwt_message)
body = {
"sci_sci_rq": {
"anz_kind": 0,
"anz_res": 0,
"ticket": {
"reisender_nachname": ticket.lastname,
"ot_nummer": ticket.serial_number,
"bcb_erforderlich": "N",
"tkey": ticket.key,
"issuer": ticket.issuer,
"reisender_vorname": ticket.forename,
},
"zug": {
"nr": leg.number,
"gat": leg.kind,
},
"kl": 2,
"token": token,
"ver": 1,
"bcs": [],
"anz_erw": 1,
"abfahrt": {
"ebhf_nr": leg.departure.station_number,
"zeit": pytz.UTC.normalize(
leg.departure.datetime).replace(
tzinfo=None).isoformat() + 'Z',
"ebhf_name": leg.departure.station_name,
"eva_name": leg.departure.station_name,
"eva_nr": leg.departure.station_number},
"ankunft": {
"ebhf_nr": leg.arrival.station_number,
"zeit": pytz.UTC.normalize(
leg.arrival.datetime).replace(
tzinfo=None).isoformat() + 'Z',
"ebhf_name": leg.arrival.station_name,
"eva_name": leg.arrival.station_name,
"eva_nr": leg.arrival.station_number,
},
"bc_rabatts": [],
"plaetze": [
{
"wagennr": int(coach),
"platznr": int(seat),
},
],
},
}
request_body = json.dumps(body)
print(request_body)
response = requests.post(
url,
data=request_body,
headers=request_headers).content
return response
def gen_token(message):
secret = 'nougat20maybe17bonus'
return jwt.encode(message, secret, algorithm='HS256').decode("utf-8")
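# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the shape
# of the payload `checkin` signs with `gen_token`. The train and ticket
# values are made up, and the `.decode()` inside gen_token assumes a PyJWT
# version that returns bytes (PyJWT < 2.0).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_payload = {
        "zug": {"nr": 123, "gat": "ICE"},
        "ticket": {"tkey": "ABC123", "issuer": "1080"},
        "version": 1,
    }
    print(gen_token(example_payload))  # prints a signed HS256 JWT string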
| import json
from typing import Union
import jwt
import pytz
import requests
from pydbticket.order import Leg, Ticket
def checkin(ticket: Ticket, leg: Leg,
coach: Union[int, str], seat: Union[int, str]):
"""
Makes a SelfCheckIn-Request
:param ticket: A valid ticket for the leg
:param leg: The leg of the order, wich should be checked in
:param coach: The Waggon Number
:param seat: The Seat Number
:return: The JSON-Body of the response
"""
url = 'https://kanalbackend-navigator-prd-default-kci-tck.dbv.noncd.db.de/sci_sci'
request_headers = {
'User-Agent': 'DB Navigator Beta/20.08.s22.30 (Android REL 28)',
'Content-Type': 'application/json; charset=utf-8'
}
jwt_message = {
"zug": {
"nr": leg.number,
"gat": leg.kind
},
"ticket": {
"tkey": ticket.key,
"issuer": ticket.issuer
},
"version": 1
}
token = gen_token(jwt_message)
body = {
"sci_sci_rq": {
"anz_kind": 0,
"anz_res": 0,
"ticket": {
"reisender_nachname": ticket.lastname,
"ot_nummer": ticket.serial_number,
"bcb_erforderlich": "N",
"tkey": ticket.key,
"issuer": ticket.issuer,
"reisender_vorname": ticket.forename,
},
"zug": {
"nr": leg.number,
"gat": leg.kind,
},
"kl": 2,
"token": token,
"ver": 1,
"bcs": [],
"anz_erw": 1,
"abfahrt": {
"ebhf_nr": leg.departure.station_number,
"zeit": pytz.UTC.normalize(
leg.departure.datetime).replace(
tzinfo=None).isoformat() + 'Z',
"ebhf_name": leg.departure.station_name,
"eva_name": leg.departure.station_name,
"eva_nr": leg.departure.station_number},
"ankunft": {
"ebhf_nr": leg.arrival.station_number,
"zeit": pytz.UTC.normalize(
leg.arrival.datetime).replace(
tzinfo=None).isoformat() + 'Z',
"ebhf_name": leg.arrival.station_name,
"eva_name": leg.arrival.station_name,
"eva_nr": leg.arrival.station_number,
},
"bc_rabatts": [],
"plaetze": [
{
"wagennr": int(coach),
"platznr": int(seat),
},
],
},
}
request_body = json.dumps(body)
print(request_body)
response = requests.post(
url,
data=request_body,
headers=request_headers).content
return response
def gen_token(message):
secret = 'nougat20maybe17bonus'
return jwt.encode(message, secret, algorithm='HS256').decode("utf-8")
| en | 0.753648 | Makes a SelfCheckIn-Request :param ticket: A valid ticket for the leg :param leg: The leg of the order, wich should be checked in :param coach: The Waggon Number :param seat: The Seat Number :return: The JSON-Body of the response | 2.806357 | 3 |
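A brief usage sketch for the checkin() and gen_token() helpers in the row above. The Ticket and Leg objects come from pydbticket.order; their construction is not shown in this file, so the setup below is purely illustrative and the coach/seat values are invented.
from pydbticket.order import Leg, Ticket  # same import the module itself uses

def demo_checkin(ticket: Ticket, leg: Leg) -> None:
    # Hypothetical call: coach and seat may be int or str per the type hints above.
    raw = checkin(ticket, leg, coach=12, seat=56)
    print(raw)  # raw bytes of the sci_sci response body

# The JWT embedded in the request body can also be produced directly, e.g.:
# token = gen_token({"zug": {"nr": leg.number, "gat": leg.kind},
#                    "ticket": {"tkey": ticket.key, "issuer": ticket.issuer},
#                    "version": 1})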
generator/__init__.py | universuen/pokemon_GAN | 1 | 6631726 | <filename>generator/__init__.py
from . import *
from .core import Generator
| <filename>generator/__init__.py
from . import *
from .core import Generator
| none | 1 | 1.091133 | 1 |
|
openfda/downloadstats/pipeline.py | FDA/openfda | 388 | 6631727 | #!/usr/bin/python
''' Pipeline for parsing all the access log files (S3 and CloudFront) for dataset downloads and calculating totals.
'''
import glob
import gzip
import json
import os
import re
from os.path import join, dirname
import arrow
import luigi
import pandas as pd
from openfda import common, config, index_util, parallel
from openfda.tasks import AlwaysRunTask
RUN_DIR = dirname(os.path.abspath(__file__))
S3_ACCESS_LOGS_BUCKET = 's3://openfda-logs/download/'
S3_ACCESS_LOGS_DIR = config.data_dir('downloadstats/s3_logs_raw')
S3_STATS_DB_DIR = config.data_dir('downloadstats/s3_stats.db')
S3_ACCESS_LOGS_CUTOFF = arrow.get('2017-03-01')
CF_ACCESS_LOGS_BUCKET = 's3://openfda-splash-logs/download-cf-logs/'
CF_ACCESS_LOGS_DIR = config.data_dir('downloadstats/cf_logs_raw')
CF_STATS_DB_DIR = config.data_dir('downloadstats/cf_stats.db')
TOTAL_STATS_DB_DIR = config.data_dir('downloadstats/total_stats.db')
ENDPOINT_INDEX_MAP = {
'animalandveterinary/event': 'animalandveterinarydrugevent',
'drug/event': 'drugevent',
'drug/label': 'druglabel',
'drug/enforcement': 'drugenforcement',
'drug/ndc': 'ndc',
'drug/drugsfda': 'drugsfda',
'device/enforcement': 'deviceenforcement',
'food/enforcement': 'foodenforcement',
'food/event': 'foodevent',
'device/event': 'deviceevent',
'device/classification': 'deviceclass',
'device/510k': 'deviceclearance',
'device/pma': 'devicepma',
'device/recall': 'devicerecall',
'device/registrationlisting': 'devicereglist',
'device/udi': 'deviceudi',
'device/covid19serology': 'covid19serology',
'other/nsde': 'othernsde',
'other/substance': 'othersubstance',
'tobacco/problem': 'tobaccoproblem'
}
BOT_USER_AGENTS = [o['pattern'] for o in json.loads(open(join(RUN_DIR, 'crawler-user-agents.json'), 'r').read())]  # materialized as a list so isBot() can re-scan it on every call
def isBot(ua):
return next((p for p in BOT_USER_AGENTS if re.search(p, ua)), False) != False
class SyncS3AccessLogs(AlwaysRunTask):
def _run(self):
common.cmd(['mkdir', '-p', S3_ACCESS_LOGS_DIR])
common.cmd(['aws',
'--profile=' + config.aws_profile(),
's3',
'sync',
S3_ACCESS_LOGS_BUCKET,
S3_ACCESS_LOGS_DIR])
# Cut off S3 access logs at the point when CF logs became available
for file in glob.glob(join(S3_ACCESS_LOGS_DIR, '*')):
if arrow.get(os.path.split(file)[1][:10]) > S3_ACCESS_LOGS_CUTOFF:
os.remove(file)
def output(self):
return luigi.LocalTarget(S3_ACCESS_LOGS_DIR)
class SyncCFAccessLogs(AlwaysRunTask):
def _run(self):
common.cmd(['mkdir', '-p', CF_ACCESS_LOGS_DIR])
common.cmd(['aws',
'--profile=' + config.aws_profile(),
's3',
'sync',
CF_ACCESS_LOGS_BUCKET,
CF_ACCESS_LOGS_DIR])
def output(self):
return luigi.LocalTarget(CF_ACCESS_LOGS_DIR)
class CFAccessLogsStats(parallel.MRTask):
agg_stats = {}
def requires(self):
return SyncCFAccessLogs()
def output(self):
return luigi.LocalTarget(CF_STATS_DB_DIR)
def mapreduce_inputs(self):
return parallel.Collection.from_glob(join(self.input().path, '*'))
def map(self, log_file, value, output):
stats = {}
df = pd.read_csv(
gzip.open(log_file, 'rb'),
sep='\t', skiprows=(0, 1),
names=['date', 'time', 'edge', 'bytes', 'ip', 'method', 'host', 'uri',
'status', 'referer', 'ua'],
usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
low_memory=False, na_values=[], keep_default_na=False, index_col=False,
engine='c', memory_map=True)
for row in df.itertuples():
if row.method == 'GET' and (200 <= int(row.status) <= 299) and not isBot(row.ua):
for path, endpoint in ENDPOINT_INDEX_MAP.items():
if row.uri.startswith('/' + path):
if endpoint in stats:
stats[endpoint] = stats[endpoint] + 1
else:
stats[endpoint] = 1
if len(stats) > 0:
output.add('stats', stats)
def reduce(self, key, values, output):
assert key == 'stats'
for value in values:
for endpoint, count in value.items():
if endpoint in self.agg_stats:
self.agg_stats[endpoint] = self.agg_stats[endpoint] + count
else:
self.agg_stats[endpoint] = count
output.put(key, self.agg_stats)
class S3AccessLogsStats(parallel.MRTask):
agg_stats = {}
def requires(self):
return SyncS3AccessLogs()
def output(self):
return luigi.LocalTarget(S3_STATS_DB_DIR)
def mapreduce_inputs(self):
return parallel.Collection.from_glob(join(self.input().path, '*'))
def map(self, log_file, value, output):
stats = {}
df = pd.read_csv(
log_file,
sep=" ",
names=['Owner', 'Bucket', 'Time', 'Tz', 'IP', 'Requester',
'RequestID',
'Operation', 'Key', 'URI', 'Status', 'ErrorCode', 'BytesSent', 'ObjectSize',
'TotalTime',
'TurnAroundTime', 'Referrer', 'UserAgent', 'VersionId', 'HostId'],
low_memory=False, na_values=[], keep_default_na=False, index_col=False,
engine='c', memory_map=True)
for row in df.itertuples():
if row.Operation == 'REST.GET.OBJECT' and isinstance(row.Status, int) and (200 <= row.Status <= 299) and row.ErrorCode == '-' and not isBot(
row.UserAgent):
for path, endpoint in ENDPOINT_INDEX_MAP.items():
if row.Key.startswith(path):
if endpoint in stats:
stats[endpoint] = stats[endpoint] + 1
else:
stats[endpoint] = 1
if len(stats) > 0:
output.add('stats', stats)
def reduce(self, key, values, output):
assert key == 'stats'
for value in values:
for endpoint, count in value.items():
if endpoint in self.agg_stats:
self.agg_stats[endpoint] = self.agg_stats[endpoint] + count
else:
self.agg_stats[endpoint] = count
output.put(key, self.agg_stats)
class TotalStats(parallel.MRTask):
agg_stats = {}
def requires(self):
return [S3AccessLogsStats(), CFAccessLogsStats()]
def output(self):
return luigi.LocalTarget(TOTAL_STATS_DB_DIR)
def mapreduce_inputs(self):
return parallel.Collection.from_sharded_list([path.path for path in self.input()])
def reduce(self, key, values, output):
assert key == 'stats'
for value in values:
for endpoint, count in value.items():
if endpoint in self.agg_stats:
self.agg_stats[endpoint] = self.agg_stats[endpoint] + count
else:
self.agg_stats[endpoint] = count
output.put(key, self.agg_stats)
class LoadJSON(index_util.LoadJSONBase):
index_name = 'downloadstats'
type_name = 'downloadstats'
mapping_file = './schemas/downloadstats_mapping.json'
data_source = TotalStats()
use_checksum = False
optimize_index = True
if __name__ == '__main__':
luigi.run()
| #!/usr/bin/python
''' Pipeline for parsing all the access log files (S3 and CloudFront) for dataset downloads and calculating totals.
'''
import glob
import gzip
import json
import os
import re
from os.path import join, dirname
import arrow
import luigi
import pandas as pd
from openfda import common, config, index_util, parallel
from openfda.tasks import AlwaysRunTask
RUN_DIR = dirname(os.path.abspath(__file__))
S3_ACCESS_LOGS_BUCKET = 's3://openfda-logs/download/'
S3_ACCESS_LOGS_DIR = config.data_dir('downloadstats/s3_logs_raw')
S3_STATS_DB_DIR = config.data_dir('downloadstats/s3_stats.db')
S3_ACCESS_LOGS_CUTOFF = arrow.get('2017-03-01')
CF_ACCESS_LOGS_BUCKET = 's3://openfda-splash-logs/download-cf-logs/'
CF_ACCESS_LOGS_DIR = config.data_dir('downloadstats/cf_logs_raw')
CF_STATS_DB_DIR = config.data_dir('downloadstats/cf_stats.db')
TOTAL_STATS_DB_DIR = config.data_dir('downloadstats/total_stats.db')
ENDPOINT_INDEX_MAP = {
'animalandveterinary/event': 'animalandveterinarydrugevent',
'drug/event': 'drugevent',
'drug/label': 'druglabel',
'drug/enforcement': 'drugenforcement',
'drug/ndc': 'ndc',
'drug/drugsfda': 'drugsfda',
'device/enforcement': 'deviceenforcement',
'food/enforcement': 'foodenforcement',
'food/event': 'foodevent',
'device/event': 'deviceevent',
'device/classification': 'deviceclass',
'device/510k': 'deviceclearance',
'device/pma': 'devicepma',
'device/recall': 'devicerecall',
'device/registrationlisting': 'devicereglist',
'device/udi': 'deviceudi',
'device/covid19serology': 'covid19serology',
'other/nsde': 'othernsde',
'other/substance': 'othersubstance',
'tobacco/problem': 'tobaccoproblem'
}
BOT_USER_AGENTS = [o['pattern'] for o in json.loads(open(join(RUN_DIR, 'crawler-user-agents.json'), 'r').read())]  # materialized as a list so isBot() can re-scan it on every call
def isBot(ua):
return next((p for p in BOT_USER_AGENTS if re.search(p, ua)), False) != False
class SyncS3AccessLogs(AlwaysRunTask):
def _run(self):
common.cmd(['mkdir', '-p', S3_ACCESS_LOGS_DIR])
common.cmd(['aws',
'--profile=' + config.aws_profile(),
's3',
'sync',
S3_ACCESS_LOGS_BUCKET,
S3_ACCESS_LOGS_DIR])
# Cut off S3 access logs at the point when CF logs became available
for file in glob.glob(join(S3_ACCESS_LOGS_DIR, '*')):
if arrow.get(os.path.split(file)[1][:10]) > S3_ACCESS_LOGS_CUTOFF:
os.remove(file)
def output(self):
return luigi.LocalTarget(S3_ACCESS_LOGS_DIR)
class SyncCFAccessLogs(AlwaysRunTask):
def _run(self):
common.cmd(['mkdir', '-p', CF_ACCESS_LOGS_DIR])
common.cmd(['aws',
'--profile=' + config.aws_profile(),
's3',
'sync',
CF_ACCESS_LOGS_BUCKET,
CF_ACCESS_LOGS_DIR])
def output(self):
return luigi.LocalTarget(CF_ACCESS_LOGS_DIR)
class CFAccessLogsStats(parallel.MRTask):
agg_stats = {}
def requires(self):
return SyncCFAccessLogs()
def output(self):
return luigi.LocalTarget(CF_STATS_DB_DIR)
def mapreduce_inputs(self):
return parallel.Collection.from_glob(join(self.input().path, '*'))
def map(self, log_file, value, output):
stats = {}
df = pd.read_csv(
gzip.open(log_file, 'rb'),
sep='\t', skiprows=(0, 1),
names=['date', 'time', 'edge', 'bytes', 'ip', 'method', 'host', 'uri',
'status', 'referer', 'ua'],
usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
low_memory=False, na_values=[], keep_default_na=False, index_col=False,
engine='c', memory_map=True)
for row in df.itertuples():
if row.method == 'GET' and (200 <= int(row.status) <= 299) and not isBot(row.ua):
for path, endpoint in ENDPOINT_INDEX_MAP.items():
if row.uri.startswith('/' + path):
if endpoint in stats:
stats[endpoint] = stats[endpoint] + 1
else:
stats[endpoint] = 1
if len(stats) > 0:
output.add('stats', stats)
def reduce(self, key, values, output):
assert key == 'stats'
for value in values:
for endpoint, count in value.items():
if endpoint in self.agg_stats:
self.agg_stats[endpoint] = self.agg_stats[endpoint] + count
else:
self.agg_stats[endpoint] = count
output.put(key, self.agg_stats)
class S3AccessLogsStats(parallel.MRTask):
agg_stats = {}
def requires(self):
return SyncS3AccessLogs()
def output(self):
return luigi.LocalTarget(S3_STATS_DB_DIR)
def mapreduce_inputs(self):
return parallel.Collection.from_glob(join(self.input().path, '*'))
def map(self, log_file, value, output):
stats = {}
df = pd.read_csv(
log_file,
sep=" ",
names=['Owner', 'Bucket', 'Time', 'Tz', 'IP', 'Requester',
'RequestID',
'Operation', 'Key', 'URI', 'Status', 'ErrorCode', 'BytesSent', 'ObjectSize',
'TotalTime',
'TurnAroundTime', 'Referrer', 'UserAgent', 'VersionId', 'HostId'],
low_memory=False, na_values=[], keep_default_na=False, index_col=False,
engine='c', memory_map=True)
for row in df.itertuples():
if row.Operation == 'REST.GET.OBJECT' and isinstance(row.Status, int) and (200 <= row.Status <= 299) and row.ErrorCode == '-' and not isBot(
row.UserAgent):
for path, endpoint in ENDPOINT_INDEX_MAP.items():
if row.Key.startswith(path):
if endpoint in stats:
stats[endpoint] = stats[endpoint] + 1
else:
stats[endpoint] = 1
if len(stats) > 0:
output.add('stats', stats)
def reduce(self, key, values, output):
assert key == 'stats'
for value in values:
for endpoint, count in value.items():
if endpoint in self.agg_stats:
self.agg_stats[endpoint] = self.agg_stats[endpoint] + count
else:
self.agg_stats[endpoint] = count
output.put(key, self.agg_stats)
class TotalStats(parallel.MRTask):
agg_stats = {}
def requires(self):
return [S3AccessLogsStats(), CFAccessLogsStats()]
def output(self):
return luigi.LocalTarget(TOTAL_STATS_DB_DIR)
def mapreduce_inputs(self):
return parallel.Collection.from_sharded_list([path.path for path in self.input()])
def reduce(self, key, values, output):
assert key == 'stats'
for value in values:
for endpoint, count in value.items():
if endpoint in self.agg_stats:
self.agg_stats[endpoint] = self.agg_stats[endpoint] + count
else:
self.agg_stats[endpoint] = count
output.put(key, self.agg_stats)
class LoadJSON(index_util.LoadJSONBase):
index_name = 'downloadstats'
type_name = 'downloadstats'
mapping_file = './schemas/downloadstats_mapping.json'
data_source = TotalStats()
use_checksum = False
optimize_index = True
if __name__ == '__main__':
luigi.run()
| en | 0.860417 | #!/usr/bin/python Pipeline for parsing all the access log files (S3 and CloudFront) for dataset downloads and calculating totals. # Cut off S3 access logs at the point when CF logs became available | 1.926046 | 2 |
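A small self-contained sketch of the two checks the map() steps above apply to every log row: the crawler user-agent filter and the endpoint prefix match. The pattern list and request values here are made up; in the pipeline the patterns come from crawler-user-agents.json and the endpoints from ENDPOINT_INDEX_MAP.
import re

BOT_PATTERNS = [r'Googlebot', r'bingbot', r'crawler']          # stand-in for crawler-user-agents.json
ENDPOINTS = {'drug/event': 'drugevent',
             'food/enforcement': 'foodenforcement'}            # subset of ENDPOINT_INDEX_MAP

def is_bot(ua):
    # Same idea as isBot() above: any pattern hit marks the UA as a crawler.
    return any(re.search(p, ua) for p in BOT_PATTERNS)

def classify(uri):
    # Mirrors the prefix matching done per row in CFAccessLogsStats.map().
    for path, endpoint in ENDPOINTS.items():
        if uri.startswith('/' + path):
            return endpoint
    return None

print(is_bot('Mozilla/5.0 (compatible; Googlebot/2.1)'))                   # True
print(classify('/drug/event/all_other/drug-event-0001-of-0029.json.zip'))  # drugevent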
pisat/sensor/apds9301.py | jjj999/pisat | 1 | 6631728 | <reponame>jjj999/pisat
#! python3
"""
pisat.sensor.sensor.apds9301
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sensor class for the APDS9301, compatible with the pisat system.
This module works completely, regardless of whether using the pisat system or not.
[info]
APDS9301 datasheet
https://datasheetspdf.com/datasheet/APDS-9301.html
TODO interrupt settings, debug, docstring
"""
import math
from typing import Optional, Tuple
from pisat.handler.i2c_handler_base import I2CHandlerBase
from pisat.model.datamodel import DataModelBase, loggable
from pisat.sensor.sensor_base import HandlerMismatchError, HandlerNotSetError
from pisat.sensor.sensor_base import SensorBase
class Apds9301(SensorBase):
ADDRESS_I2C_GND = 0x29
ADDRESS_I2C_FLOAT = 0x39
ADDRESS_I2C_VDD = 0x49
# - - - - - - - - - - - - - - - -
# NOTE
#
    # 1. Each register is represented as a combination of
# the bits of command fields and one of the bits
# of register address.
# - - - - - - - - - - - - - - - -
# BITS OF COMMAND FIELD
BITS_COMMAND_CMD = 0b10000000
BITS_COMMAND_CLEAR = 0b01000000
BITS_COMMAND_WORD = 0b00100000
    # BITS OF REGISTER ADDRESS
BITS_REG_CTRL = 0x0
BITS_REG_TIMING = 0x1
BITS_REG_THRESH_LOW_LOW = 0x2
BITS_REG_THRESH_LOW_HIGH = 0x3
BITS_REG_THRESH_HIGH_LOW = 0x4
BITS_REG_THRESH_HIGH_HIGH = 0x5
BITS_REG_INTERRUPT = 0x6
BITS_REG_ID = 0xA
BITS_REG_DATA0 = (0xC, 0xD)
BITS_REG_DATA1 = (0xE, 0xF)
# BITS ABOUT CONTROL REGISTER
BITS_POW_UP = 0x03
BITS_POW_DOWN = 0x00
# BITS ABOUT TIMING REGISTER
BITS_TIMING_GAIN_HIGH = 0b00010000
BITS_TIMING_GAIN_LOW = 0b00000000
BITS_TIMING_MANUAL_START = 0b00001000
BITS_TIMING_MANUAL_STOP = 0b00000000
BITS_TIMING_INTEGRATION_0 = 0b00000000
BITS_TIMING_INTEGRATION_1 = 0b00000001
BITS_TIMING_INTEGRATION_2 = 0b00000010
BITS_TIMING_INTEGRATION_MANUAL = 0b00000011
# BITS ABOUT INTERRUPT CONTROL REGISTER
BITS_INTR_LEVEL_DISABLED = 0b00000000
BITS_INTR_LEVEL_ENABLED = 0b00010000
    # CONSTANT VALUES ABOUT REGISTERS
SIZE_BYTES_REG_DATA = 4
BITS_TIMING_INTEG_DEFAULT = BITS_TIMING_INTEGRATION_2
BITS_THRESHOLD_DEFAULT = 0x0000
THRESHOLD_MAX = 0xFFFF
THRESHOLD_MIN = 0x0000
PERSISTENCE_MAX = 0xF
PERSISTENCE_MIN = 0x0
ID_ON_DEBUG = -1
# - - - - - - - - - - - - - - - -
# OPTIONS
#
# * Gain
# value | mode
# -----------------------------
# 0 | high gain mode
# 1 | low gain mode
#
# * Manual Timing Control
# value | feature
# -----------------------------------------
# 0 | stop an integration cycle
# 1 | begin an integration cycle
# NOTE
# The Manual Timing Control option will work only when INTEG
# is set as 0x11.
#
# * INTEG
# value | nominal integration time
# -----------------------------------------
# 00 | 13.7 ms
# 01 | 101 ms
# 10 | 402 ms
# 11 | N/A
# - - - - - - - - - - - - - - - -
class DataModel(DataModelBase):
def setup(self, illum):
self._illum = illum
@loggable
def illuminance(self):
return self._illum
def __init__(self,
handler: I2CHandlerBase,
name: Optional[str] = None) -> None:
if not isinstance(handler, I2CHandlerBase):
raise HandlerMismatchError(
"'handler' must be HandlerI2C."
)
super().__init__(name)
self._handler: Optional[I2CHandlerBase] = handler
self._gain: int = self.BITS_TIMING_GAIN_LOW
self._manual: int = self.BITS_TIMING_MANUAL_STOP
self._integ: int = self.BITS_TIMING_INTEG_DEFAULT
self._id: int = self.ID_ON_DEBUG
self._threshold_low: int = self.BITS_THRESHOLD_DEFAULT
self._threshold_high: int = self.BITS_THRESHOLD_DEFAULT
self._level: int = self.BITS_INTR_LEVEL_DISABLED
self._persistence: int = 0
# setup device when a HandlerI2C is given.
self.power_up()
self._id: int = self._read_id()
def read(self):
ch0, ch1 = self._read_raw_data()
illum = self.calc_illum(ch0, ch1)
model = self.DataModel(self.name)
model.setup(illum)
return model
@classmethod
def calc_illum(cls, ch0, ch1) -> float:
p = ch1 / ch0
lux = 0.
if 0 < p <= 0.5:
lux = 0.0304 * ch0 - 0.062 * ch0 * math.pow(p, 1.4)
elif p <= 0.61:
lux = 0.0224 * ch0 - 0.031 * ch1
elif p <= 0.80:
lux = 0.0128 * ch0 - 0.0153 * ch1
elif p <= 1.30:
lux = 0.00146 * ch0 - 0.00112 * ch1
return lux
@property
def id(self):
return self._id
def power_up(self):
self._check_handler()
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_CTRL,
self.BITS_POW_UP)
def power_down(self):
self._check_handler()
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_CTRL,
self.BITS_POW_DOWN)
def set_timing(self,
highgain: Optional[bool] = None,
manual: Optional[bool] = None,
integ: Optional[int] = None):
self._check_handler()
if highgain is not None:
if isinstance(highgain, bool):
if highgain:
self._gain = self.BITS_TIMING_GAIN_HIGH
else:
self._gain = self.BITS_TIMING_GAIN_LOW
else:
raise TypeError(
"'highgain' must be bool."
)
if manual is not None:
if isinstance(manual, bool):
if manual:
self._manual = self.BITS_TIMING_MANUAL_START
else:
self._manual = self.BITS_TIMING_MANUAL_STOP
else:
raise TypeError(
"'manual' must be bool."
)
if integ is not None:
if self.BITS_TIMING_INTEGRATION_0 <= integ <= self.BITS_TIMING_INTEGRATION_MANUAL:
self._integ = integ
else:
raise ValueError(
"'integ' must be int and no less than 0 and no more than 3."
)
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_TIMING,
self._gain | self._manual | self._integ)
def start_manual_integ(self):
self._check_handler()
self.set_timing(manual=True, integ=self.BITS_TIMING_INTEGRATION_MANUAL)
def stop_manual_integ(self):
self._check_handler()
self.set_timing(manual=False, integ=self.BITS_TIMING_INTEGRATION_MANUAL)
def clear_interrupt(self):
self._handler.read(self.BITS_COMMAND_CMD | self.BITS_COMMAND_CLEAR | self.BITS_REG_ID, 1)
def set_interrupt(self,
low: Optional[int] = None,
high: Optional[int] = None,
islevel: Optional[int] = None,
persistence: Optional[int] = None):
if low is not None:
            if self.THRESHOLD_MIN <= low <= self.THRESHOLD_MAX:
self._threshold_low = low
lower = low & 0x00FF
upper = low & 0xFF00
self._set_threshold_low(lower, upper)
else:
raise ValueError(
"'low' must be int and in {} ~ {}"
.format(self.THRESHOLD_MIN, self.THRESHOLD_MAX)
)
if high is not None:
            if self.THRESHOLD_MIN <= high <= self.THRESHOLD_MAX:
self._threshold_high = high
lower = high & 0x00FF
upper = high & 0xFF00
self._set_threshold_high(lower, upper)
else:
raise ValueError(
"'high' must be int and in {} ~ {}"
.format(self.THRESHOLD_MIN, self.THRESHOLD_MAX)
)
if islevel is not None:
if isinstance(islevel, bool):
if islevel:
self._level = self.BITS_INTR_LEVEL_ENABLED
else:
self._level = self.BITS_INTR_LEVEL_DISABLED
else:
raise TypeError(
"'islevel' must be bool."
)
if persistence is not None:
if self.PERSISTENCE_MIN <= persistence <= self.PERSISTENCE_MAX:
self._persistence = persistence
else:
raise ValueError(
"'persistance' must be int and in {} ~ {}"
.format(self.PERSISTENCE_MIN, self.PERSISTENCE_MAX)
)
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_INTERRUPT,
self._level | self._persistence)
def _check_handler(self):
if self._handler is None:
raise HandlerNotSetError(
"A hanlder must be set for executing this method."
)
def _read_raw_data(self) -> Tuple[int]:
_, raw = self._handler.read(self.BITS_COMMAND_CMD | self.BITS_REG_DATA0[0],
self.SIZE_BYTES_REG_DATA)
return (raw[1] << 8 | raw[0], raw[3] << 8 | raw[2])
def _read_id(self) -> int:
_, raw = self._handler.read(self.BITS_COMMAND_CMD | self.BITS_REG_ID, 1)
return raw[0]
def _set_threshold_low(self, lower: int, upper: int):
self._check_handler()
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_THRESH_LOW_LOW, lower)
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_THRESH_LOW_HIGH, upper)
def _set_threshold_high(self, lower: int, upper: int):
self._check_handler()
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_THRESH_HIGH_LOW, lower)
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_THRESH_HIGH_HIGH, upper)
| #! python3
"""
pisat.sensor.sensor.apds9301
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sensor class for the APDS9301, compatible with the pisat system.
This module works completely, regardless of whether using the pisat system or not.
[info]
APDS9301 datasheet
https://datasheetspdf.com/datasheet/APDS-9301.html
TODO interrupt settings, debug, docstring
"""
import math
from typing import Optional, Tuple
from pisat.handler.i2c_handler_base import I2CHandlerBase
from pisat.model.datamodel import DataModelBase, loggable
from pisat.sensor.sensor_base import HandlerMismatchError, HandlerNotSetError
from pisat.sensor.sensor_base import SensorBase
class Apds9301(SensorBase):
ADDRESS_I2C_GND = 0x29
ADDRESS_I2C_FLOAT = 0x39
ADDRESS_I2C_VDD = 0x49
# - - - - - - - - - - - - - - - -
# NOTE
#
    # 1. Each register is represented as a combination of
# the bits of command fields and one of the bits
# of register address.
# - - - - - - - - - - - - - - - -
# BITS OF COMMAND FIELD
BITS_COMMAND_CMD = 0b10000000
BITS_COMMAND_CLEAR = 0b01000000
BITS_COMMAND_WORD = 0b00100000
    # BITS OF REGISTER ADDRESS
BITS_REG_CTRL = 0x0
BITS_REG_TIMING = 0x1
BITS_REG_THRESH_LOW_LOW = 0x2
BITS_REG_THRESH_LOW_HIGH = 0x3
BITS_REG_THRESH_HIGH_LOW = 0x4
BITS_REG_THRESH_HIGH_HIGH = 0x5
BITS_REG_INTERRUPT = 0x6
BITS_REG_ID = 0xA
BITS_REG_DATA0 = (0xC, 0xD)
BITS_REG_DATA1 = (0xE, 0xF)
# BITS ABOUT CONTROL REGISTER
BITS_POW_UP = 0x03
BITS_POW_DOWN = 0x00
# BITS ABOUT TIMING REGISTER
BITS_TIMING_GAIN_HIGH = 0b00010000
BITS_TIMING_GAIN_LOW = 0b00000000
BITS_TIMING_MANUAL_START = 0b00001000
BITS_TIMING_MANUAL_STOP = 0b00000000
BITS_TIMING_INTEGRATION_0 = 0b00000000
BITS_TIMING_INTEGRATION_1 = 0b00000001
BITS_TIMING_INTEGRATION_2 = 0b00000010
BITS_TIMING_INTEGRATION_MANUAL = 0b00000011
# BITS ABOUT INTERRUPT CONTROL REGISTER
BITS_INTR_LEVEL_DISABLED = 0b00000000
BITS_INTR_LEVEL_ENABLED = 0b00010000
    # CONSTANT VALUES ABOUT REGISTERS
SIZE_BYTES_REG_DATA = 4
BITS_TIMING_INTEG_DEFAULT = BITS_TIMING_INTEGRATION_2
BITS_THRESHOLD_DEFAULT = 0x0000
THRESHOLD_MAX = 0xFFFF
THRESHOLD_MIN = 0x0000
PERSISTENCE_MAX = 0xF
PERSISTENCE_MIN = 0x0
ID_ON_DEBUG = -1
# - - - - - - - - - - - - - - - -
# OPTIONS
#
# * Gain
# value | mode
# -----------------------------
# 0 | high gain mode
# 1 | low gain mode
#
# * Manual Timing Control
# value | feature
# -----------------------------------------
# 0 | stop an integration cycle
# 1 | begin an integration cycle
# NOTE
# The Manual Timing Control option will work only when INTEG
# is set as 0x11.
#
# * INTEG
# value | nominal integration time
# -----------------------------------------
# 00 | 13.7 ms
# 01 | 101 ms
# 10 | 402 ms
# 11 | N/A
# - - - - - - - - - - - - - - - -
class DataModel(DataModelBase):
def setup(self, illum):
self._illum = illum
@loggable
def illuminance(self):
return self._illum
def __init__(self,
handler: I2CHandlerBase,
name: Optional[str] = None) -> None:
if not isinstance(handler, I2CHandlerBase):
raise HandlerMismatchError(
"'handler' must be HandlerI2C."
)
super().__init__(name)
self._handler: Optional[I2CHandlerBase] = handler
self._gain: int = self.BITS_TIMING_GAIN_LOW
self._manual: int = self.BITS_TIMING_MANUAL_STOP
self._integ: int = self.BITS_TIMING_INTEG_DEFAULT
self._id: int = self.ID_ON_DEBUG
self._threshold_low: int = self.BITS_THRESHOLD_DEFAULT
self._threshold_high: int = self.BITS_THRESHOLD_DEFAULT
self._level: int = self.BITS_INTR_LEVEL_DISABLED
self._persistence: int = 0
# setup device when a HandlerI2C is given.
self.power_up()
self._id: int = self._read_id()
def read(self):
ch0, ch1 = self._read_raw_data()
illum = self.calc_illum(ch0, ch1)
model = self.DataModel(self.name)
model.setup(illum)
return model
@classmethod
def calc_illum(cls, ch0, ch1) -> float:
p = ch1 / ch0
lux = 0.
if 0 < p <= 0.5:
lux = 0.0304 * ch0 - 0.062 * ch0 * math.pow(p, 1.4)
elif p <= 0.61:
lux = 0.0224 * ch0 - 0.031 * ch1
elif p <= 0.80:
lux = 0.0128 * ch0 - 0.0153 * ch1
elif p <= 1.30:
lux = 0.00146 * ch0 - 0.00112 * ch1
return lux
@property
def id(self):
return self._id
def power_up(self):
self._check_handler()
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_CTRL,
self.BITS_POW_UP)
def power_down(self):
self._check_handler()
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_CTRL,
self.BITS_POW_DOWN)
def set_timing(self,
highgain: Optional[bool] = None,
manual: Optional[bool] = None,
integ: Optional[int] = None):
self._check_handler()
if highgain is not None:
if isinstance(highgain, bool):
if highgain:
self._gain = self.BITS_TIMING_GAIN_HIGH
else:
self._gain = self.BITS_TIMING_GAIN_LOW
else:
raise TypeError(
"'highgain' must be bool."
)
if manual is not None:
if isinstance(manual, bool):
if manual:
self._manual = self.BITS_TIMING_MANUAL_START
else:
self._manual = self.BITS_TIMING_MANUAL_STOP
else:
raise TypeError(
"'manual' must be bool."
)
if integ is not None:
if self.BITS_TIMING_INTEGRATION_0 <= integ <= self.BITS_TIMING_INTEGRATION_MANUAL:
self._integ = integ
else:
raise ValueError(
"'integ' must be int and no less than 0 and no more than 3."
)
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_TIMING,
self._gain | self._manual | self._integ)
def start_manual_integ(self):
self._check_handler()
self.set_timing(manual=True, integ=self.BITS_TIMING_INTEGRATION_MANUAL)
def stop_manual_integ(self):
self._check_handler()
self.set_timing(manual=False, integ=self.BITS_TIMING_INTEGRATION_MANUAL)
def clear_interrupt(self):
self._handler.read(self.BITS_COMMAND_CMD | self.BITS_COMMAND_CLEAR | self.BITS_REG_ID, 1)
def set_interrupt(self,
low: Optional[int] = None,
high: Optional[int] = None,
islevel: Optional[int] = None,
persistence: Optional[int] = None):
if low is not None:
            if self.THRESHOLD_MIN <= low <= self.THRESHOLD_MAX:
self._threshold_low = low
lower = low & 0x00FF
upper = low & 0xFF00
self._set_threshold_low(lower, upper)
else:
raise ValueError(
"'low' must be int and in {} ~ {}"
.format(self.THRESHOLD_MIN, self.THRESHOLD_MAX)
)
if high is not None:
            if self.THRESHOLD_MIN <= high <= self.THRESHOLD_MAX:
self._threshold_high = high
lower = high & 0x00FF
upper = high & 0xFF00
self._set_threshold_high(lower, upper)
else:
raise ValueError(
"'high' must be int and in {} ~ {}"
.format(self.THRESHOLD_MIN, self.THRESHOLD_MAX)
)
if islevel is not None:
if isinstance(islevel, bool):
if islevel:
self._level = self.BITS_INTR_LEVEL_ENABLED
else:
self._level = self.BITS_INTR_LEVEL_DISABLED
else:
raise TypeError(
"'islevel' must be bool."
)
if persistence is not None:
if self.PERSISTENCE_MIN <= persistence <= self.PERSISTENCE_MAX:
self._persistence = persistence
else:
raise ValueError(
"'persistance' must be int and in {} ~ {}"
.format(self.PERSISTENCE_MIN, self.PERSISTENCE_MAX)
)
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_INTERRUPT,
self._level | self._persistence)
def _check_handler(self):
if self._handler is None:
raise HandlerNotSetError(
"A hanlder must be set for executing this method."
)
def _read_raw_data(self) -> Tuple[int]:
_, raw = self._handler.read(self.BITS_COMMAND_CMD | self.BITS_REG_DATA0[0],
self.SIZE_BYTES_REG_DATA)
return (raw[1] << 8 | raw[0], raw[3] << 8 | raw[2])
def _read_id(self) -> int:
_, raw = self._handler.read(self.BITS_COMMAND_CMD | self.BITS_REG_ID, 1)
return raw[0]
def _set_threshold_low(self, lower: int, upper: int):
self._check_handler()
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_THRESH_LOW_LOW, lower)
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_THRESH_LOW_HIGH, upper)
def _set_threshold_high(self, lower: int, upper: int):
self._check_handler()
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_THRESH_HIGH_LOW, lower)
self._handler.write(self.BITS_COMMAND_CMD | self.BITS_REG_THRESH_HIGH_HIGH, upper) | en | 0.48012 | #! python3 pisat.sensor.sensor.apds9301 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sensor class of APDS280 compatible with the pisat system. This module works completely, regardless of whether using the pisat system or not. [info] APDS9301 datasheet https://datasheetspdf.com/datasheet/APDS-9301.html TODO interrupt settings, debug, docstring # - - - - - - - - - - - - - - - - # NOTE # # 1. Each Registers are represented as compination of # the bits of command fields and one of the bits # of register address. # - - - - - - - - - - - - - - - - # BITS OF COMMAND FIELD # BITS OF RESISTOR ADDRESS # BITS ABOUT CONTROL REGISTER # BITS ABOUT TIMING REGISTER # BITS ABOUT INTERRUPT CONTROL REGISTER # CONSTANT VALUES ABOUT REGISTORS # - - - - - - - - - - - - - - - - # OPTIONS # # * Gain # value | mode # ----------------------------- # 0 | high gain mode # 1 | low gain mode # # * Manual Timing Control # value | feature # ----------------------------------------- # 0 | stop an integration cycle # 1 | begin an integration cycle # NOTE # The Manual Timing Control option will work only when INTEG # is set as 0x11. # # * INTEG # value | nominal integration time # ----------------------------------------- # 00 | 13.7 ms # 01 | 101 ms # 10 | 402 ms # 11 | N/A # - - - - - - - - - - - - - - - - # setup device when a HandlerI2C is given. | 2.552639 | 3 |
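A quick worked example of the piecewise lux conversion in Apds9301.calc_illum() above, using made-up raw channel counts (the coefficients come from the APDS-9301 datasheet referenced in the module docstring).
# With ch0 = 1000 and ch1 = 500 the channel ratio is 0.5, so the first branch applies:
#   lux = 0.0304 * 1000 - 0.062 * 1000 * 0.5 ** 1.4   -> about 6.9 lux
ch0, ch1 = 1000, 500
print(round(Apds9301.calc_illum(ch0, ch1), 1))
# Note that calc_illum() divides by ch0, so a raw ch0 reading of 0 (total darkness)
# would raise ZeroDivisionError; callers may want to guard against that case.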
admin/update_sparkle_xml.py | isabella232/zulip-desktop-legacy | 13 | 6631729 | #!/usr/bin/env python
# Updates a sparkle appcast xml file with a new release
from optparse import OptionParser
from xml.dom.minidom import parse
from datetime import datetime
import os
parser = OptionParser(r"""
%prog --f xmlfile -s "signature" -v X.X.X -l <bytelength>""")
parser.add_option("-f", "--file", dest="filename",
help="XML appcast file to update", metavar="FILE")
parser.add_option("-s", "--sig", dest="sig",
help="Signature from Sparkle signing process")
parser.add_option("-v", "--version", dest="version",
help="New version to update the appcast file with")
parser.add_option("-l", "--length", dest="length",
help="Length in bytes of .tar.bz2 file")
parser.add_option("-w", "--windows", dest="windows",
help="Write a windows sparkle formatted XML file",
action="store_true", default=False)
parser.add_option("-o", "--sso", dest="sso",
help="Write an SSO build file",
action="store_true", default=False)
(options, _) = parser.parse_args()
if not options.windows and (options.filename is None or
options.sig is None or options.version is None or
options.length is None):
parser.error("Please pass all four required arguments")
elif options.windows and (options.filename is None or options.version is None):
parser.error("Please provide an XML filename and version string")
try:
xml = parse(options.filename)
except IOError:
print "Failed to parse filename: %s" % options.filename
    sys.exit(1)
pubDate = datetime.now().strftime("%a, %d %b %Y %H:%M:%S %z")
version = "Version %s" % options.version
sso_path = "sso/" if options.sso else ""
if options.windows:
path = "dist/apps/%swin/zulip-%s.exe" % (sso_path, options.version,)
else:
path = "dist/apps/%smac/Zulip-%s.tar.bz2" % (sso_path, options.version,)
url = "https://zulip.com/%s" % (path,)
channel = xml.getElementsByTagName('channel')[0]
latest = channel.getElementsByTagName('item')[0]
newItem = latest.cloneNode(True)
newItem.getElementsByTagName('title')[0].firstChild.replaceWholeText(version)
newItem.getElementsByTagName('pubDate')[0].firstChild.replaceWholeText(pubDate)
newItem.getElementsByTagName('enclosure')[0].setAttribute("url", url)
newItem.getElementsByTagName('enclosure')[0].setAttribute("sparkle:version",
options.version)
if not options.windows:
newItem.getElementsByTagName('enclosure')[0].setAttribute("length",
options.length)
newItem.getElementsByTagName('enclosure')[0].setAttribute("sparkle:dsaSignature",
options.sig)
channel.insertBefore(newItem, latest)
outfile = open(options.filename, 'w')
xml.writexml(outfile)
outfile.close()
| #!/usr/bin/env python
# Updates a sparkle appcast xml file with a new release
from optparse import OptionParser
from xml.dom.minidom import parse
from datetime import datetime
import os
parser = OptionParser(r"""
%prog --f xmlfile -s "signature" -v X.X.X -l <bytelength>""")
parser.add_option("-f", "--file", dest="filename",
help="XML appcast file to update", metavar="FILE")
parser.add_option("-s", "--sig", dest="sig",
help="Signature from Sparkle signing process")
parser.add_option("-v", "--version", dest="version",
help="New version to update the appcast file with")
parser.add_option("-l", "--length", dest="length",
help="Length in bytes of .tar.bz2 file")
parser.add_option("-w", "--windows", dest="windows",
help="Write a windows sparkle formatted XML file",
action="store_true", default=False)
parser.add_option("-o", "--sso", dest="sso",
help="Write an SSO build file",
action="store_true", default=False)
(options, _) = parser.parse_args()
if not options.windows and (options.filename is None or
options.sig is None or options.version is None or
options.length is None):
parser.error("Please pass all four required arguments")
elif options.windows and (options.filename is None or options.version is None):
parser.error("Please provide an XML filename and version string")
try:
xml = parse(options.filename)
except IOError:
print "Failed to parse filename: %s" % options.filename
    sys.exit(1)
pubDate = datetime.now().strftime("%a, %d %b %Y %H:%M:%S %z")
version = "Version %s" % options.version
sso_path = "sso/" if options.sso else ""
if options.windows:
path = "dist/apps/%swin/zulip-%s.exe" % (sso_path, options.version,)
else:
path = "dist/apps/%smac/Zulip-%s.tar.bz2" % (sso_path, options.version,)
url = "https://zulip.com/%s" % (path,)
channel = xml.getElementsByTagName('channel')[0]
latest = channel.getElementsByTagName('item')[0]
newItem = latest.cloneNode(True)
newItem.getElementsByTagName('title')[0].firstChild.replaceWholeText(version)
newItem.getElementsByTagName('pubDate')[0].firstChild.replaceWholeText(pubDate)
newItem.getElementsByTagName('enclosure')[0].setAttribute("url", url)
newItem.getElementsByTagName('enclosure')[0].setAttribute("sparkle:version",
options.version)
if not options.windows:
newItem.getElementsByTagName('enclosure')[0].setAttribute("length",
options.length)
newItem.getElementsByTagName('enclosure')[0].setAttribute("sparkle:dsaSignature",
options.sig)
channel.insertBefore(newItem, latest)
outfile = open(options.filename, 'w')
xml.writexml(outfile)
outfile.close()
| en | 0.346812 | #!/usr/bin/env python # Updates a sparkle appcast xml file with a new release %prog --f xmlfile -s "signature" -v X.X.X -l <bytelength> | 2.277746 | 2 |
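A sketch of how the appcast updater above might be driven from a release script. The option flags are the ones declared in its OptionParser block; the file name, signature, version and byte length are placeholders, and the script is assumed to be run from the repository root.
import subprocess

subprocess.check_call([
    'python', 'admin/update_sparkle_xml.py',
    '-f', 'sparkle-changelog.xml',   # appcast XML to update (placeholder name)
    '-s', 'MC0CFQCd...',             # DSA signature from the Sparkle signing step (placeholder)
    '-v', '0.5.2',                   # new release version
    '-l', '8675309',                 # byte length of the .tar.bz2
])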
otcCard/__init__.py | otclab/EstApp | 0 | 6631730 | <reponame>otclab/EstApp<filename>otcCard/__init__.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
from .SerialDevice import *
from .OTCCard import *
from .OTCProtocol import *
from .OTCProtocolError import *
| #!/usr/bin/python
# -*- coding: utf-8 -*-
from .SerialDevice import *
from .OTCCard import *
from .OTCProtocol import *
from .OTCProtocolError import * | en | 0.44423 | #!/usr/bin/python # -*- coding: utf-8 -*- | 1.259944 | 1 |
intro/part04-18_mean/src/mean.py | Hannah-Abi/python-pro-21 | 0 | 6631731 | <reponame>Hannah-Abi/python-pro-21
# Write your solution here
def mean(list):
sum = 0
for i in range(len(list)):
sum += list[i]
return sum/len(list)
# You can test your function by calling it within the following block
if __name__ == "__main__":
my_list = [3, 6, -4]
result = mean(my_list)
print(result) | # Write your solution here
def mean(list):
sum = 0
for i in range(len(list)):
sum += list[i]
return sum/len(list)
# You can test your function by calling it within the following block
if __name__ == "__main__":
my_list = [3, 6, -4]
result = mean(my_list)
print(result) | en | 0.928411 | # Write your solution here # You can test your function by calling it within the following block | 3.8772 | 4 |
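The mean() exercise solution above can be sanity-checked against the standard library; a short sketch:
import statistics

values = [3, 6, -4]
assert abs(mean(values) - statistics.mean(values)) < 1e-9  # both give 5/3
print(mean(values))                                        # 1.666...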
src/script/sconsign.py | datalogics/scons-1 | 0 | 6631732 | #! /usr/bin/env python
#
# SCons - a Software Constructor
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
__version__ = "__VERSION__"
__build__ = "__BUILD__"
__buildsys__ = "__BUILDSYS__"
__date__ = "__DATE__"
__developer__ = "__DEVELOPER__"
import os
import sys
##############################################################################
# BEGIN STANDARD SCons SCRIPT HEADER
#
# This is the cut-and-paste logic so that a self-contained script can
# interoperate correctly with different SCons versions and installation
# locations for the engine. If you modify anything in this section, you
# should also change other scripts that use this same header.
##############################################################################
# compatibility check
if sys.version_info < (3,5,0):
msg = "scons: *** SCons version %s does not run under Python version %s.\n\
Python >= 3.5 is required.\n"
sys.stderr.write(msg % (__version__, sys.version.split()[0]))
sys.exit(1)
# Strip the script directory from sys.path so on case-insensitive
# (WIN32) systems Python doesn't think that the "scons" script is the
# "SCons" package.
script_dir = os.path.dirname(os.path.realpath(__file__))
script_path = os.path.realpath(os.path.dirname(__file__))
if script_path in sys.path:
sys.path.remove(script_path)
libs = []
if "SCONS_LIB_DIR" in os.environ:
libs.append(os.environ["SCONS_LIB_DIR"])
# running from source takes 2nd priority (since 2.3.2), following SCONS_LIB_DIR
source_path = os.path.join(script_path, os.pardir, 'engine')
if os.path.isdir(source_path):
libs.append(source_path)
# add local-install locations
local_version = 'scons-local-' + __version__
local = 'scons-local'
if script_dir:
local_version = os.path.join(script_dir, local_version)
local = os.path.join(script_dir, local)
if os.path.isdir(local_version):
libs.append(os.path.abspath(local_version))
if os.path.isdir(local):
libs.append(os.path.abspath(local))
scons_version = 'scons-%s' % __version__
# preferred order of scons lookup paths
prefs = []
# if we can find package information, use it
try:
import pkg_resources
except ImportError:
pass
else:
try:
d = pkg_resources.get_distribution('scons')
except pkg_resources.DistributionNotFound:
pass
else:
prefs.append(d.location)
if sys.platform == 'win32':
# Use only sys.prefix on Windows
prefs.append(sys.prefix)
prefs.append(os.path.join(sys.prefix, 'Lib', 'site-packages'))
else:
# On other (POSIX) platforms, things are more complicated due to
# the variety of path names and library locations.
# Build up some possibilities, then transform them into candidates
temp = []
if script_dir == 'bin':
# script_dir is `pwd`/bin;
# check `pwd`/lib/scons*.
temp.append(os.getcwd())
else:
if script_dir in ('.', ''):
script_dir = os.getcwd()
head, tail = os.path.split(script_dir)
if tail == "bin":
# script_dir is /foo/bin;
# check /foo/lib/scons*.
temp.append(head)
head, tail = os.path.split(sys.prefix)
if tail == "usr":
# sys.prefix is /foo/usr;
# check /foo/usr/lib/scons* first,
# then /foo/usr/local/lib/scons*.
temp.append(sys.prefix)
temp.append(os.path.join(sys.prefix, "local"))
elif tail == "local":
h, t = os.path.split(head)
if t == "usr":
# sys.prefix is /foo/usr/local;
# check /foo/usr/local/lib/scons* first,
# then /foo/usr/lib/scons*.
temp.append(sys.prefix)
temp.append(head)
else:
# sys.prefix is /foo/local;
# check only /foo/local/lib/scons*.
temp.append(sys.prefix)
else:
# sys.prefix is /foo (ends in neither /usr or /local);
# check only /foo/lib/scons*.
temp.append(sys.prefix)
# suffix these to add to our original prefs:
prefs.extend([os.path.join(x, 'lib') for x in temp])
prefs.extend([os.path.join(x, 'lib', 'python' + sys.version[:3],
'site-packages') for x in temp])
# Add the parent directory of the current python's library to the
# preferences. This picks up differences between, e.g., lib and lib64,
# and finds the base location in case of a non-copying virtualenv.
try:
libpath = os.__file__
except AttributeError:
pass
else:
# Split /usr/libfoo/python*/os.py to /usr/libfoo/python*.
libpath, _ = os.path.split(libpath)
# Split /usr/libfoo/python* to /usr/libfoo
libpath, tail = os.path.split(libpath)
# Check /usr/libfoo/scons*.
prefs.append(libpath)
# Look first for 'scons-__version__' in all of our preference libs,
# then for 'scons'. Skip paths that do not exist.
libs.extend([os.path.join(x, scons_version) for x in prefs if os.path.isdir(x)])
libs.extend([os.path.join(x, 'scons') for x in prefs if os.path.isdir(x)])
sys.path = libs + sys.path
##############################################################################
# END STANDARD SCons SCRIPT HEADER
##############################################################################
import SCons.compat
try:
import whichdb
whichdb = whichdb.whichdb
except ImportError as e:
from dbm import whichdb
import time
import pickle
import SCons.SConsign
def my_whichdb(filename):
if filename[-7:] == ".dblite":
return "SCons.dblite"
try:
with open(filename + ".dblite", "rb"):
return "SCons.dblite"
except IOError:
pass
return _orig_whichdb(filename)
# Should work on python2
_orig_whichdb = whichdb
whichdb = my_whichdb
# was changed for python3
#_orig_whichdb = whichdb.whichdb
#dbm.whichdb = my_whichdb
def my_import(mname):
import imp
if '.' in mname:
i = mname.rfind('.')
parent = my_import(mname[:i])
fp, pathname, description = imp.find_module(mname[i+1:],
parent.__path__)
else:
fp, pathname, description = imp.find_module(mname)
return imp.load_module(mname, fp, pathname, description)
class Flagger(object):
default_value = 1
def __setitem__(self, item, value):
self.__dict__[item] = value
self.default_value = 0
def __getitem__(self, item):
return self.__dict__.get(item, self.default_value)
Do_Call = None
Print_Directories = []
Print_Entries = []
Print_Flags = Flagger()
Verbose = 0
Readable = 0
Warns = 0
def default_mapper(entry, name):
"""
Stringify an entry that doesn't have an explicit mapping.
Args:
entry: entry
name: field name
Returns: str
"""
try:
val = eval("entry." + name)
except AttributeError:
val = None
if sys.version_info.major >= 3 and isinstance(val, bytes):
# This is a dirty hack for py 2/3 compatibility. csig is a bytes object
# in Python3 while Python2 bytes are str. Hence, we decode the csig to a
# Python3 string
val = val.decode()
return str(val)
def map_action(entry, _):
"""
Stringify an action entry and signature.
Args:
entry: action entry
second argument is not used
Returns: str
"""
try:
bact = entry.bact
bactsig = entry.bactsig
except AttributeError:
return None
return '%s [%s]' % (bactsig, bact)
def map_timestamp(entry, _):
"""
Stringify a timestamp entry.
Args:
entry: timestamp entry
second argument is not used
Returns: str
"""
try:
timestamp = entry.timestamp
except AttributeError:
timestamp = None
if Readable and timestamp:
return "'" + time.ctime(timestamp) + "'"
else:
return str(timestamp)
def map_bkids(entry, _):
"""
Stringify an implicit entry.
Args:
entry:
second argument is not used
Returns: str
"""
try:
bkids = entry.bsources + entry.bdepends + entry.bimplicit
bkidsigs = entry.bsourcesigs + entry.bdependsigs + entry.bimplicitsigs
except AttributeError:
return None
if len(bkids) != len(bkidsigs):
global Warns
Warns += 1
# add warning to result rather than direct print so it will line up
msg = "Warning: missing information, {} ids but {} sigs"
result = [msg.format(len(bkids), len(bkidsigs))]
else:
result = []
result += [nodeinfo_string(bkid, bkidsig, " ")
for bkid, bkidsig in zip(bkids, bkidsigs)]
if not result:
return None
return "\n ".join(result)
map_field = {
'action' : map_action,
'timestamp' : map_timestamp,
'bkids' : map_bkids,
}
map_name = {
'implicit' : 'bkids',
}
def field(name, entry, verbose=Verbose):
if not Print_Flags[name]:
return None
fieldname = map_name.get(name, name)
mapper = map_field.get(fieldname, default_mapper)
val = mapper(entry, name)
if verbose:
val = name + ": " + val
return val
def nodeinfo_raw(name, ninfo, prefix=""):
# This just formats the dictionary, which we would normally use str()
# to do, except that we want the keys sorted for deterministic output.
d = ninfo.__getstate__()
try:
keys = ninfo.field_list + ['_version_id']
except AttributeError:
keys = sorted(d.keys())
l = []
for k in keys:
l.append('%s: %s' % (repr(k), repr(d.get(k))))
if '\n' in name:
name = repr(name)
return name + ': {' + ', '.join(l) + '}'
def nodeinfo_cooked(name, ninfo, prefix=""):
try:
field_list = ninfo.field_list
except AttributeError:
field_list = []
if '\n' in name:
name = repr(name)
outlist = [name + ':'] + [
f for f in [field(x, ninfo, Verbose) for x in field_list] if f
]
if Verbose:
sep = '\n ' + prefix
else:
sep = ' '
return sep.join(outlist)
nodeinfo_string = nodeinfo_cooked
def printfield(name, entry, prefix=""):
outlist = field("implicit", entry, 0)
if outlist:
if Verbose:
print(" implicit:")
print(" " + outlist)
outact = field("action", entry, 0)
if outact:
if Verbose:
print(" action: " + outact)
else:
print(" " + outact)
def printentries(entries, location):
if Print_Entries:
for name in Print_Entries:
try:
entry = entries[name]
except KeyError:
err = "sconsign: no entry `%s' in `%s'\n" % (name, location)
sys.stderr.write(err)
else:
try:
ninfo = entry.ninfo
except AttributeError:
print(name + ":")
else:
print(nodeinfo_string(name, entry.ninfo))
printfield(name, entry.binfo)
else:
for name in sorted(entries.keys()):
entry = entries[name]
try:
ninfo = entry.ninfo
except AttributeError:
print(name + ":")
else:
print(nodeinfo_string(name, entry.ninfo))
printfield(name, entry.binfo)
class Do_SConsignDB(object):
def __init__(self, dbm_name, dbm):
self.dbm_name = dbm_name
self.dbm = dbm
def __call__(self, fname):
# The *dbm modules stick their own file suffixes on the names
# that are passed in. This causes us to jump through some
# hoops here.
try:
# Try opening the specified file name. Example:
# SPECIFIED OPENED BY self.dbm.open()
# --------- -------------------------
# .sconsign => .sconsign.dblite
# .sconsign.dblite => .sconsign.dblite.dblite
db = self.dbm.open(fname, "r")
except (IOError, OSError) as e:
print_e = e
try:
# That didn't work, so try opening the base name,
# so that if they actually passed in 'sconsign.dblite'
# (for example), the dbm module will put the suffix back
# on for us and open it anyway.
db = self.dbm.open(os.path.splitext(fname)[0], "r")
except (IOError, OSError):
# That didn't work either. See if the file name
# they specified even exists (independent of the dbm
# suffix-mangling).
try:
with open(fname, "rb"):
pass # this is a touch only, we don't use it here.
except (IOError, OSError) as e:
# Nope, that file doesn't even exist, so report that
# fact back.
print_e = e
sys.stderr.write("sconsign: %s\n" % print_e)
return
except KeyboardInterrupt:
raise
except pickle.UnpicklingError:
sys.stderr.write("sconsign: ignoring invalid `%s' file `%s'\n"
% (self.dbm_name, fname))
return
except Exception as e:
sys.stderr.write("sconsign: ignoring invalid `%s' file `%s': %s\n"
% (self.dbm_name, fname, e))
exc_type, _, _ = sys.exc_info()
if exc_type.__name__ == "ValueError" and sys.version_info < (3,0,0):
sys.stderr.write("Python 2 only supports pickle protocols 0-2.\n")
return
if Print_Directories:
for dir in Print_Directories:
try:
val = db[dir]
except KeyError:
err = "sconsign: no dir `%s' in `%s'\n" % (dir, args[0])
sys.stderr.write(err)
else:
self.printentries(dir, val)
else:
for dir in sorted(db.keys()):
self.printentries(dir, db[dir])
@staticmethod
def printentries(dir, val):
try:
print('=== ' + dir + ':')
except TypeError:
print('=== ' + dir.decode() + ':')
printentries(pickle.loads(val), dir)
def Do_SConsignDir(name):
try:
with open(name, 'rb') as fp:
try:
sconsign = SCons.SConsign.Dir(fp)
except KeyboardInterrupt:
raise
except pickle.UnpicklingError:
err = "sconsign: ignoring invalid .sconsign file `%s'\n" % name
sys.stderr.write(err)
return
except Exception as e:
err = "sconsign: ignoring invalid .sconsign file `%s': %s\n" % (name, e)
sys.stderr.write(err)
return
printentries(sconsign.entries, args[0])
except (IOError, OSError) as e:
sys.stderr.write("sconsign: %s\n" % e)
return
##############################################################################
import getopt
helpstr = """\
Usage: sconsign [OPTIONS] [FILE ...]
Options:
-a, --act, --action Print build action information.
-c, --csig Print content signature information.
-d DIR, --dir=DIR Print only info about DIR.
-e ENTRY, --entry=ENTRY Print only info about ENTRY.
-f FORMAT, --format=FORMAT FILE is in the specified FORMAT.
-h, --help Print this message and exit.
-i, --implicit Print implicit dependency information.
-r, --readable Print timestamps in human-readable form.
--raw Print raw Python object representations.
-s, --size Print file sizes.
-t, --timestamp Print timestamp information.
-v, --verbose Verbose, describe each field.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "acd:e:f:hirstv",
['act', 'action',
'csig', 'dir=', 'entry=',
'format=', 'help', 'implicit',
'raw', 'readable',
'size', 'timestamp', 'verbose'])
except getopt.GetoptError as err:
sys.stderr.write(str(err) + '\n')
print(helpstr)
sys.exit(2)
for o, a in opts:
if o in ('-a', '--act', '--action'):
Print_Flags['action'] = 1
elif o in ('-c', '--csig'):
Print_Flags['csig'] = 1
elif o in ('-d', '--dir'):
Print_Directories.append(a)
elif o in ('-e', '--entry'):
Print_Entries.append(a)
elif o in ('-f', '--format'):
# Try to map the given DB format to a known module
# name, that we can then try to import...
Module_Map = {'dblite': 'SCons.dblite', 'sconsign': None}
dbm_name = Module_Map.get(a, a)
if dbm_name:
try:
if dbm_name != "SCons.dblite":
dbm = my_import(dbm_name)
else:
import SCons.dblite
dbm = SCons.dblite
# Ensure that we don't ignore corrupt DB files,
# this was handled by calling my_import('SCons.dblite')
# again in earlier versions...
SCons.dblite.ignore_corrupt_dbfiles = 0
except ImportError:
sys.stderr.write("sconsign: illegal file format `%s'\n" % a)
print(helpstr)
sys.exit(2)
Do_Call = Do_SConsignDB(a, dbm)
else:
Do_Call = Do_SConsignDir
elif o in ('-h', '--help'):
print(helpstr)
sys.exit(0)
elif o in ('-i', '--implicit'):
Print_Flags['implicit'] = 1
elif o in ('--raw',):
nodeinfo_string = nodeinfo_raw
elif o in ('-r', '--readable'):
Readable = 1
elif o in ('-s', '--size'):
Print_Flags['size'] = 1
elif o in ('-t', '--timestamp'):
Print_Flags['timestamp'] = 1
elif o in ('-v', '--verbose'):
Verbose = 1
if Do_Call:
for a in args:
Do_Call(a)
else:
if not args:
args = [".sconsign.dblite"]
for a in args:
dbm_name = whichdb(a)
if dbm_name:
Map_Module = {'SCons.dblite': 'dblite'}
if dbm_name != "SCons.dblite":
dbm = my_import(dbm_name)
else:
import SCons.dblite
dbm = SCons.dblite
# Ensure that we don't ignore corrupt DB files,
# this was handled by calling my_import('SCons.dblite')
# again in earlier versions...
SCons.dblite.ignore_corrupt_dbfiles = 0
Do_SConsignDB(Map_Module.get(dbm_name, dbm_name), dbm)(a)
else:
Do_SConsignDir(a)
if Warns:
print("NOTE: there were %d warnings, please check output" % Warns)
sys.exit(0)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
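An aside on the Flagger helper defined near the top of the sconsign script above: every field defaults to "print it" until any flag is set explicitly, at which point the default flips off. A tiny sketch of that behaviour:
flags = Flagger()
print(flags['csig'])        # 1 - nothing set yet, every field defaults on
flags['timestamp'] = 1      # requesting one field switches the default off
print(flags['csig'])        # 0 - now only explicitly requested fields print
print(flags['timestamp'])   # 1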
| #! /usr/bin/env python
#
# SCons - a Software Constructor
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
__version__ = "__VERSION__"
__build__ = "__BUILD__"
__buildsys__ = "__BUILDSYS__"
__date__ = "__DATE__"
__developer__ = "__DEVELOPER__"
import os
import sys
##############################################################################
# BEGIN STANDARD SCons SCRIPT HEADER
#
# This is the cut-and-paste logic so that a self-contained script can
# interoperate correctly with different SCons versions and installation
# locations for the engine. If you modify anything in this section, you
# should also change other scripts that use this same header.
##############################################################################
# compatibility check
if sys.version_info < (3,5,0):
msg = "scons: *** SCons version %s does not run under Python version %s.\n\
Python >= 3.5 is required.\n"
sys.stderr.write(msg % (__version__, sys.version.split()[0]))
sys.exit(1)
# Strip the script directory from sys.path so on case-insensitive
# (WIN32) systems Python doesn't think that the "scons" script is the
# "SCons" package.
script_dir = os.path.dirname(os.path.realpath(__file__))
script_path = os.path.realpath(os.path.dirname(__file__))
if script_path in sys.path:
sys.path.remove(script_path)
libs = []
if "SCONS_LIB_DIR" in os.environ:
libs.append(os.environ["SCONS_LIB_DIR"])
# running from source takes 2nd priority (since 2.3.2), following SCONS_LIB_DIR
source_path = os.path.join(script_path, os.pardir, 'engine')
if os.path.isdir(source_path):
libs.append(source_path)
# add local-install locations
local_version = 'scons-local-' + __version__
local = 'scons-local'
if script_dir:
local_version = os.path.join(script_dir, local_version)
local = os.path.join(script_dir, local)
if os.path.isdir(local_version):
libs.append(os.path.abspath(local_version))
if os.path.isdir(local):
libs.append(os.path.abspath(local))
scons_version = 'scons-%s' % __version__
# preferred order of scons lookup paths
prefs = []
# if we can find package information, use it
try:
import pkg_resources
except ImportError:
pass
else:
try:
d = pkg_resources.get_distribution('scons')
except pkg_resources.DistributionNotFound:
pass
else:
prefs.append(d.location)
if sys.platform == 'win32':
# Use only sys.prefix on Windows
prefs.append(sys.prefix)
prefs.append(os.path.join(sys.prefix, 'Lib', 'site-packages'))
else:
# On other (POSIX) platforms, things are more complicated due to
# the variety of path names and library locations.
# Build up some possibilities, then transform them into candidates
temp = []
if script_dir == 'bin':
# script_dir is `pwd`/bin;
# check `pwd`/lib/scons*.
temp.append(os.getcwd())
else:
if script_dir in ('.', ''):
script_dir = os.getcwd()
head, tail = os.path.split(script_dir)
if tail == "bin":
# script_dir is /foo/bin;
# check /foo/lib/scons*.
temp.append(head)
head, tail = os.path.split(sys.prefix)
if tail == "usr":
# sys.prefix is /foo/usr;
# check /foo/usr/lib/scons* first,
# then /foo/usr/local/lib/scons*.
temp.append(sys.prefix)
temp.append(os.path.join(sys.prefix, "local"))
elif tail == "local":
h, t = os.path.split(head)
if t == "usr":
# sys.prefix is /foo/usr/local;
# check /foo/usr/local/lib/scons* first,
# then /foo/usr/lib/scons*.
temp.append(sys.prefix)
temp.append(head)
else:
# sys.prefix is /foo/local;
# check only /foo/local/lib/scons*.
temp.append(sys.prefix)
else:
# sys.prefix is /foo (ends in neither /usr or /local);
# check only /foo/lib/scons*.
temp.append(sys.prefix)
# suffix these to add to our original prefs:
prefs.extend([os.path.join(x, 'lib') for x in temp])
prefs.extend([os.path.join(x, 'lib', 'python' + sys.version[:3],
'site-packages') for x in temp])
# Add the parent directory of the current python's library to the
# preferences. This picks up differences between, e.g., lib and lib64,
# and finds the base location in case of a non-copying virtualenv.
try:
libpath = os.__file__
except AttributeError:
pass
else:
# Split /usr/libfoo/python*/os.py to /usr/libfoo/python*.
libpath, _ = os.path.split(libpath)
# Split /usr/libfoo/python* to /usr/libfoo
libpath, tail = os.path.split(libpath)
# Check /usr/libfoo/scons*.
prefs.append(libpath)
# Look first for 'scons-__version__' in all of our preference libs,
# then for 'scons'. Skip paths that do not exist.
libs.extend([os.path.join(x, scons_version) for x in prefs if os.path.isdir(x)])
libs.extend([os.path.join(x, 'scons') for x in prefs if os.path.isdir(x)])
sys.path = libs + sys.path
##############################################################################
# END STANDARD SCons SCRIPT HEADER
##############################################################################
import SCons.compat
try:
import whichdb
whichdb = whichdb.whichdb
except ImportError as e:
from dbm import whichdb
import time
import pickle
import SCons.SConsign
def my_whichdb(filename):
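    # Recognize SCons' .dblite signature databases first; fall back to the
    # standard whichdb check for any other database format.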
if filename[-7:] == ".dblite":
return "SCons.dblite"
try:
with open(filename + ".dblite", "rb"):
return "SCons.dblite"
except IOError:
pass
return _orig_whichdb(filename)
# Should work on python2
_orig_whichdb = whichdb
whichdb = my_whichdb
# was changed for python3
#_orig_whichdb = whichdb.whichdb
#dbm.whichdb = my_whichdb
def my_import(mname):
import imp
if '.' in mname:
i = mname.rfind('.')
parent = my_import(mname[:i])
fp, pathname, description = imp.find_module(mname[i+1:],
parent.__path__)
else:
fp, pathname, description = imp.find_module(mname)
return imp.load_module(mname, fp, pathname, description)
class Flagger(object):
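    # Tracks which fields were explicitly requested on the command line.
    # Until any flag is set, every lookup returns the default of 1 (print everything);
    # once one field has been requested, unrequested fields return 0.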
default_value = 1
def __setitem__(self, item, value):
self.__dict__[item] = value
self.default_value = 0
def __getitem__(self, item):
return self.__dict__.get(item, self.default_value)
Do_Call = None
Print_Directories = []
Print_Entries = []
Print_Flags = Flagger()
Verbose = 0
Readable = 0
Warns = 0
def default_mapper(entry, name):
"""
Stringify an entry that doesn't have an explicit mapping.
Args:
entry: entry
name: field name
Returns: str
"""
try:
val = eval("entry." + name)
except AttributeError:
val = None
if sys.version_info.major >= 3 and isinstance(val, bytes):
# This is a dirty hack for py 2/3 compatibility. csig is a bytes object
# in Python3 while Python2 bytes are str. Hence, we decode the csig to a
# Python3 string
val = val.decode()
return str(val)
def map_action(entry, _):
"""
Stringify an action entry and signature.
Args:
entry: action entry
second argument is not used
Returns: str
"""
try:
bact = entry.bact
bactsig = entry.bactsig
except AttributeError:
return None
return '%s [%s]' % (bactsig, bact)
def map_timestamp(entry, _):
"""
Stringify a timestamp entry.
Args:
entry: timestamp entry
second argument is not used
Returns: str
"""
try:
timestamp = entry.timestamp
except AttributeError:
timestamp = None
if Readable and timestamp:
return "'" + time.ctime(timestamp) + "'"
else:
return str(timestamp)
def map_bkids(entry, _):
"""
Stringify an implicit entry.
Args:
entry:
second argument is not used
Returns: str
"""
try:
bkids = entry.bsources + entry.bdepends + entry.bimplicit
bkidsigs = entry.bsourcesigs + entry.bdependsigs + entry.bimplicitsigs
except AttributeError:
return None
if len(bkids) != len(bkidsigs):
global Warns
Warns += 1
# add warning to result rather than direct print so it will line up
msg = "Warning: missing information, {} ids but {} sigs"
result = [msg.format(len(bkids), len(bkidsigs))]
else:
result = []
result += [nodeinfo_string(bkid, bkidsig, " ")
for bkid, bkidsig in zip(bkids, bkidsigs)]
if not result:
return None
return "\n ".join(result)
map_field = {
'action' : map_action,
'timestamp' : map_timestamp,
'bkids' : map_bkids,
}
map_name = {
'implicit' : 'bkids',
}
def field(name, entry, verbose=Verbose):
if not Print_Flags[name]:
return None
fieldname = map_name.get(name, name)
mapper = map_field.get(fieldname, default_mapper)
val = mapper(entry, name)
if verbose:
val = name + ": " + val
return val
def nodeinfo_raw(name, ninfo, prefix=""):
# This just formats the dictionary, which we would normally use str()
# to do, except that we want the keys sorted for deterministic output.
d = ninfo.__getstate__()
try:
keys = ninfo.field_list + ['_version_id']
except AttributeError:
keys = sorted(d.keys())
l = []
for k in keys:
l.append('%s: %s' % (repr(k), repr(d.get(k))))
if '\n' in name:
name = repr(name)
return name + ': {' + ', '.join(l) + '}'
def nodeinfo_cooked(name, ninfo, prefix=""):
try:
field_list = ninfo.field_list
except AttributeError:
field_list = []
if '\n' in name:
name = repr(name)
outlist = [name + ':'] + [
f for f in [field(x, ninfo, Verbose) for x in field_list] if f
]
if Verbose:
sep = '\n ' + prefix
else:
sep = ' '
return sep.join(outlist)
nodeinfo_string = nodeinfo_cooked
def printfield(name, entry, prefix=""):
outlist = field("implicit", entry, 0)
if outlist:
if Verbose:
print(" implicit:")
print(" " + outlist)
outact = field("action", entry, 0)
if outact:
if Verbose:
print(" action: " + outact)
else:
print(" " + outact)
def printentries(entries, location):
if Print_Entries:
for name in Print_Entries:
try:
entry = entries[name]
except KeyError:
err = "sconsign: no entry `%s' in `%s'\n" % (name, location)
sys.stderr.write(err)
else:
try:
ninfo = entry.ninfo
except AttributeError:
print(name + ":")
else:
print(nodeinfo_string(name, entry.ninfo))
printfield(name, entry.binfo)
else:
for name in sorted(entries.keys()):
entry = entries[name]
try:
ninfo = entry.ninfo
except AttributeError:
print(name + ":")
else:
print(nodeinfo_string(name, entry.ninfo))
printfield(name, entry.binfo)
class Do_SConsignDB(object):
def __init__(self, dbm_name, dbm):
self.dbm_name = dbm_name
self.dbm = dbm
def __call__(self, fname):
# The *dbm modules stick their own file suffixes on the names
# that are passed in. This causes us to jump through some
# hoops here.
try:
# Try opening the specified file name. Example:
# SPECIFIED OPENED BY self.dbm.open()
# --------- -------------------------
# .sconsign => .sconsign.dblite
# .sconsign.dblite => .sconsign.dblite.dblite
db = self.dbm.open(fname, "r")
except (IOError, OSError) as e:
print_e = e
try:
# That didn't work, so try opening the base name,
# so that if they actually passed in 'sconsign.dblite'
# (for example), the dbm module will put the suffix back
# on for us and open it anyway.
db = self.dbm.open(os.path.splitext(fname)[0], "r")
except (IOError, OSError):
# That didn't work either. See if the file name
# they specified even exists (independent of the dbm
# suffix-mangling).
try:
with open(fname, "rb"):
pass # this is a touch only, we don't use it here.
except (IOError, OSError) as e:
# Nope, that file doesn't even exist, so report that
# fact back.
print_e = e
sys.stderr.write("sconsign: %s\n" % print_e)
return
except KeyboardInterrupt:
raise
except pickle.UnpicklingError:
sys.stderr.write("sconsign: ignoring invalid `%s' file `%s'\n"
% (self.dbm_name, fname))
return
except Exception as e:
sys.stderr.write("sconsign: ignoring invalid `%s' file `%s': %s\n"
% (self.dbm_name, fname, e))
exc_type, _, _ = sys.exc_info()
if exc_type.__name__ == "ValueError" and sys.version_info < (3,0,0):
sys.stderr.write("Python 2 only supports pickle protocols 0-2.\n")
return
if Print_Directories:
for dir in Print_Directories:
try:
val = db[dir]
except KeyError:
err = "sconsign: no dir `%s' in `%s'\n" % (dir, args[0])
sys.stderr.write(err)
else:
self.printentries(dir, val)
else:
for dir in sorted(db.keys()):
self.printentries(dir, db[dir])
@staticmethod
def printentries(dir, val):
try:
print('=== ' + dir + ':')
except TypeError:
print('=== ' + dir.decode() + ':')
printentries(pickle.loads(val), dir)
def Do_SConsignDir(name):
try:
with open(name, 'rb') as fp:
try:
sconsign = SCons.SConsign.Dir(fp)
except KeyboardInterrupt:
raise
except pickle.UnpicklingError:
err = "sconsign: ignoring invalid .sconsign file `%s'\n" % name
sys.stderr.write(err)
return
except Exception as e:
err = "sconsign: ignoring invalid .sconsign file `%s': %s\n" % (name, e)
sys.stderr.write(err)
return
printentries(sconsign.entries, args[0])
except (IOError, OSError) as e:
sys.stderr.write("sconsign: %s\n" % e)
return
##############################################################################
import getopt
helpstr = """\
Usage: sconsign [OPTIONS] [FILE ...]
Options:
-a, --act, --action Print build action information.
-c, --csig Print content signature information.
-d DIR, --dir=DIR Print only info about DIR.
-e ENTRY, --entry=ENTRY Print only info about ENTRY.
-f FORMAT, --format=FORMAT FILE is in the specified FORMAT.
-h, --help Print this message and exit.
-i, --implicit Print implicit dependency information.
-r, --readable Print timestamps in human-readable form.
--raw Print raw Python object representations.
-s, --size Print file sizes.
-t, --timestamp Print timestamp information.
-v, --verbose Verbose, describe each field.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "acd:e:f:hirstv",
['act', 'action',
'csig', 'dir=', 'entry=',
'format=', 'help', 'implicit',
'raw', 'readable',
'size', 'timestamp', 'verbose'])
except getopt.GetoptError as err:
sys.stderr.write(str(err) + '\n')
print(helpstr)
sys.exit(2)
for o, a in opts:
if o in ('-a', '--act', '--action'):
Print_Flags['action'] = 1
elif o in ('-c', '--csig'):
Print_Flags['csig'] = 1
elif o in ('-d', '--dir'):
Print_Directories.append(a)
elif o in ('-e', '--entry'):
Print_Entries.append(a)
elif o in ('-f', '--format'):
# Try to map the given DB format to a known module
# name, that we can then try to import...
Module_Map = {'dblite': 'SCons.dblite', 'sconsign': None}
dbm_name = Module_Map.get(a, a)
if dbm_name:
try:
if dbm_name != "SCons.dblite":
dbm = my_import(dbm_name)
else:
import SCons.dblite
dbm = SCons.dblite
# Ensure that we don't ignore corrupt DB files,
# this was handled by calling my_import('SCons.dblite')
# again in earlier versions...
SCons.dblite.ignore_corrupt_dbfiles = 0
except ImportError:
sys.stderr.write("sconsign: illegal file format `%s'\n" % a)
print(helpstr)
sys.exit(2)
Do_Call = Do_SConsignDB(a, dbm)
else:
Do_Call = Do_SConsignDir
elif o in ('-h', '--help'):
print(helpstr)
sys.exit(0)
elif o in ('-i', '--implicit'):
Print_Flags['implicit'] = 1
elif o in ('--raw',):
nodeinfo_string = nodeinfo_raw
elif o in ('-r', '--readable'):
Readable = 1
elif o in ('-s', '--size'):
Print_Flags['size'] = 1
elif o in ('-t', '--timestamp'):
Print_Flags['timestamp'] = 1
elif o in ('-v', '--verbose'):
Verbose = 1
if Do_Call:
for a in args:
Do_Call(a)
else:
if not args:
args = [".sconsign.dblite"]
for a in args:
dbm_name = whichdb(a)
if dbm_name:
Map_Module = {'SCons.dblite': 'dblite'}
if dbm_name != "SCons.dblite":
dbm = my_import(dbm_name)
else:
import SCons.dblite
dbm = SCons.dblite
# Ensure that we don't ignore corrupt DB files,
# this was handled by calling my_import('SCons.dblite')
# again in earlier versions...
SCons.dblite.ignore_corrupt_dbfiles = 0
Do_SConsignDB(Map_Module.get(dbm_name, dbm_name), dbm)(a)
else:
Do_SConsignDir(a)
if Warns:
print("NOTE: there were %d warnings, please check output" % Warns)
sys.exit(0)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
dp_tornado/engine/template/example/model/calc/__init__.py | donghak-shin/dp-tornado | 18 | 6631733 | # -*- coding: utf-8 -*-
from dp_tornado.engine.model import Model as dpModel
class CalcModel(dpModel):
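    # Example model stub from the project template; all behaviour is inherited from dpModel.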
pass
baselines/convnets-keras/video/datagenerator.py | leix28/ML-Fabri | 0 | 6631734 |
#!/usr/bin/env python
# encoding: utf-8
# File Name: dataloader.py
import numpy as np
from progressbar import ProgressBar, Percentage, Bar, ETA, RotatingMarker
from keras.utils import np_utils
import cv2
import gc
import random
def test_generator(datasplit, feature, batch_size=32, val=True, nb_classes=14, sz=227):
labels = open(datasplit).readlines()
labels = [item.split() for item in labels]
X, Y, Z = [], [], []
ids = range(len(labels))
random.seed(0)
#random.shuffle(ids)
def f(im):
hf = int(sz/2)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
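        # Take five sz x sz crops around the point (360, 480): the centre crop
        # plus its four diagonal neighbours.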
points = [(360-hf-sz, 480-hf-sz),
(360-hf-sz, 480+hf),
(360-hf, 480-hf),
(360+hf, 480-hf-sz),
(360+hf, 480+hf)
#(360-hf-sz, 480-hf),
#(360-hf, 480-hf-sz),
#(360-hf, 480+hf),
#(360+hf, 480-hf)
#(360-hf-2*sz, 480-hf),
#(360-hf, 480-hf-2*sz),
#(360-hf, 480+hf+sz),
#(360+hf+sz, 480-hf)
]
'''
points = [(360-114, 480-114)]
'''
ret = []
for p in points:
x, y = p
ret.append(im[x:x+sz,y:y+sz])
return ret
cnt = 0
for i in ids:
label = labels[i]
flag = False
if label[2] == '0':
pass
#X_train.append(fea)
#Y_train.append(int(label[1]))
elif label[2] == '1' and val:
flag = True
elif label[2] == '2' and not(val):
flag = True
if flag:
im = cv2.imread('../../../../data/dataset/' + label[0])
fea = f(im)
for j in range(len(fea)):
X.append(fea[j])
Y.append(int(label[1]))
Z.append([label[0], int(label[1]), j])
cnt += 1
if len(X) == batch_size:
X = np.array(X).astype('float32')
X = (X - 128) / 128
Y = np_utils.to_categorical(Y, nb_classes)
yield X, Y, Z
X, Y, Z = [], [], []
if cnt > 300:
pass
#break
def load(datasplit, feature):
labels = open(datasplit).readlines()
labels = [item.split() for item in labels]
#X = np.load(feature)['arr_0']
#print len(labels), len(X)
#assert len(labels) == len(X)
X_train, Y_train, Z_train = [], [], []
X_val, Y_val, Z_val = [], [], []
X_test, Y_test, Z_test = [], [], []
    ids = list(range(len(labels)))
random.seed(0)
random.shuffle(ids)
#for label, fea in zip(labels, X):
def f(im):
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
points = [(360-114-227, 480-114-227),
(360-114-227, 480+113),
(360-114, 480-114),
(360+113, 480-114-227),
(360+113, 480+113)]
'''
points = [(360-114, 480-114)]
'''
ret = []
for p in points:
x, y = p
ret.append(im[x:x+227,y:y+227])
return ret
for i in ids:
label = labels[i]
if label[2] == '0':
pass
#X_train.append(fea)
#Y_train.append(int(label[1]))
elif label[2] == '1':
im = cv2.imread('../../../../data/dataset/' + label[0])
fea = f(im)
for j in range(len(fea)):
X_val.append(fea[j])
Y_val.append(int(label[1]))
Z_val.append([label[0], int(label[1]), j])
elif label[2] == '2':
im = cv2.imread('../../../../data/dataset/' + label[0])
fea = f(im)
for j in range(len(fea)):
X_test.append(fea[j])
Y_test.append(int(label[1]))
Z_test.append([label[0], int(label[1]), j])
if len(X_test) % 1000 == 0:
                print(len(X_test))
#break
print(len(X_train), len(X_val), len(X_test))
#X_train, Y_train = np.array(X_train), np.array(Y_train)
X_val, Y_val = np.array(X_val), np.array(Y_val)
X_test, Y_test = np.array(X_test), np.array(Y_test)
fout = open('y_test_label.txt', 'w')
for i in range(len(Y_test)):
        print(Y_test[i], file=fout)
fout.flush()
return (X_train, Y_train, Z_train), (X_val, Y_val, Z_val), (X_test, Y_test, Z_test)
def center():
def f(img):
#return img[360-112:360+112, 480-112:480+112]
return img[360-114:360+113, 480-114:480+113]
return 'center_227x227', f
def rescaled(d):
def f(img):
#return cv2.resize(img[360-d:360+d, 480-d:480+d], (224, 224))
return cv2.resize(img[360-d:360+d, 480-d:480+d], (227, 227))
return 'rescaled_{}_227x227'.format(d), f
def data_gen(datasplit, d=300, sz=227, batch_size=32, datagen=None, nb_classes=14):
gc.collect()
def f(img):
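        # Pick a random sz x sz crop whose top-left corner is sampled from
        # [360-d, 360+d-sz] on both axes.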
#return cv2.resize(img[360-d:360+d, 480-d:480+d], (224, 224))
x = random.randint(360-d, 360+d-sz)
y = random.randint(360-d, 360+d-sz)
#x = 360-114
#y = 480-114
return img[x:x+sz, y:y+sz]
labels = open(datasplit).readlines()
labels = [item.rstrip().split() for item in labels]
random.seed(0)
random.shuffle(labels)
X = []
y = []
while True:
for c, label in enumerate(labels):
if label[2] == '0':
fn = '../../../../data/dataset/' + label[0]
img = cv2.imread(fn)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
x = f(img)
X.append(x)
y.append(int(label[1]))
if len(X) == batch_size:
y = np_utils.to_categorical(y, nb_classes)
X = np.array(X).astype('float32')
X = (X-128)/128.0
if datagen:
X, y = next(datagen.flow(X, y, batch_size=batch_size))
yield X, y
del X, y
X = []
y = []
if __name__ == '__main__':
#save_images('../../data_preprocessing/material_dataset.txt', center())
#save_images('../../data_preprocessing/material_dataset.txt', rescaled(360))
#save_images('../../data_preprocessing/material_dataset.txt', rescaled(180))
#load('../../../data_preprocessing/material_dataset.txt', '../../../../storage/center_227x227.npz')
    print(next(data_gen('../../../data_preprocessing/material_dataset.txt')))
python/year2018/tests/test_sol08.py | dhruvmanila/advent-of-code | 2 | 6631735 | import utils
from year2018.sol08 import Node
def test_node():
data = utils.read(day=8, year=2018, test=True)
datastream = map(int, data.split())
root = Node.from_datastream(datastream)
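    # The day 8 sample input (test=True) should yield a metadata checksum of 138
    # and a root node value of 66.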
assert root.checksum == 138
assert root.value == 66
fourlang/service/parser_wrapper.py | recski/wikt2def | 0 | 6631736 | import stanfordnlp
import os
class ParserWrapper(object):
def set_parser(self, language):
language_model = {"en": "en_ewt", "it": "it_isdt", "de": "de_gsd", "tr": "tr_imst", "hr": "hr_set"}
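        # Download the model for this language on first use; the download is skipped
        # if the model directory already exists under ~/stanfordnlp_resources.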
if not os.path.exists(os.path.join(os.path.expanduser("~"),
"stanfordnlp_resources/{}_models".format(language_model[language]))):
stanfordnlp.download(language, confirm_if_exists=True)
self.nlp = stanfordnlp.Pipeline(lang=language)
LintCode/uncategorized/380. Intersection of Two Linked Lists/.ipynb_checkpoints/solution-checkpoint.py | vincent507cpu/Comprehensive-Algorithm-Solution | 4 | 6631737 |
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param headA: the first list
@param headB: the second list
@return: a ListNode
"""
def getIntersectionNode(self, headA, headB):
# write your code here
if not headA or not headB:
return None
a, b = headA, headB
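        # Two-pointer trick: when a pointer reaches the end of its list it restarts
        # on the other list, so both pointers traverse lenA + lenB nodes and meet at
        # the intersection node (or at None if the lists do not intersect).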
while a != b:
a = a.next if a else headB
b = b.next if b else headA
        return a
setup.py | danielgoqueiroz/preview-generator | 0 | 6631738 |
# -*- coding: utf-8 -*-
# python setup.py sdist upload -r pypi
import os
import sys
from typing import List
from preview_generator import infos
py_version = sys.version_info[:2]
try:
from setuptools import find_packages
from setuptools import setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import find_packages
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
try:
documentation = open(os.path.join(here, "README.rst")).read()
except IOError:
documentation = ""
except UnicodeDecodeError:
documentation = ""
testpkgs = [] # type: List[str]
install_requires = [
"python-magic",
"Wand",
"PyPDF2",
"pyexifinfo",
"xvfbwrapper",
"pathlib",
"pdf2image",
"cairosvg",
"ffmpeg-python",
"filelock",
]
if py_version <= (3, 5):
# NOTE - SG - 2021-04-19 - python 3.5 is dropped starting with 8.0.0
install_requires.append("Pillow<8.0.0")
else:
install_requires.append("Pillow")
tests_require = ["pytest"]
devtools_require = ["flake8", "isort", "mypy", "pre-commit"]
# add black for python 3.6+
if sys.version_info.major == 3 and sys.version_info.minor >= 6:
devtools_require.append("black")
if py_version <= (3, 4):
install_requires.append("typing")
# TODO - G.M - 2019-11-05 - restore vtk as normal requirement, vtk is not compatible
# with current version of vtk see https://gitlab.kitware.com/vtk/vtk/issues/17670,
if py_version < (3, 8):
install_requires.append("vtk")
setup(
name="preview_generator",
version=infos.__version__,
description=(
"A library for generating preview (thumbnails, text or json overview) "
"for file-based content"
),
long_description=documentation,
author="Algoo",
author_email="<EMAIL>",
url="https://github.com/algoo/preview-generator",
download_url=(
"https://github.com/algoo/preview-generator/archive/release_{}.tar.gz".format(
infos.__version__
)
),
keywords=["preview", "preview_generator", "thumbnail", "cache"],
classifiers=[
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
packages=find_packages(exclude=["ez_setup"]),
install_requires=install_requires,
python_requires=">= 3.5",
include_package_data=True,
extras_require={"testing": tests_require, "dev": tests_require + devtools_require},
test_suite="py.test", # TODO : change test_suite
tests_require=testpkgs,
package_data={"preview_generator": ["i18n/*/LC_MESSAGES/*.mo", "templates/*/*", "public/*/*"]},
entry_points={"console_scripts": ["preview = preview_generator.__main__:main"]},
)
usaspending_api/etl/management/commands/load_cleanup.py | truthiswill/usaspending-api | 0 | 6631739 | """
Cleans up the model description fields.
"""
from django.core.management.base import BaseCommand
from usaspending_api.etl.helpers import update_model_description_fields
import logging
class Command(BaseCommand):
help = "Cleaning up model description fields takes a long time, so use this to update them if you are using" \
"--noclean to skip this during load_submission."
def handle(self, *args, **options):
logger = logging.getLogger('console')
logger.info('Updating model description fields...')
update_model_description_fields()
logger.info('SKIPPING - Done in load_base - Updating awards to reflect their latest associated transaction '
'info...')
# update_awards()
logger.info('SKIPPING - Done in load_base - Updating contract-specific awards to reflect their latest '
'transaction info...')
# update_contract_awards()
logger.info('SKIPPING - Done in load_base - Updating award category variables...')
# update_award_categories()
| """
Cleans up the model description fields.
"""
from django.core.management.base import BaseCommand
from usaspending_api.etl.helpers import update_model_description_fields
import logging
class Command(BaseCommand):
help = "Cleaning up model description fields takes a long time, so use this to update them if you are using" \
"--noclean to skip this during load_submission."
def handle(self, *args, **options):
logger = logging.getLogger('console')
logger.info('Updating model description fields...')
update_model_description_fields()
logger.info('SKIPPING - Done in load_base - Updating awards to reflect their latest associated transaction '
'info...')
# update_awards()
logger.info('SKIPPING - Done in load_base - Updating contract-specific awards to reflect their latest '
'transaction info...')
# update_contract_awards()
logger.info('SKIPPING - Done in load_base - Updating award category variables...')
# update_award_categories()
| en | 0.115002 | Cleans up the model description fields. # update_awards() # update_contract_awards() # update_award_categories() | 1.992255 | 2 |
demos/common/python/html_reader.py | APrigarina/open_model_zoo | 1,031 | 6631740 | import urllib.request
import re
from html.parser import HTMLParser
import logging as log
class HTMLDataExtractor(HTMLParser):
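    # Accumulates the text found inside the requested tags (e.g. 'title' and 'p')
    # while the HTML stream is parsed.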
def __init__(self, tags):
super(HTMLDataExtractor, self).__init__()
self.started_tags = {k: [] for k in tags}
self.ended_tags = {k: [] for k in tags}
def handle_starttag(self, tag, attrs):
if tag in self.started_tags:
self.started_tags[tag].append([])
def handle_endtag(self, tag):
if tag in self.ended_tags:
txt = ''.join(self.started_tags[tag].pop())
self.ended_tags[tag].append(txt)
def handle_data(self, data):
for tag, l in self.started_tags.items():
for d in l:
d.append(data)
# read html urls and list of all paragraphs data
def get_paragraphs(url_list):
paragraphs_all = []
for url in url_list:
log.info("Get paragraphs from {}".format(url))
with urllib.request.urlopen(url) as response:
parser = HTMLDataExtractor(['title', 'p'])
charset='utf-8'
if 'Content-type' in response.headers:
m = re.match(r'.*charset=(\S+).*', response.headers['Content-type'])
if m:
charset = m.group(1)
data = response.read()
parser.feed(data.decode(charset))
title = ' '.join(parser.ended_tags['title'])
paragraphs = parser.ended_tags['p']
log.info("Page '{}' has {} chars in {} paragraphs".format(title, sum(len(p) for p in paragraphs), len(paragraphs)))
paragraphs_all.extend(paragraphs)
return paragraphs_all
spidermon/contrib/monitors/mixins/spider.py | zanachka/spidermon | 405 | 6631741 |
from spidermon.contrib.stats.analyzer import StatsAnalyzer
from spidermon.contrib.stats.counters import DictPercentCounter, PercentCounter
from spidermon.exceptions import NotConfigured
from .job import JobMonitorMixin
from .stats import StatsMonitorMixin
DOWNLOADER_RESPONSE_COUNT = "downloader/response_count"
DOWNLOADER_RESPONSE_STATUS = "downloader/response_status_count/"
DOWNLOADER_STATUS_CODES_INFORMATIONAL = [r"1\d{2}$"]
DOWNLOADER_STATUS_CODES_SUCCESSFUL = [r"2\d{2}$"]
DOWNLOADER_STATUS_CODES_REDIRECTIONS = [r"3\d{2}$"]
DOWNLOADER_STATUS_CODES_BAD_REQUESTS = [r"4\d{2}$"]
DOWNLOADER_STATUS_CODES_INTERNAL_SERVER_ERRORS = [r"5\d{2}$"]
DOWNLOADER_STATUS_CODES_OTHERS = ["[^1-5].*$"]
DOWNLOADER_STATUS_CODES_ERRORS = (
DOWNLOADER_STATUS_CODES_BAD_REQUESTS
+ DOWNLOADER_STATUS_CODES_INTERNAL_SERVER_ERRORS
)
class ResponsesInfo:
def __init__(self, stats):
self._stats_analyzer = StatsAnalyzer(stats=stats)
self.count = self._stats_analyzer.search(DOWNLOADER_RESPONSE_COUNT + "$").get(
DOWNLOADER_RESPONSE_COUNT, 0
)
# all status codes
self.all = DictPercentCounter(total=self.count)
self._add_status_codes(pattern=None, target=self.all)
# 1xx. informational
self.informational = DictPercentCounter(total=self.count)
self._add_status_codes(
pattern=DOWNLOADER_STATUS_CODES_INFORMATIONAL, target=self.informational
)
# 2xx. successful
self.successful = DictPercentCounter(total=self.count)
self._add_status_codes(
pattern=DOWNLOADER_STATUS_CODES_SUCCESSFUL, target=self.successful
)
# 3xx. redirections
self.redirections = DictPercentCounter(total=self.count)
self._add_status_codes(
pattern=DOWNLOADER_STATUS_CODES_REDIRECTIONS, target=self.redirections
)
# 4xx. bad requests
self.bad_requests = DictPercentCounter(total=self.count)
self._add_status_codes(
pattern=DOWNLOADER_STATUS_CODES_BAD_REQUESTS, target=self.bad_requests
)
# 5xx. internal server errors
self.internal_server_errors = DictPercentCounter(total=self.count)
self._add_status_codes(
pattern=DOWNLOADER_STATUS_CODES_INTERNAL_SERVER_ERRORS,
target=self.internal_server_errors,
)
# >= 6xx. others
self.others = DictPercentCounter(total=self.count)
self._add_status_codes(
pattern=DOWNLOADER_STATUS_CODES_OTHERS, target=self.others
)
# errors (4xx + 5xx)
self.errors = DictPercentCounter(total=self.count)
self._add_status_codes(
pattern=DOWNLOADER_STATUS_CODES_ERRORS, target=self.errors
)
def _add_status_codes(self, pattern, target):
for code, counter in self._get_response_codes(pattern).items():
target.add_value(code, counter.count)
def _get_response_codes(self, codes=None):
codes = codes or ["[^/]+"]
return_codes = {}
for code in codes:
return_codes.update(self._get_response_code(code))
return return_codes
def _get_response_code(self, code):
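        # Collect stats keys matching 'downloader/response_status_count/<code>'
        # and wrap each count in a PercentCounter relative to the total response count.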
return {
code: PercentCounter(count, self.count)
for count, code in self._stats_analyzer.search(
pattern=DOWNLOADER_RESPONSE_STATUS + ("(%s)$" % code),
include_matches=True,
).values()
}
class SpiderMonitorMixin(StatsMonitorMixin, JobMonitorMixin):
@property
def crawler(self):
if not self.data.crawler:
raise NotConfigured("Crawler not available!")
return self.data.crawler
@property
def spider(self):
if not self.data.spider:
raise NotConfigured("Spider not available!")
return self.data.spider
@property
def responses(self):
if not hasattr(self, "_responses"):
self._responses = ResponsesInfo(self.stats)
return self._responses
examples/machining/turning/turning_advanced.py | shanep805/p3l-library | 0 | 6631742 | # A - Geometric Definitions
# A.1 - Set units
units_in()
# A.2 - Set our geometric interrogation.
# Read more here: https://help.paperlessparts.com/article/42-p3l-language-features
lathe = analyze_lathe()
# B - Part Variables
# B.1 - Set Turning Radius, Turning Length, Radial Buffer, Length Buffer, Part Volume, and Stock Volume
turning_radius = var('Turning Radius', 0, 'Outermost radius of part in inches', number, frozen=False)
turning_length = var('Turning Length', 0, 'Length of part along turning axis in inches', number, frozen=False)
radial_buffer = var('Radial Stock Buffer, in', 0.0625, 'Buffer applied to the outer radius of part in inches', number)
length_buffer = var('Length Stock Buffer, in', 0.125, 'Buffer to the length of the part along the turning axis in inches', number)
stock_volume = var('Stock Volume (in^3)', 0, '', number, frozen = False)
part_volume = var('Part Volume', 0, '', number, frozen = False)
volume_removed = var('Volume Removed (in^3)', 0, '', number, frozen = False)
percent_volume_removed = var('Percent Volume Removed (%)', 0, '', number, frozen = False)
# B.2 - Update variables based on interrogated values
turning_radius.update(lathe.stock_radius)
turning_radius.freeze()
turning_length.update(lathe.stock_length)
turning_length.freeze()
stock_radius = turning_radius + radial_buffer
stock_length = turning_length + length_buffer
part_volume.update(round(part.volume, 3))
part_volume.freeze()
stock_volume.update(round(stock_radius**2 * 3.1415926535 * turning_length, 3))
stock_volume.freeze()
# C - Project Variables
# C.1 Define Features
labor_rate = var('Labor Rate ($)', 0, '', currency)
features = var('Feature Count', 0, '', number, frozen=False)
setup_time_per_setup = var('Setup Time Per Setup', 0.5, 'Setup time per setup in hours', number)
setup_count = var('Setup Count', 0, '', number, frozen = False)
setup_time = var('setup_time', 0, 'Setup time, specified in hours', number, frozen = False)
setup_count.update(lathe.setup_count)
setup_count.freeze()
setup_time.update(setup_count * setup_time_per_setup)
setup_time.freeze()
# C.2 - Gather Features and Feedback Count
feature_count = len(get_features(lathe))
feedback_count = len(get_feedback(lathe))
features.update(feature_count + feedback_count)
features.freeze()
# C.3 - Define number of tools required based on Feature and Feedback count. The default number of features per tool is 4
num_tools = var('Number of Tools', 0, '', number, frozen=False)
num_tools.update(ceil(features / 4))
num_tools.freeze()
# D - Runtime Estimates
# D.1 - Set Removal Rate, Volume Cut Rate, Volume Removed, Runtime, Runtime per Setup and Runtime Multiplier variables
removal_rate = var('Material Removal Rate', 1, 'Material removal rate in cu.in./min', number, frozen = False)
vol_cut_rate = var('Volume Cut Rate (in^3 / hr)', 0, '', number, frozen = False)
runtime = var('runtime', 0, 'Runtime, specified in hours', number, frozen=False)
runtime_per_setup = var('Runtime per Setup (min)', 2, '', number)
runtime_mult = var('Runtime Multiplier', 1, '', number, frozen = False)
# D.2 - Update our Runtime Multiplier depending on which material family we use.
if part.material_family == 'Aluminum':
runtime_mult.update(1)
elif 'Steel' in part.material_family:
runtime_mult.update(1.5)
elif part.material_family == 'Titanium':
runtime_mult.update(2)
else:
runtime_mult.update(1)
runtime_mult.freeze()
# D.3 - Update Volume Removed based on interrogated values
volume_removed.update(round(stock_volume - part.volume, 3))
volume_removed.freeze()
percent_volume_removed.update(round((volume_removed / stock_volume) * 100, 2))
percent_volume_removed.freeze()
# D.4 - Update Removal Rate based on total volume removed
if volume_removed <= 1:
removal_rate.update(0.50 / runtime_mult)
elif 1 < volume_removed <= 9:
removal_rate.update(1 / runtime_mult)
elif 9 < volume_removed <= 16:
removal_rate.update(1.5 / runtime_mult)
elif 16 < volume_removed:
removal_rate.update(2 / runtime_mult)
removal_rate.freeze()
# D.5 - Update Volume Cut Rate based on removal rate - defined in cubic inches per hour
vol_cut_rate.update(70 * removal_rate)
vol_cut_rate.freeze()
# D.6 Set the 'Runtime Per Tool' - how long does it take for the typical tool change to occur?
tool_rate = var('Runtime Per Tool (seconds)', 15, '', number)
tool_runtime = var('Tool Runtime (minutes)', 0, '', number, frozen=False)
tool_runtime.update((tool_rate * num_tools) / 60)
tool_runtime.freeze()
# D.7 - Calculate estimated runtime based on defined variables. To set a minimum runtime, use (max(1/6, calculations...).
runtime.update((stock_volume / vol_cut_rate) # A - Calculate our runtime estimate by dividing the total volume removed by our defined removal rate.
+ (get_workpiece_value('Tool Runtime', 0) / 60) # B - Add Tool Runtime, calculated in our Critical Information operation.
+ ((runtime_per_setup / 60) * setup_count)) # C - Add Setup Runtime.
runtime.freeze()
# E - Compile Calculations
# E.1 - Compile Cycle and Setup Costs
total_cycle_cost = part.qty * runtime * labor_rate
setup_cost = setup_time * labor_rate
# E.2 - Compile our total costs
PRICE = setup_cost + total_cycle_cost
# E.3 - Define how many days this operation will contribute to the project lead time.
DAYS = 0
# F - Workpiece Values
# F.1 - Set workpiece values to be used in subsequent operations.
set_workpiece_value('total_setup_time', get_workpiece_value('total_setup_time', 0) + setup_time) # A - Cumulative project setup time
set_workpiece_value('total_runtime', get_workpiece_value('total_runtime', 0) + runtime) # B - Cumulative project runtime
| # A - Geometric Definitions
# A.1 - Set units
units_in()
# A.2 - Set our geometric interrogation.
# Read more here: https://help.paperlessparts.com/article/42-p3l-language-features
lathe = analyze_lathe()
# B - Part Variables
# B.1 - Set Turning Radius, Turning Length, Radial Buffer, Length Buffer, Part Volume, and Stock Volume
turning_radius = var('Turning Radius', 0, 'Outermost radius of part in inches', number, frozen=False)
turning_length = var('Turning Length', 0, 'Length of part along turning axis in inches', number, frozen=False)
radial_buffer = var('Radial Stock Buffer, in', 0.0625, 'Buffer applied to the outer radius of part in inches', number)
length_buffer = var('Length Stock Buffer, in', 0.125, 'Buffer to the length of the part along the turning axis in inches', number)
stock_volume = var('Stock Volume (in^3)', 0, '', number, frozen = False)
part_volume = var('Part Volume', 0, '', number, frozen = False)
volume_removed = var('Volume Removed (in^3)', 0, '', number, frozen = False)
percent_volume_removed = var('Percent Volume Removed (%)', 0, '', number, frozen = False)
# B.2 - Update variables based on interrogated values
turning_radius.update(lathe.stock_radius)
turning_radius.freeze()
turning_length.update(lathe.stock_length)
turning_length.freeze()
stock_radius = turning_radius + radial_buffer
stock_length = turning_length + length_buffer
part_volume.update(round(part.volume, 3))
part_volume.freeze()
stock_volume.update(round(stock_radius**2 * 3.1415926535 * turning_length, 3))
stock_volume.freeze()
# C - Project Variables
# C.1 Define Features
labor_rate = var('Labor Rate ($)', 0, '', currency)
features = var('Feature Count', 0, '', number, frozen=False)
setup_time_per_setup = var('Setup Time Per Setup', 0.5, 'Setup time per setup in hours', number)
setup_count = var('Setup Count', 0, '', number, frozen = False)
setup_time = var('setup_time', 0, 'Setup time, specified in hours', number, frozen = False)
setup_count.update(lathe.setup_count)
setup_count.freeze()
setup_time.update(setup_count * setup_time_per_setup)
setup_time.freeze()
# C.2 - Gather Features and Feedback Count
feature_count = len(get_features(lathe))
feedback_count = len(get_feedback(lathe))
features.update(feature_count + feedback_count)
features.freeze()
# C.3 - Define number of tools required based on Feature and Feedback count. The default number of features per tool is 4
num_tools = var('Number of Tools', 0, '', number, frozen=False)
num_tools.update(ceil(features / 4))
num_tools.freeze()
# D - Runtime Estimates
# D.1 - Set Removal Rate, Volume Cut Rate, Volume Removed, Runtime, Runtime per Setup and Runtime Multiplier variables
removal_rate = var('Material Removal Rate', 1, 'Material removal rate in cu.in./min', number, frozen = False)
vol_cut_rate = var('Volume Cut Rate (in^3 / hr)', 0, '', number, frozen = False)
runtime = var('runtime', 0, 'Runtime, specified in hours', number, frozen=False)
runtime_per_setup = var('Runtime per Setup (min)', 2, '', number)
runtime_mult = var('Runtime Multiplier', 1, '', number, frozen = False)
# D.2 - Update our Runtime Multiplier depending on which material family we use.
if part.material_family == 'Aluminum':
runtime_mult.update(1)
elif 'Steel' in part.material_family:
runtime_mult.update(1.5)
elif part.material_family == 'Titanium':
runtime_mult.update(2)
else:
runtime_mult.update(1)
runtime_mult.freeze()
# D.3 - Update Volume Removed based on interrogated values
volume_removed.update(round(stock_volume - part.volume, 3))
volume_removed.freeze()
percent_volume_removed.update(round((volume_removed / stock_volume) * 100, 2))
percent_volume_removed.freeze()
# D.4 - Update Removal Rate based on total volume removed
if volume_removed <= 1:
removal_rate.update(0.50 / runtime_mult)
elif 1 < volume_removed <= 9:
removal_rate.update(1 / runtime_mult)
elif 9 < volume_removed <= 16:
removal_rate.update(1.5 / runtime_mult)
elif 16 < volume_removed:
removal_rate.update(2 / runtime_mult)
removal_rate.freeze()
# D.5 - Update Volume Cut Rate based on removal rate - defined in cubic inches per hour
vol_cut_rate.update(70 * removal_rate)
vol_cut_rate.freeze()
# D.6 Set the 'Runtime Per Tool' - how long does it take for the typical tool change to occur?
tool_rate = var('Runtime Per Tool (seconds)', 15, '', number)
tool_runtime = var('Tool Runtime (minutes)', 0, '', number, frozen=False)
tool_runtime.update((tool_rate * num_tools) / 60)
tool_runtime.freeze()
# D.7 - Calculate estimated runtime based on defined variables. To set a minimum runtime, use max(1/6, calculations...).
runtime.update((stock_volume / vol_cut_rate) # A - Calculate our runtime estimate by dividing the total volume removed by our defined removal rate.
+ (get_workpiece_value('Tool Runtime', 0) / 60) # B - Add Tool Runtime, calculated in our Critical Information operation.
+ ((runtime_per_setup / 60) * setup_count)) # C - Add Setup Runtime.
runtime.freeze()
# E - Compile Calculations
# E.1 - Compile Cycle and Setup Costs
total_cycle_cost = part.qty * runtime * labor_rate
setup_cost = setup_time * labor_rate
# E.2 - Compile our total costs
PRICE = setup_cost + total_cycle_cost
# E.3 - Define how many days this operation will contribute to the project lead time.
DAYS = 0
# F - Workpiece Values
# F.1 - Set workpiece values to be used in subsequent operations.
set_workpiece_value('total_setup_time', get_workpiece_value('total_setup_time', 0) + setup_time) # A - Cumulative project setup time
set_workpiece_value('total_runtime', get_workpiece_value('total_runtime', 0) + runtime) # B - Cumulative project runtime
| en | 0.790526 | # A - Geometric Definitions # A.1 - Set units # A.2 - Set our geometric interrogation. # Read more here: https://help.paperlessparts.com/article/42-p3l-language-features # B - Part Variables # B.1 - Set Turning Radius, Turning Length, Radial Buffer, Length Buffer, Part Volume, and Stock Volume # B.2 - Update variables based on interrogated values # C - Project Variables # C.1 Define Features # C.2 - Gather Features and Feedback Count # C.3 - Define number of tools required based on Feature and Feedback count. The default number of features per tool is 4 # D - Runtime Estimates # D.1 - Set Removal Rate, Volume Cut Rate, Volume Removed, Runtime, Runtime per Setup and Runtime Multiplier variables # D.2 - Update our Runtime Multiplier depending on which material family we use. # D.3 - Update Volume Removed based on interrogated values # D.4 - Update Removal Rate based on total volume removed # D.5 - Update Volume Cut Rate based on removal rate - defined in cubic inches per hour # D.6 Set the 'Runtime Per Tool' - how long does it take for the typical tool change to occur? # D.7 - Calculate estimated runtime based on defined variables. To set a minimum runtime, use (max(1/6, calculations...). # A - Calculate our runtime estimate by dividing the total volume removed by our defined removal rate. # B - Add Tool Runtime, calculated in our Critical Information operation. # C - Add Setup Runtime. # E - Compile Calculations # E.1 - Compile Cycle and Setup Costs # E.2 - Compile our total costs # E.3 - Define how many days this operation will contribute to the project lead time. # F - Workpiece Values # F.1 - Set workpiece values to be used in subsequent operations. # A - Cumulative project setup time # B - Cumulative project runtime | 3.118858 | 3 |
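The estimator above reduces to straightforward arithmetic: stock volume minus part volume gives the material removed, which (scaled by a material multiplier) selects a removal-rate tier and an hourly cut rate; runtime is stock volume over the cut rate plus tool-change and per-setup minutes, and PRICE is labor rate times setup hours plus quantity times runtime. The following is a minimal stand-alone Python sketch of that same arithmetic, with made-up example inputs; the P3L var()/update()/freeze() plumbing and workpiece values are omitted.

# Stand-alone sketch of the lathe runtime/price arithmetic above; all inputs
# are example values, not Paperless Parts API calls.
import math

def estimate_lathe_price(stock_radius, turning_length, part_volume, qty,
                         material_family='Aluminum', feature_count=8,
                         labor_rate=75.0, setup_count=2):
    stock_volume = round(stock_radius ** 2 * math.pi * turning_length, 3)
    volume_removed = round(stock_volume - part_volume, 3)
    # D.2 - material multiplier
    if material_family == 'Aluminum':
        runtime_mult = 1.0
    elif 'Steel' in material_family:
        runtime_mult = 1.5
    elif material_family == 'Titanium':
        runtime_mult = 2.0
    else:
        runtime_mult = 1.0
    # D.4 / D.5 - removal-rate tiers and hourly volume cut rate
    if volume_removed <= 1:
        removal_rate = 0.50 / runtime_mult
    elif volume_removed <= 9:
        removal_rate = 1.0 / runtime_mult
    elif volume_removed <= 16:
        removal_rate = 1.5 / runtime_mult
    else:
        removal_rate = 2.0 / runtime_mult
    vol_cut_rate = 70 * removal_rate
    # C.3 / D.6 - roughly 4 features per tool, 15 seconds per tool change
    num_tools = math.ceil(feature_count / 4.0)
    tool_runtime_minutes = (15.0 * num_tools) / 60
    # D.7 / E - runtime in hours, then setup and cycle cost
    runtime = (stock_volume / vol_cut_rate
               + tool_runtime_minutes / 60
               + (2.0 / 60) * setup_count)      # 2 min of runtime per setup
    setup_time = setup_count * 0.5              # 0.5 h per setup
    return setup_time * labor_rate + qty * runtime * labor_rate

print(round(estimate_lathe_price(1.0, 4.0, part_volume=6.5, qty=10), 2))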
captain_comeback/test/queue_assertion_helper.py | almathew/captain-comeback | 0 | 6631743 | <gh_stars>0
# coding:utf-8
from six.moves import queue
class QueueAssertionHelper(object):
ANY_CG = object()
def assertHasMessageForCg(self, q, message_class, cg_path, **attrs):
msg = q.get_nowait()
self.assertIsInstance(msg, message_class)
if cg_path is self.ANY_CG:
return
self.assertEqual(cg_path, msg.cg.path)
for k, v in attrs.items():
self.assertEqual(v, getattr(msg, k))
def assertEvnetuallyHasMessageForCg(self, *args, **kwargs):
for i in range(100):
yield
try:
self.assertHasMessageForCg(*args, **kwargs)
except AssertionError:
pass
else:
break
else:
self.assertHasMessageForCg(*args, **kwargs)
def assertHasNoMessages(self, q):
self.assertRaises(queue.Empty, q.get_nowait)
| # coding:utf-8
from six.moves import queue
class QueueAssertionHelper(object):
ANY_CG = object()
def assertHasMessageForCg(self, q, message_class, cg_path, **attrs):
msg = q.get_nowait()
self.assertIsInstance(msg, message_class)
if cg_path is self.ANY_CG:
return
self.assertEqual(cg_path, msg.cg.path)
for k, v in attrs.items():
self.assertEqual(v, getattr(msg, k))
def assertEvnetuallyHasMessageForCg(self, *args, **kwargs):
for i in range(100):
yield
try:
self.assertHasMessageForCg(*args, **kwargs)
except AssertionError:
pass
else:
break
else:
self.assertHasMessageForCg(*args, **kwargs)
def assertHasNoMessages(self, q):
self.assertRaises(queue.Empty, q.get_nowait) | en | 0.786515 | # coding:utf-8 | 2.48229 | 2 |
metaci/testresults/tests/test_robot_importer.py | giveclarity/MetaCI | 0 | 6631744 | import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
from pathlib import Path, PurePath
from unittest import mock
from shutil import copyfile
import pytest
from cumulusci.utils import elementtree_parse_file, temporary_dir
from django.utils import timezone
from metaci.build.exceptions import BuildError
from metaci.build.models import BuildFlowAsset
from metaci.conftest import FlowTaskFactory
from metaci.testresults import models, robot_importer
from metaci.build.tests.test_flows import TEST_ROBOT_OUTPUT_FILES
@pytest.mark.django_db
def test_invalid_test_result_filepath():
with pytest.raises(BuildError):
robot_importer.import_robot_test_results(mock.Mock, "invalid/file/path")
@pytest.mark.django_db
def test_nested_suites():
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_nested_suites.xml",
Path(output_dir) / "output.xml",
)
flowtask = FlowTaskFactory()
robot_importer.import_robot_test_results(flowtask, output_dir)
assert models.TestResult.objects.all(), "Test results should have been created"
test_result = models.TestResult.objects.get(method__name__contains="AAAAA")
assert test_result.duration == 0.25
assert test_result.method.name == "AAAAA Test Set Login Url"
assert test_result.method.testclass.name == "Nested/Cumulusci/Base"
@pytest.mark.django_db
def test_basic_parsing():
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_1.xml", Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
test_results = models.TestResult.objects.filter(method__name="FakeTestResult")
assert test_results
@pytest.mark.django_db
def test_duration_calculations():
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_setup_teardown.xml",
Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
correct = 14.002
duration = models.TestResult.objects.get(method__name="FakeTestResult2").duration
assert duration == correct
correct = 15.001
duration = models.TestResult.objects.get(
method__name="FakeTestResult_setup_no_teardown"
).duration
assert duration == correct
correct = 20.002
duration = models.TestResult.objects.get(
method__name="FakeTestResult_teardown_no_setup"
).duration
assert duration == correct
@pytest.mark.django_db
def test_field_robot_task():
"""verify that the task field of a TestResult has been
set for a robot test result
The importer uses the timestamp of the output file to figure
out which task generated the file. It then uses the options
of this task to generate the log files.
This test creates a few FlowTask objects where one has a start
time and end time that encompasses the mtime of the output
file. That task should get saved with the test result.
"""
with temporary_dir() as output_dir:
output_dir = Path(output_dir)
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_setup_teardown.xml",
output_dir / "output.xml",
)
output_xml_mtime = timezone.make_aware(
datetime.fromtimestamp(output_dir.stat().st_mtime)
)
flowtask = FlowTaskFactory(stepnum=2)
time_offsets = ((-60, -30), (-29, +1), (+2, +10))
FlowTaskFactory.reset_sequence(value=1)
for (start_offset, end_offset) in time_offsets:
time_start = output_xml_mtime + timedelta(seconds=start_offset)
time_end = output_xml_mtime + timedelta(seconds=end_offset)
task = FlowTaskFactory(
build_flow=flowtask.build_flow,
time_start=time_start,
time_end=time_end,
)
task.save()
robot_importer.import_robot_test_results(flowtask, output_dir)
for result in models.TestResult.objects.all():
assert result.task is not None
assert result.task.stepnum == "2"
@pytest.mark.django_db
def test_import_all_tests():
"""Verifies that we import all tests in a suite"""
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_failures.xml",
Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
failing_test_results = models.TestResult.objects.filter(outcome="Fail")
passing_test_results = models.TestResult.objects.filter(outcome="Pass")
assert len(failing_test_results) == 3
assert len(passing_test_results) == 1
@pytest.mark.django_db
def test_field_keyword_and_message():
"""Verify that the keyword and message fields are populated"""
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_failures.xml",
Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
test_result = models.TestResult.objects.get(method__name="Failing test 1")
assert test_result.message == "Danger, <NAME>!"
assert test_result.robot_keyword == "Keyword with failure"
@pytest.mark.django_db
def test_field_keyword_and_message_nested_keywords():
"""Verify that the keyword and message fields are set when failure is in a nested keyword"""
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_failures.xml",
Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
test_result = models.TestResult.objects.get(method__name="Failing test 2")
assert test_result.message == "I'm sorry, Dave. I'm afraid I can't do that."
assert test_result.robot_keyword == "Keyword which calls a failing keyword"
@pytest.mark.django_db
def test_field_keyword_and_message_passing_test():
"""Verify that the failing_keyword field is set correctly for passing tests"""
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_failures.xml",
Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
test_result = models.TestResult.objects.get(method__name="Passing test")
assert test_result.message == "Life is good, yo."
assert test_result.robot_keyword is None
@pytest.mark.django_db
def test_import_robot_tags():
"""Verify that robot tags are added to the database"""
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_1.xml", Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
test_results = models.TestResult.objects.filter(method__name="FakeTestResult")
assert test_results[0].robot_tags == "tag with spaces,w-123456"
@pytest.mark.django_db
def test_execution_errors():
"""Verify pre-test execution errors are imported
If robot has errors before the first test runs (eg: import
errors) these errors were being thrown away. This test verifies
that execution errors appear in imported test results.
"""
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_import_errors.xml",
Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
test_result = models.TestResult.objects.last()
root = ET.fromstring(test_result.robot_xml)
msg_elements = root.findall("./errors/msg")
error_messages = [element.text for element in msg_elements]
expected_error_messages = [
"Error in file 'example.robot' on line 2: Library setting requires value.",
"Error in file 'example.robot' on line 3: Resource setting requires value.",
]
assert len(error_messages) == len(expected_error_messages)
@pytest.mark.django_db
def test_screenshots_generated():
"""Verify that screenshots were created properly.
For the robot_screenshot.xml output file, there should be:
* A BuildFlowAsset created for the output.xml file
* A BuildFlowAsset created for the screenshot taken during suite setup
* A TestResultAsset created for the 'Via UI' robot test
"""
with temporary_dir() as output_dir:
output_dir = Path(output_dir)
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_screenshots.xml",
output_dir / "output.xml",
)
open(output_dir / "selenium-screenshot-1.png", mode="w+")
open(output_dir / "selenium-screenshot-2.png", mode="w+")
flowtask = FlowTaskFactory()
robot_importer.import_robot_test_results(flowtask, output_dir)
# output.xml asset created
assert 1 == BuildFlowAsset.objects.filter(category="robot-output").count()
# suite setup screenshot assets created
assert 1 == BuildFlowAsset.objects.filter(category="robot-screenshot").count()
# No screenshots created for 'Via API' test
tr_method = models.TestMethod.objects.get(name="Via API")
test_api = models.TestResult.objects.get(method=tr_method, task=flowtask)
assert 0 == test_api.assets.count()
# One screenshot created for 'Via UI' test
tr_method = models.TestMethod.objects.get(name="Via UI")
test_ui = models.TestResult.objects.get(method=tr_method, task=flowtask)
assert 1 == test_ui.assets.count()
@pytest.mark.django_db
def test_find_screenshots():
path = PurePath(__file__).parent / "robot_screenshots.xml"
tree = elementtree_parse_file(path)
screenshots = robot_importer.find_screenshots(tree.getroot())
assert len(screenshots) == 2
| import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
from pathlib import Path, PurePath
from unittest import mock
from shutil import copyfile
import pytest
from cumulusci.utils import elementtree_parse_file, temporary_dir
from django.utils import timezone
from metaci.build.exceptions import BuildError
from metaci.build.models import BuildFlowAsset
from metaci.conftest import FlowTaskFactory
from metaci.testresults import models, robot_importer
from metaci.build.tests.test_flows import TEST_ROBOT_OUTPUT_FILES
@pytest.mark.django_db
def test_invalid_test_result_filepath():
with pytest.raises(BuildError):
robot_importer.import_robot_test_results(mock.Mock, "invalid/file/path")
@pytest.mark.django_db
def test_nested_suites():
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_nested_suites.xml",
Path(output_dir) / "output.xml",
)
flowtask = FlowTaskFactory()
robot_importer.import_robot_test_results(flowtask, output_dir)
assert models.TestResult.objects.all(), "Test results should have been created"
test_result = models.TestResult.objects.get(method__name__contains="AAAAA")
assert test_result.duration == 0.25
assert test_result.method.name == "AAAAA Test Set Login Url"
assert test_result.method.testclass.name == "Nested/Cumulusci/Base"
@pytest.mark.django_db
def test_basic_parsing():
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_1.xml", Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
test_results = models.TestResult.objects.filter(method__name="FakeTestResult")
assert test_results
@pytest.mark.django_db
def test_duration_calculations():
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_setup_teardown.xml",
Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
correct = 14.002
duration = models.TestResult.objects.get(method__name="FakeTestResult2").duration
assert duration == correct
correct = 15.001
duration = models.TestResult.objects.get(
method__name="FakeTestResult_setup_no_teardown"
).duration
assert duration == correct
correct = 20.002
duration = models.TestResult.objects.get(
method__name="FakeTestResult_teardown_no_setup"
).duration
assert duration == correct
@pytest.mark.django_db
def test_field_robot_task():
"""verify that the task field of a TestResult has been
set for a robot test result
The importer uses the timestamp of the output file to figure
out which task generated the file. It then uses the options
of this task to generate the log files.
This test creates a few FlowTask objects where one has a start
time and end time that encompasses the mtime of the output
file. That task should get saved with the test result.
"""
with temporary_dir() as output_dir:
output_dir = Path(output_dir)
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_setup_teardown.xml",
output_dir / "output.xml",
)
output_xml_mtime = timezone.make_aware(
datetime.fromtimestamp(output_dir.stat().st_mtime)
)
flowtask = FlowTaskFactory(stepnum=2)
time_offsets = ((-60, -30), (-29, +1), (+2, +10))
FlowTaskFactory.reset_sequence(value=1)
for (start_offset, end_offset) in time_offsets:
time_start = output_xml_mtime + timedelta(seconds=start_offset)
time_end = output_xml_mtime + timedelta(seconds=end_offset)
task = FlowTaskFactory(
build_flow=flowtask.build_flow,
time_start=time_start,
time_end=time_end,
)
task.save()
robot_importer.import_robot_test_results(flowtask, output_dir)
for result in models.TestResult.objects.all():
assert result.task is not None
assert result.task.stepnum == "2"
@pytest.mark.django_db
def test_import_all_tests():
"""Verifies that we import all tests in a suite"""
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_failures.xml",
Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
failing_test_results = models.TestResult.objects.filter(outcome="Fail")
passing_test_results = models.TestResult.objects.filter(outcome="Pass")
assert len(failing_test_results) == 3
assert len(passing_test_results) == 1
@pytest.mark.django_db
def test_field_keyword_and_message():
"""Verify that the keyword and message fields are populated"""
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_failures.xml",
Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
test_result = models.TestResult.objects.get(method__name="Failing test 1")
assert test_result.message == "Danger, <NAME>!"
assert test_result.robot_keyword == "Keyword with failure"
@pytest.mark.django_db
def test_field_keyword_and_message_nested_keywords():
"""Verify that the keyword and message fields are set when failure is in a nested keyword"""
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_failures.xml",
Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
test_result = models.TestResult.objects.get(method__name="Failing test 2")
assert test_result.message == "I'm sorry, Dave. I'm afraid I can't do that."
assert test_result.robot_keyword == "Keyword which calls a failing keyword"
@pytest.mark.django_db
def test_field_keyword_and_message_passing_test():
"""Verify that the failing_keyword field is set correctly for passing tests"""
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_failures.xml",
Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
test_result = models.TestResult.objects.get(method__name="Passing test")
assert test_result.message == "Life is good, yo."
assert test_result.robot_keyword is None
@pytest.mark.django_db
def test_import_robot_tags():
"""Verify that robot tags are added to the database"""
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_1.xml", Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
test_results = models.TestResult.objects.filter(method__name="FakeTestResult")
assert test_results[0].robot_tags == "tag with spaces,w-123456"
@pytest.mark.django_db
def test_execution_errors():
"""Verify pre-test execution errors are imported
If robot has errors before the first test runs (eg: import
errors) these errors were being thrown away. This test verifies
that execution errors appear in imported test results.
"""
with temporary_dir() as output_dir:
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_with_import_errors.xml",
Path(output_dir) / "output.xml",
)
robot_importer.import_robot_test_results(FlowTaskFactory(), output_dir)
test_result = models.TestResult.objects.last()
root = ET.fromstring(test_result.robot_xml)
msg_elements = root.findall("./errors/msg")
error_messages = [element.text for element in msg_elements]
expected_error_messages = [
"Error in file 'example.robot' on line 2: Library setting requires value.",
"Error in file 'example.robot' on line 3: Resource setting requires value.",
]
assert len(error_messages) == len(expected_error_messages)
@pytest.mark.django_db
def test_screenshots_generated():
"""Verify that screenshots were created properly.
For the robot_screenshot.xml output file, there should be:
* A BuildFlowAsset created for the output.xml file
* A BuildFlowAsset created for the screenshot taken during suite setup
* A TestResultAsset created for the 'Via UI' robot test
"""
with temporary_dir() as output_dir:
output_dir = Path(output_dir)
copyfile(
TEST_ROBOT_OUTPUT_FILES / "robot_screenshots.xml",
output_dir / "output.xml",
)
open(output_dir / "selenium-screenshot-1.png", mode="w+")
open(output_dir / "selenium-screenshot-2.png", mode="w+")
flowtask = FlowTaskFactory()
robot_importer.import_robot_test_results(flowtask, output_dir)
# output.xml asset created
assert 1 == BuildFlowAsset.objects.filter(category="robot-output").count()
# suite setup screenshot assets created
assert 1 == BuildFlowAsset.objects.filter(category="robot-screenshot").count()
# No screenshots created for 'Via API' test
tr_method = models.TestMethod.objects.get(name="Via API")
test_api = models.TestResult.objects.get(method=tr_method, task=flowtask)
assert 0 == test_api.assets.count()
# One screenshot created for 'Via UI' test
tr_method = models.TestMethod.objects.get(name="Via UI")
test_ui = models.TestResult.objects.get(method=tr_method, task=flowtask)
assert 1 == test_ui.assets.count()
@pytest.mark.django_db
def test_find_screenshots():
path = PurePath(__file__).parent / "robot_screenshots.xml"
tree = elementtree_parse_file(path)
screenshots = robot_importer.find_screenshots(tree.getroot())
assert len(screenshots) == 2
| en | 0.889471 | verify that the task field of a TestResult has been set for a robot test result The importer uses the timestamp of the output file to figure out which task generated the file. It then uses the options of this task to generate the log files. This test creates a few FlowTask objects where one has a start time and end time that encompasses the mtime of the output file. That task should get saved with the test result. Verifies that we import all tests in a suite Verify that the keyword and message fields are populated Verify that the keyword and message fields are set when failure is in a nested keyword Verify that the failing_keyword field is set correctly for passing tests Verify that robot tags are added to the database Verify pre-test execution errors are imported If robot has errors before the first test runs (eg: import errors) these errors were being thrown away. This test verifies that execution errors appear in imported test results. Verify that screenshots were created properly. For the robot_screenshot.xml output file, there should be: * A BuildFlowAsset created for the output.xml file * A BuildFlowAsset created for the screenshot taken during suite setup * A TestResultAsset created for the 'Via UI' robot test # output.xml asset created # suite setup screenshot assets created # No screenshots created for 'Via API' test # One screenshot created for 'Via UI' test | 2.084718 | 2 |
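test_field_robot_task spells out the matching rule the importer is expected to follow: take the mtime of output.xml and pick the FlowTask whose time_start/time_end window contains it. The sketch below restates that rule as a hypothetical helper; it is illustrative only, not the actual metaci robot_importer implementation.

# Hypothetical illustration of the time-window rule exercised by
# test_field_robot_task; not the actual robot_importer code.
from datetime import datetime
from pathlib import Path
from django.utils import timezone

def guess_task_for_output(flow_tasks, output_xml_path):
    mtime = timezone.make_aware(
        datetime.fromtimestamp(Path(output_xml_path).stat().st_mtime)
    )
    for task in flow_tasks:
        if task.time_start and task.time_end and task.time_start <= mtime <= task.time_end:
            return task
    return None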
api/commodities/serializers.py | uktrade/market-access-api | 0 | 6631745 | from rest_framework import serializers
from api.barriers.models import BarrierCommodity
from api.metadata.fields import CountryField, TradingBlocField
from .models import Commodity
class CommoditySerializer(serializers.ModelSerializer):
class Meta:
model = Commodity
fields = (
"id",
"code",
"suffix",
"level",
"description",
"full_description",
)
class BarrierCommoditySerializer(serializers.ModelSerializer):
country = CountryField(allow_null=True)
trading_bloc = TradingBlocField(allow_blank=True)
commodity = CommoditySerializer()
class Meta:
model = BarrierCommodity
fields = (
"commodity",
"code",
"country",
"trading_bloc",
)
| from rest_framework import serializers
from api.barriers.models import BarrierCommodity
from api.metadata.fields import CountryField, TradingBlocField
from .models import Commodity
class CommoditySerializer(serializers.ModelSerializer):
class Meta:
model = Commodity
fields = (
"id",
"code",
"suffix",
"level",
"description",
"full_description",
)
class BarrierCommoditySerializer(serializers.ModelSerializer):
country = CountryField(allow_null=True)
trading_bloc = TradingBlocField(allow_blank=True)
commodity = CommoditySerializer()
class Meta:
model = BarrierCommodity
fields = (
"commodity",
"code",
"country",
"trading_bloc",
)
| none | 1 | 2.33171 | 2 |
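Because CommoditySerializer is nested inside BarrierCommoditySerializer, serializing a BarrierCommodity inlines the commodity fields next to the resolved country and trading-bloc values. A small usage sketch follows; in practice the instance comes from the Django ORM rather than being constructed by hand.

# Illustrative only; real BarrierCommodity instances come from the Django ORM.
from api.commodities.serializers import BarrierCommoditySerializer

def commodity_payload(barrier_commodity):
    # Yields a plain dict shaped like:
    # {"commodity": {"id": ..., "code": ...}, "code": ..., "country": ..., "trading_bloc": ...}
    return BarrierCommoditySerializer(barrier_commodity).data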
|
src/ralph/cmdb/models_audits.py | quamilek/ralph | 0 | 6631746 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
import unicodedata
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from lck.django.choices import Choices
from lck.django.common.models import TimeTrackable
from ralph.cmdb.integration.issuetracker import IssueTracker
from ralph.cmdb.models import CI
def _normalize_name(name):
# Polish Ł is not handled properly
ret = name.lower().replace(' ', '.').replace(
'Ł', 'L').replace('ł', 'l')
return unicodedata.normalize('NFD', ret).encode('ascii', 'ignore')
def get_login_from_owner_name(owner):
return '.'.join(_normalize_name(n) for n in (
owner.first_name, owner.last_name))
def get_technical_owner(device):
if not device.venture:
return ''
owners = device.venture.technical_owners()
return get_login_from_owner_name(owners[0]) if owners else ''
def get_business_owner(device):
if not device.venture:
return ''
owners = device.venture.business_owners()
return get_login_from_owner_name(owners[0]) if owners else ''
class AuditStatus(Choices):
_ = Choices.Choice
created = _('created')
accepted = _('accepted')
rejected = _('rejected')
closed = _('closed')
class Auditable(TimeTrackable):
""" Base abstract class for keeping track of acceptation of change.
May be attribute change, or some custom workflow change.
Object, old value and new value is not stored here, giving ability to set it
according to custom neeeds.
You must implement in subclass:
- status field
- synchronize_status(new_status) method
If implementing attribute change, please do something like this:
class AttributeChange(Auditable):
object = ...
new_attribute = ..
old_attribute = ...
"""
user = models.ForeignKey('auth.User', verbose_name=_("user"), null=True,
blank=True, default=None, on_delete=models.SET_NULL)
status_lastchanged = models.DateTimeField(default=datetime.now,
verbose_name=_("date"))
issue_key = models.CharField(verbose_name=_("external ticket key number"),
max_length=30, blank=True, null=True, default=None)
class Meta:
abstract = True
def status_changed(self):
# newly created
if not self.id:
return True
# didn't change status
if 'status' not in self.dirty_fields:
return False
# changed status
dirty_statusid = self.dirty_fields['status']
if not dirty_statusid or dirty_statusid == self.status:
return False
else:
return True
def synchronize_status(self, new_status):
pass
def set_status_and_sync(self, new_status):
self.status = new_status
self.status_lastchanged = datetime.now()
self.synchronize_status(new_status)
self.save()
def transition_issue(self, transition_id, retry_count=1):
tracker = IssueTracker()
tracker.transition_issue(
issue_key=self.issue_key,
transition_id=transition_id,
)
def create_issue(self, params, default_assignee, retry_count=1):
"""
We make two requests to IssueTracker here.
1) Check if assignee exists in IssueTracker
2) Create issue with back-link for acceptance
"""
s = settings.ISSUETRACKERS['default']['OPA']
template=s['TEMPLATE']
issue_type=s['ISSUETYPE']
tracker = IssueTracker()
ci = None
try:
if params.get('ci_uid'):
ci = CI.objects.get(uid=params.get('ci_uid'))
except CI.DoesNotExist:
pass
if not tracker.user_exists(params.get('technical_assignee')):
tuser = default_assignee
else:
tuser = params.get('technical_assignee')
if not tracker.user_exists(params.get('business_assignee')):
buser = default_assignee
else:
buser = params.get('business_assignee')
issue = tracker.create_issue(
issue_type=issue_type,
description=params.get('description'),
summary=params.get('summary'),
ci=ci,
assignee=default_assignee,
technical_assignee=tuser,
business_assignee=buser,
start=self.created.isoformat(),
end='',
template=template,
)
self.issue_key = issue.get('key')
self.save()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
import unicodedata
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from lck.django.choices import Choices
from lck.django.common.models import TimeTrackable
from ralph.cmdb.integration.issuetracker import IssueTracker
from ralph.cmdb.models import CI
def _normalize_name(name):
# Polish Ł is not handled properly
ret = name.lower().replace(' ', '.').replace(
'Ł', 'L').replace('ł', 'l')
return unicodedata.normalize('NFD', ret).encode('ascii', 'ignore')
def get_login_from_owner_name(owner):
return '.'.join(_normalize_name(n) for n in (
owner.first_name, owner.last_name))
def get_technical_owner(device):
if not device.venture:
return ''
owners = device.venture.technical_owners()
return get_login_from_owner_name(owners[0]) if owners else ''
def get_business_owner(device):
if not device.venture:
return ''
owners = device.venture.business_owners()
return get_login_from_owner_name(owners[0]) if owners else ''
class AuditStatus(Choices):
_ = Choices.Choice
created = _('created')
accepted = _('accepted')
rejected = _('rejected')
closed = _('closed')
class Auditable(TimeTrackable):
""" Base abstract class for keeping track of acceptation of change.
May be attribute change, or some custom workflow change.
Object, old value and new value is not stored here, giving ability to set it
according to custom neeeds.
You must implement in subclass:
- status field
- synchronize_status(new_status) method
If implementing attribute change, please do something like this:
class AttributeChange(Auditable):
object = ...
new_attribute = ..
old_attribute = ...
"""
user = models.ForeignKey('auth.User', verbose_name=_("user"), null=True,
blank=True, default=None, on_delete=models.SET_NULL)
status_lastchanged = models.DateTimeField(default=datetime.now,
verbose_name=_("date"))
issue_key = models.CharField(verbose_name=_("external ticket key number"),
max_length=30, blank=True, null=True, default=None)
class Meta:
abstract = True
def status_changed(self):
# newly created
if not self.id:
return True
# didn't change status
if 'status' not in self.dirty_fields:
return False
# changed status
dirty_statusid = self.dirty_fields['status']
if not dirty_statusid or dirty_statusid == self.status:
return False
else:
return True
def synchronize_status(self, new_status):
pass
def set_status_and_sync(self, new_status):
self.status = new_status
self.status_lastchanged = datetime.now()
self.synchronize_status(new_status)
self.save()
def transition_issue(self, transition_id, retry_count=1):
tracker = IssueTracker()
tracker.transition_issue(
issue_key=self.issue_key,
transition_id=transition_id,
)
def create_issue(self, params, default_assignee, retry_count=1):
"""
We make two requests to IssueTracker here.
1) Check if assignee exists in IssueTracker
2) Create issue with back-link for acceptance
"""
s = settings.ISSUETRACKERS['default']['OPA']
template=s['TEMPLATE']
issue_type=s['ISSUETYPE']
tracker = IssueTracker()
ci = None
try:
if params.get('ci_uid'):
ci = CI.objects.get(uid=params.get('ci_uid'))
except CI.DoesNotExist:
pass
if not tracker.user_exists(params.get('technical_assignee')):
tuser = default_assignee
else:
tuser = params.get('technical_assignee')
if not tracker.user_exists(params.get('business_assignee')):
buser = default_assignee
else:
buser = params.get('business_assignee')
issue = tracker.create_issue(
issue_type=issue_type,
description=params.get('description'),
summary=params.get('summary'),
ci=ci,
assignee=default_assignee,
technical_assignee=tuser,
business_assignee=buser,
start=self.created.isoformat(),
end='',
template=template,
)
self.issue_key = issue.get('key')
self.save()
| en | 0.796154 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Polish Ł is not handled properly Base abstract class for keeping track of acceptation of change. May be attribute change, or some custom workflow change. Object, old value and new value is not stored here, giving ability to set it according to custom neeeds. You must implement in subclass: - status field - synchronize_status(new_status) method If implementing attribute change, please do something like this: class AttributeChange(Auditable): object = ... new_attribute = .. old_attribute = ... # newly created # didnt change status # changed status We create 2 IssueTracker requests for IssueTracker here. 1) Check if assignee exists in IssueTracker 2) Create issue with back-link for acceptance | 1.853747 | 2 |
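The Auditable docstring above describes the intended subclassing pattern: add a status field plus the object/old/new attributes and override synchronize_status(). Below is a hypothetical subclass following that pattern; the field names, the related 'discovery.Device' model and the transition id are illustrative assumptions, not part of Ralph itself.

# Hypothetical subclass following the pattern from the Auditable docstring.
# Field names, the related model and the transition id are assumptions.
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ralph.cmdb.models_audits import Auditable, AuditStatus

class DeviceOwnerChange(Auditable):
    device = models.ForeignKey('discovery.Device', on_delete=models.CASCADE)
    old_owner = models.CharField(max_length=100, blank=True)
    new_owner = models.CharField(max_length=100, blank=True)
    status = models.PositiveIntegerField(
        verbose_name=_("status"),
        choices=AuditStatus(),
        default=AuditStatus.created.id,
    )

    def synchronize_status(self, new_status):
        if new_status == AuditStatus.accepted.id:
            self.transition_issue(transition_id=11)  # hypothetical transition id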
src/classes.py | UoA-eResearch/leapmotion_gestures | 9 | 6631747 | import tkinter as tk
import numpy as np
class GUI:
"""simple tkinter gui for gesture recognition"""
def __init__(self, master):
# root window of gui
self.master = master
master.title("GUI")
# text variable to record current gesture
self.gesture = tk.StringVar()
self.gesture.set('position hand')
# image to be displayed, representing gesture: begin with image of dead parrot
self.img = tk.PhotoImage(file='data/images/dead.png')
# image to display when input is bad
self.bad = tk.PhotoImage(file='data/images/dead.png')
self.label = tk.Label(master, image=self.bad)
self.label.pack()
# label for gesture text
self.label2 = tk.Label(master, font=("Helvetica", 36), textvariable=self.gesture)
self.label2.pack(side=tk.BOTTOM)
# label with text coloured according to level of fury
self.label_fury = tk.Label(master, foreground="#%02x%02x%02x" % (0,50,0,), font=("Helvetica", 30), text=' movement')
self.label_fury.pack(side=tk.LEFT)
# label with text coloured according to level of angularity
self.label_angularity = tk.Label(master, foreground="#%02x%02x%02x" % (0,50,0,), font=("Helvetica", 30), text='angularity ')
self.label_angularity.pack(side=tk.RIGHT)
class SettingsGUI:
"""tkinter gui for changing settings during application run"""
def __init__(self, master):
# dictionaries of current settings values
self.settings = {}
# dictionary of tk spinbox objects
self.sb_dict = {}
# dictionary of string variables, for setting default spinbox values
self.strvar_dict = {}
# dictionary of labels used to demarcate spinboxes
self.label_dict = {}
# set universal text format for all spinboxes and labels
self.text_format = ("Helvetica", 16)
# row of next spinbox/label, += 1 every time a spinbox is created
self.next_row = 0
# root window of settings gui
self.master = master
master.title("settings")
self.change_colour = False
# generate spinboxes + labels for some different settings
self.create_setting('prediction interval', 1, 40, 1, 15)
self.create_setting('graph update interval', 1, 20, 1, 3)
self.create_setting('fury beta', 0.8, 1, 0.005, 0.9)
self.create_setting('angularity beta', 0.8, 1, 0.005, 0.975)
self.create_setting('confidence beta', 0.8, 1, 0.005, 0.98)
self.create_setting('x axis range', 1, 150, 1, 30)
self.create_setting('effective confidence zero', 0.05, 0.95, 0.05, 0.7)
self.create_setting('min conf. to change image', 0.0, 0.95, 0.05, 0.55)
self.create_setting('every nth frame to model', 1, 10, 1, 5)
self.create_setting('labels gather?', 0, 1, 1, 0)
self.update_button = tk.Button(self.master, text='Update Settings', command=self.update_settings, font=self.text_format)
self.update_button.grid()
self.update_button = tk.Button(self.master, text='Change Colours', command=self.change_colours, font=self.text_format)
self.update_button.grid()
def create_setting(self, variable, from_, to, increment, default):
"""generate spinbox + label"""
# set up default value of spinbox
self.settings[variable] = default
self.strvar_dict[variable] = tk.StringVar()
self.strvar_dict[variable].set(str(self.settings[variable]))
# set up the spinbox
self.sb_dict[variable] = tk.Spinbox(self.master, from_=from_, to=to, increment=increment, textvariable=self.strvar_dict[variable], font=self.text_format)
self.sb_dict[variable].grid(row=self.next_row, column=1)
# set up corresponding text label
self.label_dict[variable] = tk.Label(self.master, text=variable, font=self.text_format)
self.label_dict[variable].grid(row=self.next_row, column=0)
# increment row number
self.next_row += 1
def update_settings(self):
"""update the settings dictionary to reflect spinbox values"""
for setting in self.settings.keys():
if '.' in self.sb_dict[setting].get():
self.settings[setting] = float(self.sb_dict[setting].get())
else:
self.settings[setting] = int(self.sb_dict[setting].get())
def change_colours(self):
self.change_colour = True
class CircularBuffer:
"""reasonbly efficient circular buffer for storing last n frames or levels of furiosity etc."""
def __init__(self, shape):
# shape determines the shape of the storage used
# The first axis represents time steps, and the pointer increments on this axis, determining what will next be overwritten
self.mem = np.zeros(shape)
# store the next position to write too (store % len)
self.count = 0
self.len = shape[0]
def add(self, item):
"""add item to circular buffer to pointer location, then move pointer along by one"""
self.mem[self.count % self.len] = item
self.count += 1
def get(self):
"""return all items in buffer, ordered from oldest to newest"""
return np.concatenate((self.mem[self.count % self.len:], self.mem[:self.count % self.len]))
| import tkinter as tk
import numpy as np
class GUI:
"""simple tkinter gui for gesture recognition"""
def __init__(self, master):
# root window of gui
self.master = master
master.title("GUI")
# text variable to record current gesture
self.gesture = tk.StringVar()
self.gesture.set('position hand')
# image to be displayed, representing gesture: begin with image of dead parrot
self.img = tk.PhotoImage(file='data/images/dead.png')
# image to display when input is bad
self.bad = tk.PhotoImage(file='data/images/dead.png')
self.label = tk.Label(master, image=self.bad)
self.label.pack()
# label for gesture text
self.label2 = tk.Label(master, font=("Helvetica", 36), textvariable=self.gesture)
self.label2.pack(side=tk.BOTTOM)
# label with text coloured according to level of fury
self.label_fury = tk.Label(master, foreground="#%02x%02x%02x" % (0,50,0,), font=("Helvetica", 30), text=' movement')
self.label_fury.pack(side=tk.LEFT)
# label with text coloured according to level of angularity
self.label_angularity = tk.Label(master, foreground="#%02x%02x%02x" % (0,50,0,), font=("Helvetica", 30), text='angularity ')
self.label_angularity.pack(side=tk.RIGHT)
class SettingsGUI:
"""tkinter gui for changing settings during application run"""
def __init__(self, master):
# dictionaries of current settings values
self.settings = {}
# dictionary of tk spinbox objects
self.sb_dict = {}
# dictionary of string variables, for setting default spinbox values
self.strvar_dict = {}
# dictionary of labels used to demarcate spinboxes
self.label_dict = {}
# set universal text format for all spinboxes and labels
self.text_format = ("Helvetica", 16)
# row of next spinbox/label, += 1 every time a spinbox is created
self.next_row = 0
# root window of settings gui
self.master = master
master.title("settings")
self.change_colour = False
# generate spinboxes + labels for some different settings
self.create_setting('prediction interval', 1, 40, 1, 15)
self.create_setting('graph update interval', 1, 20, 1, 3)
self.create_setting('fury beta', 0.8, 1, 0.005, 0.9)
self.create_setting('angularity beta', 0.8, 1, 0.005, 0.975)
self.create_setting('confidence beta', 0.8, 1, 0.005, 0.98)
self.create_setting('x axis range', 1, 150, 1, 30)
self.create_setting('effective confidence zero', 0.05, 0.95, 0.05, 0.7)
self.create_setting('min conf. to change image', 0.0, 0.95, 0.05, 0.55)
self.create_setting('every nth frame to model', 1, 10, 1, 5)
self.create_setting('labels gather?', 0, 1, 1, 0)
self.update_button = tk.Button(self.master, text='Update Settings', command=self.update_settings, font=self.text_format)
self.update_button.grid()
self.update_button = tk.Button(self.master, text='Change Colours', command=self.change_colours, font=self.text_format)
self.update_button.grid()
def create_setting(self, variable, from_, to, increment, default):
"""generate spinbox + label"""
# set up default value of spinbox
self.settings[variable] = default
self.strvar_dict[variable] = tk.StringVar()
self.strvar_dict[variable].set(str(self.settings[variable]))
# set up the spinbox
self.sb_dict[variable] = tk.Spinbox(self.master, from_=from_, to=to, increment=increment, textvariable=self.strvar_dict[variable], font=self.text_format)
self.sb_dict[variable].grid(row=self.next_row, column=1)
# set up corresponding text label
self.label_dict[variable] = tk.Label(self.master, text=variable, font=self.text_format)
self.label_dict[variable].grid(row=self.next_row, column=0)
# increment row number
self.next_row += 1
def update_settings(self):
"""update the settings dictionary to reflect spinbox values"""
for setting in self.settings.keys():
if '.' in self.sb_dict[setting].get():
self.settings[setting] = float(self.sb_dict[setting].get())
else:
self.settings[setting] = int(self.sb_dict[setting].get())
def change_colours(self):
self.change_colour = True
class CircularBuffer:
"""reasonbly efficient circular buffer for storing last n frames or levels of furiosity etc."""
def __init__(self, shape):
# shape determines the shape of the storage used
# The first axis represents time steps, and the pointer increments on this axis, determining what will next be overwritten
self.mem = np.zeros(shape)
# store the next position to write too (store % len)
self.count = 0
self.len = shape[0]
def add(self, item):
"""add item to circular buffer to pointer location, then move pointer along by one"""
self.mem[self.count % self.len] = item
self.count += 1
def get(self):
"""return all items in buffer, ordered from oldest to newest"""
return np.concatenate((self.mem[self.count % self.len:], self.mem[:self.count % self.len]))
| en | 0.75268 | simple tkinter gui for gesture recognition # root window of gui # text variable to record current gesture # image to be displayed, representing gesture: begin with image of dead parrot # image to display when input is bad # label for gesture text # label with text coloured according to level of fury # label with text coloured according to level of angularity tkinter gui for changing settings during application run # dictionaries of current settings values # dictionary of tk spinbox objects # dictionary of string variables, for setting default spinbox values # dictionary of labels used to demarcate spinboxes # set univeral text format for all spinboxes and labels # row of next spinbox/label, += 1 every time a spinbox is created # root window of settings gui # generate spinboxes + labels for some different settings generate spinbox + label # set up default value of spinbox # set up spinbox the spinbox # set up corresponding text label # increment row number update the settings dictionary to reflect spinbox values reasonbly efficient circular buffer for storing last n frames or levels of furiosity etc. # shape determines the shape of the storage used # The first axis represents time steps, and the pointer increments on this axis, determining what will next be overwritten # store the next position to write too (store % len) add item to circular buffer to pointer location, then move pointer along by one return all items in buffer, ordered from oldest to newest | 3.670818 | 4 |
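CircularBuffer.get() always returns the window ordered oldest to newest, which is what makes it convenient for plotting the last n frames. A short usage sketch follows, assuming the class above is importable; note that until count reaches the buffer length, get() still includes the zero-initialised slots.

# Usage sketch for CircularBuffer: keep the last 5 scalar readings.
buf = CircularBuffer((5, 1))
for reading in [0.1, 0.4, 0.9, 0.2, 0.7, 0.3, 0.8]:
    buf.add(reading)

window = buf.get()            # oldest to newest: 0.9, 0.2, 0.7, 0.3, 0.8
print(window.ravel())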
extensions/donjayamanne.python-0.3.21/pythonFiles/refactor.py | maxemiliang/vscode-ext | 0 | 6631748 | # Arguments are:
# 1. Working directory.
# 2. Rope folder
import io
import sys
import json
import traceback
import rope
from rope.base import libutils
from rope.refactor.rename import Rename
from rope.refactor.extract import ExtractMethod, ExtractVariable
import rope.base.project
import rope.base.taskhandle
WORKSPACE_ROOT = sys.argv[1]
ROPE_PROJECT_FOLDER = '.vscode/.ropeproject'
class RefactorProgress():
"""
Refactor progress information
"""
def __init__(self, name='Task Name', message=None, percent=0):
self.name = name
self.message = message
self.percent = percent
class ChangeType():
"""
Change Type Enum
"""
EDIT = 0
NEW = 1
DELETE = 2
class Change():
"""
"""
EDIT = 0
NEW = 1
DELETE = 2
def __init__(self, filePath, fileMode=ChangeType.EDIT, diff=""):
self.filePath = filePath
self.diff = diff
self.fileMode = fileMode
class BaseRefactoring(object):
"""
Base class for refactorings
"""
def __init__(self, project, resource, name="Refactor", progressCallback=None):
self._progressCallback = progressCallback
self._handle = rope.base.taskhandle.TaskHandle(name)
self._handle.add_observer(self._update_progress)
self.project = project
self.resource = resource
self.changes = []
def _update_progress(self):
jobset = self._handle.current_jobset()
if jobset and not self._progressCallback is None:
progress = RefactorProgress()
# getting current job set name
if jobset.get_name() is not None:
progress.name = jobset.get_name()
# getting active job name
if jobset.get_active_job_name() is not None:
progress.message = jobset.get_active_job_name()
# adding done percent
percent = jobset.get_percent_done()
if percent is not None:
progress.percent = percent
if not self._progressCallback is None:
self._progressCallback(progress)
def stop(self):
self._handle.stop()
def refactor(self):
try:
self.onRefactor()
except rope.base.exceptions.InterruptedTaskError:
# we can ignore this exception, as user has cancelled refactoring
pass
def onRefactor(self):
"""
To be implemented by each base class
"""
pass
class RenameRefactor(BaseRefactoring):
def __init__(self, project, resource, name="Rename", progressCallback=None, startOffset=None, newName="new_Name"):
BaseRefactoring.__init__(self, project, resource,
name, progressCallback)
self._newName = newName
self.startOffset = startOffset
def onRefactor(self):
renamed = Rename(self.project, self.resource, self.startOffset)
changes = renamed.get_changes(self._newName, task_handle=self._handle)
for item in changes.changes:
if isinstance(item, rope.base.change.ChangeContents):
self.changes.append(
Change(item.resource.real_path, ChangeType.EDIT, item.get_description()))
else:
raise Exception('Unknown Change')
class ExtractVariableRefactor(BaseRefactoring):
def __init__(self, project, resource, name="Extract Variable", progressCallback=None, startOffset=None, endOffset=None, newName="new_Name", similar=False, global_=False):
BaseRefactoring.__init__(self, project, resource,
name, progressCallback)
self._newName = newName
self._startOffset = startOffset
self._endOffset = endOffset
self._similar = similar
self._global = global_
def onRefactor(self):
renamed = ExtractVariable(
self.project, self.resource, self._startOffset, self._endOffset)
changes = renamed.get_changes(
self._newName, self._similar, self._global)
for item in changes.changes:
if isinstance(item, rope.base.change.ChangeContents):
self.changes.append(
Change(item.resource.real_path, ChangeType.EDIT, item.get_description()))
else:
raise Exception('Unknown Change')
class ExtractMethodRefactor(ExtractVariableRefactor):
def __init__(self, project, resource, name="Extract Method", progressCallback=None, startOffset=None, endOffset=None, newName="new_Name", similar=False, global_=False):
ExtractVariableRefactor.__init__(self, project, resource,
name, progressCallback, startOffset=startOffset, endOffset=endOffset, newName=newName, similar=similar, global_=global_)
def onRefactor(self):
renamed = ExtractMethod(
self.project, self.resource, self._startOffset, self._endOffset)
changes = renamed.get_changes(
self._newName, self._similar, self._global)
for item in changes.changes:
if isinstance(item, rope.base.change.ChangeContents):
self.changes.append(
Change(item.resource.real_path, ChangeType.EDIT, item.get_description()))
else:
raise Exception('Unknown Change')
class RopeRefactoring(object):
def __init__(self):
self.default_sys_path = sys.path
self._input = io.open(sys.stdin.fileno(), encoding='utf-8')
def _extractVariable(self, filePath, start, end, newName):
"""
Extracts a variable
"""
project = rope.base.project.Project(
WORKSPACE_ROOT, ropefolder=ROPE_PROJECT_FOLDER, save_history=False)
resourceToRefactor = libutils.path_to_resource(project, filePath)
refactor = ExtractVariableRefactor(
project, resourceToRefactor, startOffset=start, endOffset=end, newName=newName, similar=True)
refactor.refactor()
changes = refactor.changes
project.close()
valueToReturn = []
for change in changes:
valueToReturn.append({'diff': change.diff})
return valueToReturn
def _extractMethod(self, filePath, start, end, newName):
"""
Extracts a method
"""
project = rope.base.project.Project(
WORKSPACE_ROOT, ropefolder=ROPE_PROJECT_FOLDER, save_history=False)
resourceToRefactor = libutils.path_to_resource(project, filePath)
refactor = ExtractMethodRefactor(
project, resourceToRefactor, startOffset=start, endOffset=end, newName=newName, similar=True)
refactor.refactor()
changes = refactor.changes
project.close()
valueToReturn = []
for change in changes:
valueToReturn.append({'diff': change.diff})
return valueToReturn
def _serialize(self, identifier, results):
"""
Serializes the refactor results
"""
return json.dumps({'id': identifier, 'results': results})
def _deserialize(self, request):
"""Deserialize request from VSCode.
Args:
request: String with raw request from VSCode.
Returns:
Python dictionary with request data.
"""
return json.loads(request)
def _process_request(self, request):
"""Accept serialized request from VSCode and write response.
"""
request = self._deserialize(request)
lookup = request.get('lookup', '')
if lookup == '':
pass
elif lookup == 'extract_variable':
changes = self._extractVariable(request['file'], int(
request['start']), int(request['end']), request['name'])
return self._write_response(self._serialize(request['id'], changes))
elif lookup == 'extract_method':
changes = self._extractMethod(request['file'], int(
request['start']), int(request['end']), request['name'])
return self._write_response(self._serialize(request['id'], changes))
def _write_response(self, response):
sys.stdout.write(response + '\n')
sys.stdout.flush()
def watch(self):
self._write_response("STARTED")
while True:
try:
self._process_request(self._input.readline())
except Exception as ex:
message = ""
try:
message = ex.message
except:
pass
message = message + ' \n' + traceback.format_exc()
sys.stderr.write('$ERROR' + str(len(message)) + ':' + message)
sys.stderr.flush()
if __name__ == '__main__':
RopeRefactoring().watch()
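The watch() loop implements a small line-delimited JSON protocol over stdin/stdout: the daemon prints STARTED, then answers each request line whose lookup is extract_variable or extract_method with a single JSON line of the form {"id": ..., "results": [{"diff": ...}, ...]}, and reports failures on stderr prefixed with $ERROR<length>:. Below is a hedged driver sketch; the workspace path, file path and offsets are made-up example values.

# Hypothetical driver for the refactor daemon above; the workspace path,
# file path and offsets are made-up example values.
import json
import subprocess
import sys

proc = subprocess.Popen(
    [sys.executable, 'pythonFiles/refactor.py', '/path/to/workspace'],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE,
    universal_newlines=True, bufsize=1)

assert proc.stdout.readline().strip() == 'STARTED'

request = {
    'id': 1,
    'lookup': 'extract_variable',
    'file': '/path/to/workspace/module.py',
    'start': 120,
    'end': 150,
    'name': 'extracted_value',
}
proc.stdin.write(json.dumps(request) + '\n')
proc.stdin.flush()

response = json.loads(proc.stdout.readline())
for change in response['results']:
    print(change['diff'])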
| # Arguments are:
# 1. Working directory.
# 2. Rope folder
import io
import sys
import json
import traceback
import rope
from rope.base import libutils
from rope.refactor.rename import Rename
from rope.refactor.extract import ExtractMethod, ExtractVariable
import rope.base.project
import rope.base.taskhandle
WORKSPACE_ROOT = sys.argv[1]
ROPE_PROJECT_FOLDER = '.vscode/.ropeproject'
class RefactorProgress():
"""
Refactor progress information
"""
def __init__(self, name='Task Name', message=None, percent=0):
self.name = name
self.message = message
self.percent = percent
class ChangeType():
"""
Change Type Enum
"""
EDIT = 0
NEW = 1
DELETE = 2
class Change():
"""
"""
EDIT = 0
NEW = 1
DELETE = 2
def __init__(self, filePath, fileMode=ChangeType.EDIT, diff=""):
self.filePath = filePath
self.diff = diff
self.fileMode = fileMode
class BaseRefactoring(object):
"""
Base class for refactorings
"""
def __init__(self, project, resource, name="Refactor", progressCallback=None):
self._progressCallback = progressCallback
self._handle = rope.base.taskhandle.TaskHandle(name)
self._handle.add_observer(self._update_progress)
self.project = project
self.resource = resource
self.changes = []
def _update_progress(self):
jobset = self._handle.current_jobset()
if jobset and not self._progressCallback is None:
progress = RefactorProgress()
# getting current job set name
if jobset.get_name() is not None:
progress.name = jobset.get_name()
# getting active job name
if jobset.get_active_job_name() is not None:
progress.message = jobset.get_active_job_name()
# adding done percent
percent = jobset.get_percent_done()
if percent is not None:
progress.percent = percent
if not self._progressCallback is None:
self._progressCallback(progress)
def stop(self):
self._handle.stop()
def refactor(self):
try:
self.onRefactor()
except rope.base.exceptions.InterruptedTaskError:
# we can ignore this exception, as user has cancelled refactoring
pass
def onRefactor(self):
"""
To be implemented by each base class
"""
pass
class RenameRefactor(BaseRefactoring):
def __init__(self, project, resource, name="Rename", progressCallback=None, startOffset=None, newName="new_Name"):
BaseRefactoring.__init__(self, project, resource,
name, progressCallback)
self._newName = newName
self.startOffset = startOffset
def onRefactor(self):
renamed = Rename(self.project, self.resource, self.startOffset)
changes = renamed.get_changes(self._newName, task_handle=self._handle)
for item in changes.changes:
if isinstance(item, rope.base.change.ChangeContents):
self.changes.append(
Change(item.resource.real_path, ChangeType.EDIT, item.get_description()))
else:
raise Exception('Unknown Change')
class ExtractVariableRefactor(BaseRefactoring):
def __init__(self, project, resource, name="Extract Variable", progressCallback=None, startOffset=None, endOffset=None, newName="new_Name", similar=False, global_=False):
BaseRefactoring.__init__(self, project, resource,
name, progressCallback)
self._newName = newName
self._startOffset = startOffset
self._endOffset = endOffset
self._similar = similar
self._global = global_
def onRefactor(self):
renamed = ExtractVariable(
self.project, self.resource, self._startOffset, self._endOffset)
changes = renamed.get_changes(
self._newName, self._similar, self._global)
for item in changes.changes:
if isinstance(item, rope.base.change.ChangeContents):
self.changes.append(
Change(item.resource.real_path, ChangeType.EDIT, item.get_description()))
else:
raise Exception('Unknown Change')
class ExtractMethodRefactor(ExtractVariableRefactor):
def __init__(self, project, resource, name="Extract Method", progressCallback=None, startOffset=None, endOffset=None, newName="new_Name", similar=False, global_=False):
ExtractVariableRefactor.__init__(self, project, resource,
name, progressCallback, startOffset=startOffset, endOffset=endOffset, newName=newName, similar=similar, global_=global_)
def onRefactor(self):
renamed = ExtractMethod(
self.project, self.resource, self._startOffset, self._endOffset)
changes = renamed.get_changes(
self._newName, self._similar, self._global)
for item in changes.changes:
if isinstance(item, rope.base.change.ChangeContents):
self.changes.append(
Change(item.resource.real_path, ChangeType.EDIT, item.get_description()))
else:
raise Exception('Unknown Change')
class RopeRefactoring(object):
def __init__(self):
self.default_sys_path = sys.path
self._input = io.open(sys.stdin.fileno(), encoding='utf-8')
def _extractVariable(self, filePath, start, end, newName):
"""
Extracts a variable
"""
project = rope.base.project.Project(
WORKSPACE_ROOT, ropefolder=ROPE_PROJECT_FOLDER, save_history=False)
resourceToRefactor = libutils.path_to_resource(project, filePath)
refactor = ExtractVariableRefactor(
project, resourceToRefactor, startOffset=start, endOffset=end, newName=newName, similar=True)
refactor.refactor()
changes = refactor.changes
project.close()
valueToReturn = []
for change in changes:
valueToReturn.append({'diff': change.diff})
return valueToReturn
def _extractMethod(self, filePath, start, end, newName):
"""
Extracts a method
"""
project = rope.base.project.Project(
WORKSPACE_ROOT, ropefolder=ROPE_PROJECT_FOLDER, save_history=False)
resourceToRefactor = libutils.path_to_resource(project, filePath)
refactor = ExtractMethodRefactor(
project, resourceToRefactor, startOffset=start, endOffset=end, newName=newName, similar=True)
refactor.refactor()
changes = refactor.changes
project.close()
valueToReturn = []
for change in changes:
valueToReturn.append({'diff': change.diff})
return valueToReturn
def _serialize(self, identifier, results):
"""
Serializes the refactor results
"""
return json.dumps({'id': identifier, 'results': results})
def _deserialize(self, request):
"""Deserialize request from VSCode.
Args:
request: String with raw request from VSCode.
Returns:
Python dictionary with request data.
"""
return json.loads(request)
def _process_request(self, request):
"""Accept serialized request from VSCode and write response.
"""
request = self._deserialize(request)
lookup = request.get('lookup', '')
if lookup == '':
pass
elif lookup == 'extract_variable':
changes = self._extractVariable(request['file'], int(
request['start']), int(request['end']), request['name'])
return self._write_response(self._serialize(request['id'], changes))
elif lookup == 'extract_method':
changes = self._extractMethod(request['file'], int(
request['start']), int(request['end']), request['name'])
return self._write_response(self._serialize(request['id'], changes))
def _write_response(self, response):
sys.stdout.write(response + '\n')
sys.stdout.flush()
def watch(self):
self._write_response("STARTED")
while True:
try:
self._process_request(self._input.readline())
except Exception as ex:
message = ""
try:
message = ex.message
except:
pass
message = message + ' \n' + traceback.format_exc()
sys.stderr.write('$ERROR' + str(len(message)) + ':' + message)
sys.stderr.flush()
if __name__ == '__main__':
RopeRefactoring().watch()
| en | 0.827049 | # Arguments are: # 1. Working directory. # 2. Rope folder Refactor progress information Change Type Enum Base class for refactorings # getting current job set name # getting active job name # adding done percent # we can ignore this exception, as user has cancelled refactoring To be implemented by each base class Extracts a variable Extracts a method Serializes the refactor results Deserialize request from VSCode. Args: request: String with raw request from VSCode. Returns: Python dictionary with request data. Accept serialized request from VSCode and write response. | 2.278323 | 2 |
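The script in the record above speaks a small newline-delimited JSON protocol over stdin/stdout: it announces itself with STARTED, then answers each request line with {'id': ..., 'results': [{'diff': ...}]} and reports failures on stderr with a $ERROR<length>: prefix. A minimal client sketch is shown below; the script filename, workspace path, and character offsets are illustrative assumptions, not values taken from the dataset.

import json
import subprocess

# The script filename and workspace path below are assumptions for illustration.
proc = subprocess.Popen(
    ['python', 'refactor.py', '/path/to/workspace'],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)

print(proc.stdout.readline().strip())  # the script announces itself with STARTED

request = {
    'id': 1,
    'lookup': 'extract_variable',
    'file': '/path/to/workspace/module.py',
    'start': 120,  # character offsets into the file; illustrative values
    'end': 145,
    'name': 'extracted_value',
}
proc.stdin.write(json.dumps(request) + '\n')
proc.stdin.flush()

response = json.loads(proc.stdout.readline())  # {'id': 1, 'results': [{'diff': ...}]}
for result in response['results']:
    print(result['diff'])  # unified diff that the editor applies to the file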
aws/s3FileListRead.py | tokiwa/tokiwa.github.io | 0 | 6631749 | <reponame>tokiwa/tokiwa.github.io
from __future__ import print_function
import json
import urllib
import boto3
print('*Loading lambda: s3FileListRead')
s3 = boto3.client('s3')
def lambda_handler(event, context):
print('==== file list in bucket ====')
AWS_S3_BUCKET_NAME = 'yujitokiwa-jp-test'
s3_resource = boto3.resource('s3')
bucket = s3_resource.Bucket(AWS_S3_BUCKET_NAME)
result = bucket.meta.client.list_objects(Bucket=bucket.name, Delimiter='/')
for o in result.get('Contents'):
print(o.get('Key')) # file name will be printed
response = s3.get_object(Bucket=bucket.name, Key=o.get('Key'))
data = response['Body'].read()
print(data.decode('utf-8')) # file contents will be printed | from __future__ import print_function
import json
import urllib
import boto3
print('*Loading lambda: s3FileListRead')
s3 = boto3.client('s3')
def lambda_handler(event, context):
print('==== file list in bucket ====')
AWS_S3_BUCKET_NAME = 'yujitokiwa-jp-test'
s3_resource = boto3.resource('s3')
bucket = s3_resource.Bucket(AWS_S3_BUCKET_NAME)
result = bucket.meta.client.list_objects(Bucket=bucket.name, Delimiter='/')
for o in result.get('Contents'):
print(o.get('Key')) # file name will be printed
response = s3.get_object(Bucket=bucket.name, Key=o.get('Key'))
data = response['Body'].read()
print(data.decode('utf-8')) # file contents will be printed | en | 0.836386 | # file name will be printed # file contents will be printed | 2.699444 | 3 |
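The handler in the record above calls list_objects, which returns at most 1000 keys per request. A hedged variant using the list_objects_v2 paginator is sketched below; the local __main__ invocation and the assumption that AWS credentials are already configured are illustrative, not part of the record.

import boto3

s3 = boto3.client('s3')

def lambda_handler(event, context):
    # Bucket name taken from the record above; the rest is a sketch, not the original handler.
    bucket_name = 'yujitokiwa-jp-test'
    paginator = s3.get_paginator('list_objects_v2')
    for page in paginator.paginate(Bucket=bucket_name, Delimiter='/'):
        for obj in page.get('Contents', []):
            key = obj['Key']
            print(key)  # file name
            body = s3.get_object(Bucket=bucket_name, Key=key)['Body'].read()
            print(body.decode('utf-8'))  # file contents

if __name__ == '__main__':
    # Local smoke test with a dummy event/context; assumes AWS credentials are configured.
    lambda_handler({}, None)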
hummingbird/ml/_utils.py | JasonNice/hummingbirdtestjn | 2 | 6631750 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Collection of utility functions used throughout Hummingbird.
"""
from distutils.version import LooseVersion
import warnings
from .exceptions import ConstantError
def torch_installed():
"""
Checks that *PyTorch* is available.
"""
try:
import torch
return True
except ImportError:
return False
def sklearn_installed():
"""
Checks that *Sklearn* is available.
"""
try:
import sklearn
return True
except ImportError:
return False
def lightgbm_installed():
"""
Checks that *LightGBM* is available.
"""
try:
import lightgbm
return True
except ImportError:
return False
def xgboost_installed():
"""
Checks that *XGBoost* is available.
"""
try:
import xgboost
except ImportError:
return False
from xgboost.core import _LIB
try:
_LIB.XGBoosterDumpModelEx
except AttributeError:
# The version is not recent enough even though it is version 0.6.
# You need to install xgboost from github and not from pypi.
return False
from xgboost import __version__
vers = LooseVersion(__version__)
allowed_min = LooseVersion("0.70")
allowed_max = LooseVersion("0.90")
if vers < allowed_min or vers > allowed_max:
warnings.warn("The converter works for xgboost >= 0.7 and <= 0.9. Different versions might not.")
return True
class _Constants(object):
"""
Class enabling the proper definition of constants.
"""
def __init__(self, constants, other_constants=None):
for constant in dir(constants):
if constant.isupper():
setattr(self, constant, getattr(constants, constant))
for constant in dir(other_constants):
if constant.isupper():
setattr(self, constant, getattr(other_constants, constant))
def __setattr__(self, name, value):
if name in self.__dict__:
raise ConstantError("Overwriting a constant is not allowed {}".format(name))
self.__dict__[name] = value
| # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Collection of utility functions used throughout Hummingbird.
"""
from distutils.version import LooseVersion
import warnings
from .exceptions import ConstantError
def torch_installed():
"""
Checks that *PyTorch* is available.
"""
try:
import torch
return True
except ImportError:
return False
def sklearn_installed():
"""
Checks that *Sklearn* is available.
"""
try:
import sklearn
return True
except ImportError:
return False
def lightgbm_installed():
"""
Checks that *LightGBM* is available.
"""
try:
import lightgbm
return True
except ImportError:
return False
def xgboost_installed():
"""
Checks that *XGBoost* is available.
"""
try:
import xgboost
except ImportError:
return False
from xgboost.core import _LIB
try:
_LIB.XGBoosterDumpModelEx
except AttributeError:
# The version is not recent enough even though it is version 0.6.
# You need to install xgboost from github and not from pypi.
return False
from xgboost import __version__
vers = LooseVersion(__version__)
allowed_min = LooseVersion("0.70")
allowed_max = LooseVersion("0.90")
if vers < allowed_min or vers > allowed_max:
warnings.warn("The converter works for xgboost >= 0.7 and <= 0.9. Different versions might not.")
return True
class _Constants(object):
"""
Class enabling the proper definition of constants.
"""
def __init__(self, constants, other_constants=None):
for constant in dir(constants):
if constant.isupper():
setattr(self, constant, getattr(constants, constant))
for constant in dir(other_constants):
if constant.isupper():
setattr(self, constant, getattr(other_constants, constant))
def __setattr__(self, name, value):
if name in self.__dict__:
raise ConstantError("Overwriting a constant is not allowed {}".format(name))
self.__dict__[name] = value
| en | 0.763569 | # ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- Collection of utility functions used throughout Hummingbird. Checks that *PyTorch* is available. Checks that *Sklearn* is available. Checks that *LightGBM* is available. Checks that *XGBoost* is available. # The version is not recent enough even though it is version 0.6. # You need to install xgboost from github and not from pypi. Class enabling the proper definition of constants. | 2.029868 | 2 |
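A minimal sketch of how the _Constants helper in the record above might behave: uppercase attributes of a source object are copied once, and any later reassignment raises ConstantError. The import paths and attribute names are assumptions inferred from the file path hummingbird/ml/_utils.py, not a documented Hummingbird API.

from types import SimpleNamespace

# Assumed import paths, inferred from the record's file path hummingbird/ml/_utils.py.
from hummingbird.ml._utils import _Constants
from hummingbird.ml.exceptions import ConstantError

# Hypothetical constants namespace; only UPPERCASE attributes are copied.
source = SimpleNamespace(MAX_DEPTH=8, BACKEND='pytorch', _ignored='not copied')
constants = _Constants(source)
print(constants.MAX_DEPTH, constants.BACKEND)  # 8 pytorch

try:
    constants.MAX_DEPTH = 16  # rebinding an existing constant is rejected
except ConstantError as err:
    print('rejected:', err)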