repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64 6.51-99.9) | line_max (int64 15-997) | alpha_frac (float64 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
Telestream/telestream-cloud-python-sdk | telestream_cloud_qc_sdk/test/test_active_format_test.py | 1 | 1460 | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_qc
from telestream_cloud_qc.models.active_format_test import ActiveFormatTest # noqa: E501
from telestream_cloud_qc.rest import ApiException
class TestActiveFormatTest(unittest.TestCase):
"""ActiveFormatTest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ActiveFormatTest
include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = telestream_cloud_qc.models.active_format_test.ActiveFormatTest() # noqa: E501
if include_optional :
return ActiveFormatTest(
afd = 56,
reject_on_error = True,
checked = True
)
else :
return ActiveFormatTest(
)
def testActiveFormatTest(self):
"""Test ActiveFormatTest"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| mit | 3,004,001,530,405,510,700 | 25.545455 | 96 | 0.636301 | false |
kennethcc2005/yahoo_finance_stocks | candle_output.py | 1 | 85023 | '''
Candlestick pattern detection functions collected in a single class.
Only the output() method needs to be run to build the DataFrame of all patterns for one symbol; a usage sketch follows the symbol setup below.
'''
import numpy as np
import pandas as pd
import json
import pandas.io.data as web
from datetime import date, datetime, timedelta
from collections import defaultdict
start = datetime(2010, 1, 1)
end = date.today()
df1 = pd.read_csv('data/companylist.csv')
df2 = pd.read_csv('data/companylist1.csv')
df3 = pd.read_csv('data/companylist2.csv')
data = web.DataReader("F", 'yahoo', start, end)
symbols = np.append(df1.Symbol.values, df2.Symbol.values)
symbols = np.append(symbols, df3.Symbol.values)
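# The helper below is a hedged usage sketch, not part of the original script: it shows one
# plausible way to drive the candle class (defined next) across a list of ticker symbols.
# The function name scan_symbols and the try/except handling are assumptions for illustration.
def scan_symbols(symbol_list):
    '''Return a dict of {symbol: one-row pattern DataFrame} built via candle(...).output().'''
    results = {}
    for sym in symbol_list:
        try:
            px = web.DataReader(sym, 'yahoo', start, end)
        except Exception:
            # Skip symbols Yahoo does not return data for.
            continue
        results[sym] = candle(px).output()
    return results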
class candle(object):
def __init__(self,data):
self.data=data
def output(self):
out_df=pd.DataFrame(index=[0])
out_df['a_new_price']=self.eight_new_price()
out_df['eight_new_price']=self.eight_new_price()
out_df['ten_new_price']=self.ten_new_price()
out_df['twelve_new_price']=self.twelve_new_price()
out_df['thirteen_new_price']=self.thirteen_new_price()
out_df['bearish_abandoned_baby']=self.bearish_abandoned_baby()
out_df['bullish_abandoned_baby']=self.bullish_abandoned_baby()
out_df['above_stomach']=self.above_stomach()
out_df['advance_block']=self.advance_block()
out_df['below_stomach']=self.below_stomach()
out_df['bearish_belt_hold']=self.bearish_belt_hold()
out_df['bearish_breakaway']=self.bearish_breakaway()
out_df['bearish_doji_star']=self.bearish_doji_star()
out_df['bearish_engulfing']=self.bearish_engulfing()
out_df['bearish_harami']=self.bearish_harami()
out_df['bearish_harami_cross']=self.bearish_harami_cross()
out_df['bearish_kicking']=self.bearish_kicking()
out_df['bearish_meeting_lines']=self.bearish_meeting_lines()
out_df['bearish_separating_lines']=self.bearish_separating_lines()
out_df['bearish_side_by_side_white_lines']=self.bearish_side_by_side_white_lines()
out_df['bearish_three_line_strike']=self.bearish_three_line_strike()
out_df['bearish_tri_star']=self.bearish_tri_star()
out_df['bullish_belt_hold']=self.bullish_belt_hold()
out_df['bullish_breakaway']=self.bullish_breakaway()
out_df['bullish_doji_star']=self.bullish_doji_star()
out_df['bullish_engulfing']=self.bullish_engulfing()
out_df['bullish_harami']=self.bullish_harami()
out_df['bullish_harami_cross']=self.bullish_harami_cross()
out_df['bullish_kicking']=self.bullish_kicking()
out_df['bullish_meeting_lines']=self.bullish_meeting_lines()
out_df['bullish_separating_lines']=self.bullish_separating_lines()
out_df['bullish_side_by_side_white_lines']=self.bullish_side_by_side_white_lines()
out_df['bullish_three_line_strike']=self.bullish_three_line_strike()
out_df['bullish_tri_star']=self.bullish_tri_star()
out_df['collapsing_doji_star']=self.collapsing_doji_star()
out_df['concealing_baby_swallow']=self.concealing_baby_swallow()
out_df['dark_cloud_cover']=self.dark_cloud_cover()
out_df['deliberation']=self.deliberation()
out_df['gapping_down_doji']=self.gapping_down_doji()
out_df['gapping_up_doji']=self.gapping_up_doji()
out_df['northern_doji']=self.northern_doji()
out_df['southern_doji']=self.southern_doji()
out_df['evening_doji']=self.evening_doji()
out_df['downside_gap_three_methods']=self.downside_gap_three_methods()
out_df['downside_tasuki_gap']=self.downside_tasuki_gap()
out_df['falling_three_methods']=self.falling_three_methods()
out_df['falling_window']=self.falling_window()
out_df['hammer']=self.hammer()
out_df['inverted_hammer']=self.inverted_hammer()
out_df['hanging_man']=self.hanging_man()
out_df['high_wave']=self.high_wave()
out_df['homing_pigeon']=self.homing_pigeon()
out_df['identical_three_crows']=self.identical_three_crows()
out_df['in_neck']=self.in_neck()
out_df['ladder_bottom']=self.ladder_bottom()
out_df['last_engulfing_bottom']=self.last_engulfing_bottom()
out_df['last_engulfing_top']=self.last_engulfing_top()
out_df['matching_low']=self.matching_low()
out_df['mat_hold']=self.mat_hold()
out_df['morning_doji_star']=self.morning_doji_star()
out_df['morning_star']=self.morning_star()
out_df['on_neck']=self.on_neck()
out_df['piercing_pattern']=self.piercing_pattern()
out_df['rickshaw_man']=self.rickshaw_man()
out_df['rising_three_methods']=self.rising_three_methods()
out_df['rising_window']=self.rising_window()
out_df['shooting_star_1']=self.shooting_star_1()
out_df['shooting_star_2']=self.shooting_star_2()
out_df['stick_sandwich']=self.stick_sandwich()
out_df['takuri_line']=self.takuri_line()
out_df['three_black_crows']=self.three_black_crows()
out_df['three_inside_down']=self.three_inside_down()
out_df['three_inside_up']=self.three_inside_up()
out_df['three_outside_down']=self.three_outside_down()
out_df['three_outside_up']=self.three_outside_up()
out_df['three_stars_in_south']=self.three_stars_in_south()
out_df['three_white_soldiers']=self.three_white_soldiers()
out_df['thrusting']=self.thrusting()
out_df['tweezers_bottom']=self.tweezers_bottom()
out_df['tweezers_top']=self.tweezers_top()
out_df['two_black_gapping']=self.two_black_gapping()
out_df['two_crows']=self.two_crows()
out_df['unique_three_river_bottom']=self.unique_three_river_bottom()
out_df['upside_gap_three_methods']=self.upside_gap_three_methods()
out_df['upside_gap_two_crows']=self.upside_gap_two_crows()
out_df['upside_tasuki_gap']=self.upside_tasuki_gap()
return out_df
def doji(self,data_pt):
if float(max(data_pt['Close'], data_pt['Open']))/float(min(data_pt['Close'], data_pt['Open'])) < 1.001:
return True
else:
return False
def dragonfly_doji(self,data_pt):
'''
Look for a long lower shadow with a small body
(open and close are within pennies of each other).
'''
a = self.doji(data_pt)
b = ((data_pt['Close']-data_pt['Low'])/data_pt['Close']) > 0.03
c = self.similar_price(data_pt['Open'], data_pt['High'])
if a and b and c:
return True
else:
return False
def gravestone_doji(self,data_pt):
'''
Look for a candle with a tall upper shadow and little or no lower one.
The opening and closing prices should be within pennies of each other.
'''
a = self.doji(data_pt)
b = ((data_pt['High']-data_pt['Open'])/data_pt['Open']) > 0.03
c = self.similar_price(data_pt['Open'], data_pt['Low'])
if a and b and c:
return True
else:
return False
def long_legged_doji(self,data_pt):
'''
Look for a doji (opening and closing prices are within a few pennies of each other) accompanied by long shadows.
'''
a = self.doji(data_pt)
b = ((data_pt['High']-data_pt['Open'])/data_pt['Open']) > 0.03
c = ((data_pt['Close']-data_pt['Low'])/data_pt['Close']) > 0.03
if a and b and c:
return True
else:
return False
def body_candle(self,data_pt):
return abs(data_pt['Close'] - data_pt['Open'])
def black_candle(self,data_pt):
if (data_pt['Close'] > data_pt['Open']) and (not self.doji(data_pt)):
return False
else:
return True
def tall_black_candle(self,data_pt):
if self.black_candle(data_pt) and float(data_pt['Open'])/(data_pt['Close']) > 1.02:
return True
else:
return False
def small_black_candle(self,data_pt):
if self.black_candle(data_pt) and (not self.tall_black_candle(data_pt)):
return True
else:
return False
def white_candle(self,data_pt):
if (data_pt['Close'] > data_pt['Open']) and (not self.doji(data_pt)):
return True
else:
return False
def tall_white_candle(self,data_pt):
if self.white_candle(data_pt) and float(data_pt['Close'])/(data_pt['Open']) > 1.02:
return True
else:
return False
def small_white_candle(self,data_pt):
if self.white_candle(data_pt) and not self.tall_white_candle(data_pt):
return True
else:
return False
def white_marubozu_candle(self,data_pt):
if self.white_candle(data_pt) and (data_pt['Open'] == data_pt['Low']) and (data_pt['Close'] == data_pt['High']):
return True
else:
return False
def black_marubozu_candle(self,data_pt):
if self.black_candle(data_pt) and (data_pt['Open'] == data_pt['High']) and (data_pt['Close'] == data_pt['Low']):
return True
else:
return False
def closing_black_marubozu_candle(self,data_pt):
'''
Look for a tall black candle with an upper shadow but no lower one.
'''
if self.tall_black_candle(data_pt) and (data_pt['Open'] != data_pt['High']) and (data_pt['Close'] == data_pt['Low']):
return True
else:
return False
def closing_white_marubozu_candle(self,data_pt):
'''
Look for a tall white candle with an lower shadow but no upper one.
'''
if self.tall_white_candle(data_pt) and (data_pt['Open'] != data_pt['Low']) and (data_pt['Close'] == data_pt['High']):
return True
else:
return False
def black_spinning_top_candle(self,data_pt):
'''
Look for a small black body with shadows taller than the body.
'''
a = self.small_black_candle(data_pt)
b = (data_pt['Close'] - data_pt['Low']) > 2 * self.body_candle(data_pt)
c = (data_pt['High'] - data_pt['Open']) > 2 * self.body_candle(data_pt)
if a and b and c:
return True
else:
return False
def white_spinning_top_candle(self,data_pt):
'''
Look for a small white bodied candle with tall shadows.
'''
a = self.small_white_candle(data_pt)
b = (data_pt['Open'] - data_pt['Low']) > 2 * self.body_candle(data_pt)
c = (data_pt['High'] - data_pt['Close']) > 2 * self.body_candle(data_pt)
if a and b and c:
return True
else:
return False
def up_price_trend(self,data_pt, data_pt1, data_pt2):
'''
data_pt: the first day for the pattern
data_pt1: the day before the pattern, last day for the upward trend
data_pt2: the first day to compare as upward trend
'''
if ((data_pt1['Close'] /float(data_pt2['Open'])) > 1.03):
return True
else:
return False
def down_price_trend(self,data_pt, data_pt1, data_pt2):
'''
data_pt: the first day for the pattern
data_pt1: the day before the pattern, last day for the upward trend
data_pt2: the first day to compare as upward trend
'''
if ((float(data_pt2['Open'])/data_pt1['Close']) > 1.03):
return True
else:
return False
def similar_price(self,data_pt1,data_pt2, percent = 0.001):
a = (abs(data_pt1 - data_pt2)/(data_pt2)) < percent
if a :
return True
else:
return False
def eight_new_price(self):
for i in xrange(1,9):
if not (self.data.iloc[-i]['High'] > self.data.iloc[-i-1]['High']):
return False
if self.data.iloc[-9]['High'] < self.data.iloc[-10]['High']:
return True
else:
return False
def ten_new_price(self):
for i in xrange(1,11):
if not (self.data.iloc[-i]['High'] > self.data.iloc[-i-1]['High']):
return False
if self.data.iloc[-11]['High'] < self.data.iloc[-12]['High']:
return True
else:
return False
def twelve_new_price(self):
for i in xrange(1,13):
if not (self.data.iloc[-i]['High'] > self.data.iloc[-i-1]['High']):
return False
if self.data.iloc[-13]['High'] < self.data.iloc[-14]['High']:
return True
else:
return False
def thirteen_new_price(self):
for i in xrange(1,14):
if not (self.data.iloc[-i]['High'] > self.data.iloc[-i-1]['High']):
return False
if self.data.iloc[-14]['High'] < self.data.iloc[-15]['High']:
return True
else:
return False
def bearish_abandoned_baby(self):
a = self.data.iloc[-1]['Close'] < self.data.iloc[-1]['Open']
b = float(self.data.iloc[-1]['Open'])/(self.data.iloc[-1]['Close']) > 1.02
c = self.data.iloc[-1]['High'] < self.data.iloc[-2]['Low']
d = float(max(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open']))/float(min(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open'])) < 1.001
e = self.data.iloc[-2]['Low'] > self.data.iloc[-3]['High']
f = float(self.data.iloc[-3]['Close'])/(self.data.iloc[-3]['Open']) > 1.02
g = self.up_price_trend(self.data.iloc[-3],self.data.iloc[-4], self.data.iloc[-6])
if a and b and c and d and e and f and g:
return True
else:
return False
# if self.data.iloc[-1]['Close'] < self.data.iloc[-1]['Open']:
# if float(self.data.iloc[-1]['Open'])/(self.data.iloc[-1]['Close']) > 1.03:
# if self.data.iloc[-1]['High'] < self.data.iloc[-2]['Low']:
# if float(max(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open']))/float(min(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open'])) < 1.01:
# if self.data.iloc[-2]['Low'] > self.data.iloc[-3]['High']:
# if float(self.data.iloc[-3]['Close'])/(self.data.iloc[-3]['Open']) > 1.03:
def bullish_abandoned_baby(self):
a = self.data.iloc[-1]['Close'] > self.data.iloc[-1]['Open']
b = float(self.data.iloc[-1]['Close'])/(self.data.iloc[-1]['Open']) > 1.02
c = self.data.iloc[-1]['Low'] > self.data.iloc[-2]['High']
d = float(max(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open']))/float(min(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open'])) < 1.001
e = self.data.iloc[-2]['High'] < self.data.iloc[-3]['Low']
f = float(self.data.iloc[-3]['Open'])/(self.data.iloc[-3]['Close']) > 1.02
g = self.down_price_trend(self.data.iloc[-3],self.data.iloc[-4], self.data.iloc[-6])
if a and b and c and d and e and f and g:
return True
else:
return False
def above_stomach(self):
a = self.data.iloc[-2]['Close'] < self.data.iloc[-2]['Open']
b = self.data.iloc[-2]['Open']/float(self.data.iloc[-2]['Close']) > 1.02
c = (self.data.iloc[-1]['Close'] > self.data.iloc[-1]['Open']) and (self.data.iloc[-1]['Close'] > self.data.iloc[-2]['Open'])
d = self.data.iloc[-1]['Close']/float(self.data.iloc[-1]['Open']) > 1.02
e = self.data.iloc[-1]['Open'] > ((float(self.data.iloc[-2]['Open'])+self.data.iloc[-2]['Close'])/2)
f = self.data.iloc[-2]['Open'] > self.data.iloc[-1]['Open']
g = self.up_price_trend(self.data.iloc[-2],self.data.iloc[-3], self.data.iloc[-5])
if a and b and c and d and e and f and g:
return True
else:
return False
def advance_block(self):
a = self.white_candle(self.data.iloc[-1])
b = self.white_candle(self.data.iloc[-2])
c = self.white_candle(self.data.iloc[-3])
day1_body = self.data.iloc[-3]['Close']/float(self.data.iloc[-3]['Open'])
day2_body = self.data.iloc[-2]['Close']/float(self.data.iloc[-2]['Open'])
day3_body = self.data.iloc[-1]['Close']/float(self.data.iloc[-1]['Open'])
d = day1_body > 1.03
e = (day2_body > 1.005) and ( day2_body < day1_body)
f = (day3_body > 1.005) and ( day3_body < day1_body)
g = (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Close']) and (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Open'])
h = (self.data.iloc[-2]['Open'] < self.data.iloc[-3]['Close']) and (self.data.iloc[-2]['Open'] > self.data.iloc[-3]['Open'])
j = (self.data.iloc[-1]['High'] - self.data.iloc[-1]['Close']) > (self.data.iloc[-1]['Close'] - self.data.iloc[-1]['Open'])
k = (self.data.iloc[-2]['High'] - self.data.iloc[-2]['Close']) > (self.data.iloc[-2]['Close'] - self.data.iloc[-2]['Open'])
l = self.up_price_trend(self.data.iloc[-3],self.data.iloc[-4], self.data.iloc[-6])
if a and b and c and d and e and f and g and h and j and k and l:
return True
else:
return False
def below_stomach(self):
'''
Look for a tall white candle followed by a candle that has a body below the middle of the white candle.
The second candle is shown as black, but the guidelines I saw did not mention this as a requirement.
'''
a = self.black_candle(self.data.iloc[-1])
b = self.white_candle(self.data.iloc[-2])
c = self.data.iloc[-1]['Open']/float(self.data.iloc[-1]['Close']) > 1.02
d = self.data.iloc[-2]['Close']/float(self.data.iloc[-2]['Open']) > 1.02
e = (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Open']) and (self.data.iloc[-1]['Open'] < (float(self.data.iloc[-2]['Open'])+self.data.iloc[-2]['Close'])/2)
f = self.data.iloc[-1]['Close'] < self.data.iloc[-2]['Open']
g = self.up_price_trend(self.data.iloc[-2],self.data.iloc[-3], self.data.iloc[-5])
if a and b and c and d and e and f and g:
return True
else:
return False
def bearish_belt_hold(self):
'''
Price opens at the high for the day and closes near the low, forming a tall black candle, often with a small lower shadow.
'''
a = self.tall_black_candle(self.data.iloc[-1])
b = (self.data.iloc[-1]['Close']/float(self.data.iloc[-1]['Low']) < 1.01)
c = (self.data.iloc[-1]['Open'] == self.data.iloc[-1]['High'])
d = self.white_candle(self.data.iloc[-2])
e = self.up_price_trend(self.data.iloc[-1],self.data.iloc[-2], self.data.iloc[-4])
if a and b and c and d and e:
return True
else:
return False
def bearish_breakaway(self):
'''
Look for 5 candle lines in an upward price trend with the first candle being a tall white one.
The second day should be a white candle with a gap between the two bodies, but the shadows can overlap.
Day three should have a higher close and the candle can be any color.
Day 4 shows a white candle with a higher close.
The last day is a tall black candle with a close within the gap between the bodies of the first two candles.
'''
a = self.tall_white_candle(self.data.iloc[-5])
b = self.white_candle(self.data.iloc[-4])
c = self.data.iloc[-4]['Open'] > self.data.iloc[-5]['Close']
d = self.data.iloc[-3]['Close'] > self.data.iloc[-4]['Close']
e = self.data.iloc[-2]['Close'] > self.data.iloc[-3]['Close']
f = self.white_candle(self.data.iloc[-2])
g = self.tall_black_candle(self.data.iloc[-1])
h = (self.data.iloc[-1]['Close'] < self.data.iloc[-4]['Open']) and (self.data.iloc[-1]['Close'] > self.data.iloc[-5]['Close'])
i = self.up_price_trend(self.data.iloc[-5],self.data.iloc[-6], self.data.iloc[-8])
if a and b and c and d and e and f and g and h and i:
return True
else:
return False
def bearish_doji_star(self):
'''
Look for a two-candle pattern in an uptrend.
The first candle is a long white one.
The next day, price gaps higher and the body remains above the prior body.
A doji forms with the opening and closing prices within pennies of each other.
The shadows on the doji should be comparatively short.
'''
a = self.tall_white_candle(self.data.iloc[-2])
b = (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Close']) and (self.data.iloc[-1]['Close'] > self.data.iloc[-2]['Close'])
c = self.doji(self.data.iloc[-1])
d = (self.data.iloc[-1]['High'] - self.data.iloc[-1]['Low']) < self.body_candle(self.data.iloc[-2])
e = self.up_price_trend(self.data.iloc[-2],self.data.iloc[-3], self.data.iloc[-5])
if a and b and c and d and e:
return True
else:
return False
def bearish_engulfing(self):
'''
Look for a two candle pattern in an upward price trend.
The first candle is white and the second is black.
The body of the black candle is taller and overlaps the candle of the white body.
Shadows are unimportant.
'''
a = self.white_candle(self.data.iloc[-2])
b = self.black_candle(self.data.iloc[-1])
c = (self.data.iloc[-1]['Close'] < self.data.iloc[-2]['Open']) and (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Close'])
d = self.up_price_trend(self.data.iloc[-2],self.data.iloc[-3], self.data.iloc[-5])
if a and b and c and d:
return True
else:
return False
def bearish_harami(self):
'''
Look for a tall white candle followed by a small black one.
The opening and closing prices must be within the body of the white candle.
Ignore the shadows.
Either the tops of the bodies or the bottoms (or both) must be a different price.
'''
a = self.tall_white_candle(self.data.iloc[-2])
b = (self.black_candle(self.data.iloc[-1])) and (not self.tall_black_candle(self.data.iloc[-1]))
c = (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Close']) and (self.data.iloc[-1]['Close'] > self.data.iloc[-2]['Open'])
d = (self.data.iloc[-1]['High'] != self.data.iloc[-2]['High']) or (self.data.iloc[-1]['Low'] != self.data.iloc[-2]['Low'])
e = self.up_price_trend(self.data.iloc[-2],self.data.iloc[-3], self.data.iloc[-5])
if a and b and c and d and e:
return True
else:
return False
def bearish_harami_cross(self):
'''
Look for a tall white candle in an upward price trend.
The next day, a doji appears that is inside (including the shadows) the trading range of the white candle.
'''
a = self.tall_white_candle(self.data.iloc[-2])
b = self.doji(self.data.iloc[-1])
c = (self.data.iloc[-1]['High'] < self.data.iloc[-2]['High']) and (self.data.iloc[-1]['Low'] > self.data.iloc[-2]['Low'])
d = self.up_price_trend(self.data.iloc[-2],self.data.iloc[-3], self.data.iloc[-5])
if a and b and c and d:
return True
else:
return False
def bearish_kicking(self):
'''
The first days is a white marubozu candle followed by a black marubozu. Between the two candles must be a gap.
'''
a = self.white_marubozu_candle(self.data.iloc[-2])
b = self.black_marubozu_candle(self.data.iloc[-1])
c = self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Close']
if a and b and c:
return True
else:
return False
def bearish_meeting_lines(self):
'''
Look for a tall white candle in an upward price trend.
Following that, the next candle should be a tall black one.
The closes of the two candles should be "near" one another, whatever that means.
'''
a = self.up_price_trend(self.data.iloc[-2],self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_white_candle(self.data.iloc[-2])
c = self.tall_black_candle(self.data.iloc[-1])
d = (abs(self.data.iloc[-1]['Close'] - self.data.iloc[-2]['Close'])/(self.data.iloc[-1]['Close'])) < 0.001
if a and b and c and d:
return True
else:
return False
def bearish_separating_lines(self):
'''
Look for a tall white candle in a downward price trend followed by a tall black candle.
The opening price of the two candles should be similar.
'''
a = self.down_price_trend(self.data.iloc[-2],self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_white_candle(self.data.iloc[-2])
c = self.tall_black_candle(self.data.iloc[-1])
d = (abs(self.data.iloc[-1]['Open'] - self.data.iloc[-2]['Open'])/(self.data.iloc[-1]['Open'])) < 0.001
if a and b and c and d:
return True
else:
return False
def bearish_side_by_side_white_lines(self):
'''
Look for a black candle in a downward price trend.
Following that, find two white candles with bodies about the same size and similar opening prices.
The closing prices of both white candles must remain below the body of the black candle.
'''
a = self.down_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.black_candle(self.data.iloc[-3])
c = self.white_candle(self.data.iloc[-2])
d = self.white_candle(self.data.iloc[-1])
e = self.similar_price(self.data.iloc[-2]['Close'],self.data.iloc[-1]['Close'])
f = self.similar_price(self.data.iloc[-2]['Open'],self.data.iloc[-1]['Open'])
g = self.data.iloc[-2]['Close'] < self.data.iloc[-3]['Close']
if a and b and c and d and e and f and g:
return True
else:
return False
def bearish_three_line_strike(self):
'''
Look for three black candles forming lower lows followed by a tall white candle that
opens below the prior close and closes above the first day's open.
In other words, the last candle spans most of the price action of the prior three days.
'''
a = self.down_price_trend(self.data.iloc[-4], self.data.iloc[-5], self.data.iloc[-7])
b = self.black_candle(self.data.iloc[-2])
c = self.black_candle(self.data.iloc[-3])
d = self.black_candle(self.data.iloc[-4])
e = (self.data.iloc[-2]['Low'] < self.data.iloc[-3]['Low']) and (self.data.iloc[-2]['Close'] < self.data.iloc[-3]['Close'])
f = (self.data.iloc[-3]['Low'] < self.data.iloc[-4]['Low']) and (self.data.iloc[-3]['Close'] < self.data.iloc[-4]['Close'])
g = self.tall_white_candle(self.data.iloc[-1])
h = (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Close']) and (self.data.iloc[-1]['Close'] > self.data.iloc[-4]['Open'])
if a and b and c and d and e and f and g and h:
return True
else:
return False
def bearish_tri_star(self):
'''
Look for three doji candles, the middle one has a body above the other two.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.doji(self.data.iloc[-3])
c = self.doji(self.data.iloc[-2])
d = self.doji(self.data.iloc[-1])
e = min(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open']) > max(self.data.iloc[-1]['Close'], self.data.iloc[-1]['Open'])
f = min(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open']) > max(self.data.iloc[-3]['Close'], self.data.iloc[-3]['Open'])
if a and b and c and d and e and f:
return True
else:
return False
def bullish_belt_hold(self):
'''
Look for a white candle with no lower shadow, but closing near the high.
'''
a = self.down_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
b = self.white_candle(self.data.iloc[-1])
c = self.data.iloc[-1]['Low'] == self.data.iloc[-1]['Open']
d = self.similar_price(self.data.iloc[-1]['High'], self.data.iloc[-1]['Close'])
if a and b and c and d:
return True
else:
return False
def bullish_breakaway(self):
'''
Look for a series of five candles in a downtrend.
The first candle is tall and black followed by another black one that opens lower,
leaving a gap between the two bodies (but shadows can overlap).
The third day is a candle of any color but it should have a lower close.
Day four is a black candle with a lower close.
The final day is a tall white candle that closes within the body gap of the first two candles.
'''
a = self.down_price_trend(self.data.iloc[-5],self.data.iloc[-6], self.data.iloc[-8])
b = self.tall_black_candle(self.data.iloc[-5])
c = (self.black_candle(self.data.iloc[-4])) and (self.data.iloc[-4]['Open'] < self.data.iloc[-5]['Close'])
d = self.data.iloc[-3]['Close'] < self.data.iloc[-4]['Close']
e = (self.black_candle(self.data.iloc[-2])) and (self.data.iloc[-2]['Close'] < self.data.iloc[-3]['Close'])
f = self.tall_white_candle(self.data.iloc[-1])
g = (self.data.iloc[-1]['Close'] > self.data.iloc[-4]['Open']) and (self.data.iloc[-1]['Close'] < self.data.iloc[-5]['Close'])
if a and b and c and d and e and f and g:
return True
else:
return False
def bullish_doji_star(self):
'''
Look for a tall black candle on the first day followed by a doji
(where the opening and closing prices are within pennies of each other)
that gaps below the prior candle's body.
The shadows can overlap, but the doji's shadows should not be unusually long, whatever that means.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = (self.tall_black_candle(self.data.iloc[-2])) and self.doji(self.data.iloc[-1])
c = max(self.data.iloc[-1]['Close'], self.data.iloc[-1]['Open']) < self.data.iloc[-2]['Close']
d = (self.data.iloc[-1]['High']-self.data.iloc[-1]['Low']) < self.body_candle(self.data.iloc[-2])
if a and b and c and d:
return True
else:
return False
def bullish_engulfing(self):
'''
Look for two candles in a downward price trend.
The first is a black candle followed by a taller white one.
The white candle should have a close above the prior open and an open below the prior close.
In other words, the body of the white candle should engulf or overlap the body of the black candle.
Ignore the shadows.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.black_candle(self.data.iloc[-2])
c = self.tall_white_candle(self.data.iloc[-1])
d = (self.data.iloc[-1]['Close'] > self.data.iloc[-2]['Open']) and (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Close'])
if a and b and c and d:
return True
else:
return False
def bullish_harami(self):
'''
Look for a tall black candle in a downward price trend.
The next day a white candle should be nestled within the body of the prior candle.
Ignore the shadows. The tops or bottoms of the bodies can be the same price, but not both.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_black_candle(self.data.iloc[-2])
c = self.white_candle(self.data.iloc[-1])
d = (self.data.iloc[-1]['Close'] < self.data.iloc[-2]['Open']) and (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Close'])
if a and b and c and d:
return True
else:
return False
def bullish_harami_cross(self):
'''
Look for a two candle pattern in a downward price trend.
The first line is a tall black candle followed by a doji that fits within the high-low price range of the prior day.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_black_candle(self.data.iloc[-2])
c = self.doji(self.data.iloc[-1])
d = (self.data.iloc[-1]['High'] < self.data.iloc[-2]['High']) and (self.data.iloc[-1]['Low'] > self.data.iloc[-2]['Low'])
if a and b and c and d:
return True
else:
return False
def bullish_kicking(self):
'''
Look for a tall black marubozu candle followed by an upward gap then a tall white marubozu candle.
'''
a = self.tall_black_candle(self.data.iloc[-2])
b = self.black_marubozu_candle(self.data.iloc[-2])
c = self.tall_white_candle(self.data.iloc[-1])
d = self.white_marubozu_candle(self.data.iloc[-1])
e = self.data.iloc[-1]['Low'] > self.data.iloc[-2]['High']
if a and b and c and d and e:
return True
else:
return False
def bullish_meeting_lines(self):
'''
Look for a tall black candle followed by a tall white candle in a downward price trend.
The two closes should be near one another.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_black_candle(self.data.iloc[-2])
c = self.tall_white_candle(self.data.iloc[-1])
d = self.similar_price(self.data.iloc[-1]['Close'], self.data.iloc[-2]['Close'])
if a and b and c and d:
return True
else:
return False
def bullish_separating_lines(self):
'''
Look for a tall black candle in an upward price trend followed by a tall white candle.
The two candles share a common opening price.
'''
a = self.up_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_black_candle(self.data.iloc[-2])
c = self.tall_white_candle(self.data.iloc[-1])
d = self.similar_price(self.data.iloc[-1]['Open'], self.data.iloc[-2]['Open'])
if a and b and c and d:
return True
else:
return False
def bullish_side_by_side_white_lines(self):
'''
Look for three white candles in an upward price trend.
The last two candles should have bodies of similar size,
open near the same price and above the top of the body of the first white candle.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.white_candle(self.data.iloc[-1]) and self.white_candle(self.data.iloc[-2]) and self.white_candle(self.data.iloc[-3])
c = (self.similar_price(self.data.iloc[-1]['Open'], self.data.iloc[-2]['Open'])) and (self.similar_price(self.data.iloc[-1]['Close'], self.data.iloc[-2]['Close']))
d = (self.data.iloc[-1]['Open'] > self.data.iloc[-3]['Close']) and (self.data.iloc[-2]['Open'] > self.data.iloc[-3]['Close'])
if a and b and c and d:
return True
else:
return False
def bullish_three_line_strike(self):
'''
Look for three white candles each with a higher close.
A tall black candle should open higher, but close below the open of the first candle.
'''
a = self.up_price_trend(self.data.iloc[-4], self.data.iloc[-5], self.data.iloc[-7])
b = (self.white_candle(self.data.iloc[-4])) and (self.white_candle(self.data.iloc[-3])) and (self.white_candle(self.data.iloc[-2]))
c = (self.data.iloc[-4]['Close'] < self.data.iloc[-3]['Close']) and (self.data.iloc[-2]['Close'] > self.data.iloc[-3]['Close'])
d = self.tall_black_candle(self.data.iloc[-1])
e = (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Close']) and (self.data.iloc[-1]['Close'] < self.data.iloc[-4]['Open'])
if a and b and c and d and e:
return True
else:
return False
def bullish_tri_star(self):
'''
Look for three doji after a downward price trend.
The middle doji has a body below the other two.
'''
a = self.down_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = (self.doji(self.data.iloc[-3])) and (self.doji(self.data.iloc[-2])) and (self.doji(self.data.iloc[-1]))
c = max(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open']) < min(self.data.iloc[-1]['Close'], self.data.iloc[-1]['Open'])
d = max(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open']) < min(self.data.iloc[-3]['Close'], self.data.iloc[-3]['Open'])
if a and b and c and d:
return True
else:
return False
def collapsing_doji_star(self):
'''
Look for a white candle in an upward price trend.
Following that, find a doji that gaps below yesterday's low.
The last day is a black candle that also gaps below the doji.
None of the shadows on the three candles should overlap, so there should be gaps surrounding the doji.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.white_candle(self.data.iloc[-3])
c = (self.doji(self.data.iloc[-2])) and (self.data.iloc[-2]['High'] < self.data.iloc[-3]['Low'])
d = (self.black_candle(self.data.iloc[-1])) and (self.data.iloc[-1]['High'] < self.data.iloc[-2]['Low'])
if a and b and c and d:
return True
else:
return False
def concealing_baby_swallow(self):
'''
Look for four black candles.
The first two are long black marubozu candles followed the next day by a candle with a tall upper shadow.
The candle gaps open downward but price trades into the body of the prior day.
The last candle engulfs the prior day, including the shadows (a higher high and lower low than the prior day).
'''
a = self.down_price_trend(self.data.iloc[-4], self.data.iloc[-5], self.data.iloc[-7])
b = (self.tall_black_candle(self.data.iloc[-4])) and (self.black_marubozu_candle(self.data.iloc[-4]))
c = (self.tall_black_candle(self.data.iloc[-3])) and (self.black_marubozu_candle(self.data.iloc[-3]))
d = self.black_candle(self.data.iloc[-2]) and ((self.data.iloc[-2]['High'] - self.data.iloc[-2]['Open']) > self.body_candle(self.data.iloc[-2]))
e = (self.data.iloc[-2]['Open'] < self.data.iloc[-3]['Close']) and (self.data.iloc[-2]['High'] > self.data.iloc[-3]['Close'])
f = (self.data.iloc[-1]['High'] < self.data.iloc[-2]['Open']) and (self.data.iloc[-1]['Low'] > self.data.iloc[-2]['Close'])
if a and b and c and d and e and f:
return True
else:
return False
def dark_cloud_cover(self):
'''
Look for two candles in an upward price trend.
The first candle is a tall white one followed by a black candle with an opening price above the top of the white candle
(an opening price above the prior high), but a close below the mid point of the white body.
'''
a = self.up_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_white_candle(self.data.iloc[-2])
c = (self.black_candle(self.data.iloc[-1])) and (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['High'])
d = (self.data.iloc[-1]['Close'] < (self.data.iloc[-2]['Open'] + self.data.iloc[-2]['Close'])/2.)
if a and b and c and d:
return True
else:
return False
def deliberation(self):
'''
Look for three white candlesticks in an upward price trend.
The first two are tall bodied candles but the third has a small body that opens near the second day's close.
Each candle opens and closes higher than the previous one.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_white_candle(self.data.iloc[-3]) and self.tall_white_candle(self.data.iloc[-2])
c = self.white_candle(self.data.iloc[-1]) and (not self.tall_white_candle(self.data.iloc[-1]))
d = self.similar_price(self.data.iloc[-1]['Open'], self.data.iloc[-2]['Close'])
e = (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Open']) and (self.data.iloc[-2]['Open'] > self.data.iloc[-3]['Open'])
f = (self.data.iloc[-1]['Close'] > self.data.iloc[-2]['Close']) and (self.data.iloc[-2]['Close'] > self.data.iloc[-3]['Close'])
if a and b and c and d and e and f:
return True
else:
return False
def gapping_down_doji(self):
'''
In a downtrend, price gaps lower and forms a doji
(a candle in which the opening and closing prices are no more than a few pennies apart).
'''
a = self.down_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
b = self.doji(self.data.iloc[-1])
c = self.data.iloc[-1]['High'] < self.data.iloc[-2]['Low']
if a and b and c:
return True
else:
return False
def gapping_up_doji(self):
'''
Price gaps higher, including the shadows, in an uptrend and forms a doji candle.
A doji is one in which the opening and closing prices are within pennies of each other.
'''
a = self.up_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
b = self.doji(self.data.iloc[-1])
c = self.data.iloc[-1]['Low'] > self.data.iloc[-2]['High']
if a and b and c:
return True
else:
return False
def northern_doji(self):
'''
Look for a candle in which the opening and closing prices are within pennies of each other (a doji) in an up trend.
'''
a = self.up_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
b = self.doji(self.data.iloc[-1])
if a and b:
return True
else:
return False
def southern_doji(self):
'''
Look for a doji candlestick (one in which the opening and closing prices are a few pennies from each other) in a downward price trend.
'''
a = self.down_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
b = self.doji(self.data.iloc[-1])
if a and b:
return True
else:
return False
def bearish_doji_star(self):
'''
Look for a two-candle pattern in an uptrend.
The first candle is a long white one.
The next day, price gaps higher and the body remains above the prior body.
A doji forms with the opening and closing prices within pennies of each other.
The shadows on the doji should be comparatively short.
'''
a = self.up_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_white_candle(self.data.iloc[-2])
c = self.doji(self.data.iloc[-1]) and (not self.dragonfly_doji(self.data.iloc[-1])) and (not self.gravestone_doji(self.data.iloc[-1])) and (not self.long_legged_doji(self.data.iloc[-1]))
d = min(self.data.iloc[-1]['Open'], self.data.iloc[-1]['Close']) > self.data.iloc[-2]['Close']
if a and b and c and d:
return True
else:
return False
def bullish_doji_star(self):
'''
Look for a tall black candle on the first day followed by a doji
(where the opening and closing prices are within pennies of each other)
that gaps below the prior candle's body.
The shadows can overlap, but the doji's shadows should not be unusually long, whatever that means.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_black_candle(self.data.iloc[-2])
c = self.doji(self.data.iloc[-1]) and (not self.dragonfly_doji(self.data.iloc[-1])) and (not self.gravestone_doji(self.data.iloc[-1])) and (not self.long_legged_doji(self.data.iloc[-1]))
d = max(self.data.iloc[-1]['Open'], self.data.iloc[-1]['Close']) < self.data.iloc[-2]['Close']
if a and b and c and d:
return True
else:
return False
def evening_doji(self):
'''
Look for a tall white candle in an upward price trend followed by a doji whose body gaps above the two surrounding days.
Ignore the shadows. The last day is a tall black candle that closes at or below the mid point of the first day.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_white_candle(self.data.iloc[-3])
c = self.doji(self.data.iloc[-2])
d = (min(self.data.iloc[-2]['Open'],self.data.iloc[-2]['Close']) > self.data.iloc[-3]['Close']) and (min(self.data.iloc[-2]['Open'],self.data.iloc[-2]['Close']) > self.data.iloc[-1]['Open'])
e = self.tall_black_candle(self.data.iloc[-1])
f = self.data.iloc[-1]['Close'] <= (self.data.iloc[-3]['Close'] + self.data.iloc[-3]['Open'])/2.
if a and b and c and d and e and f:
return True
else:
return False
def downside_gap_three_methods(self):
'''
Look for two long black bodied candles in a downward price trend.
The second candle should have a gap between them (shadows do not overlap).
The last day is a white candle that opens within the body of the prior day and
closes within the body of the first day, closing the gap between the two black candles.
'''
a = self.down_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.black_candle(self.data.iloc[-3]) and self.black_candle(self.data.iloc[-2])
c = self.data.iloc[-3]['Low'] > self.data.iloc[-2]['High']
d = self.white_candle(self.data.iloc[-1])
e = (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Open'])and (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Close'])
f = (self.data.iloc[-1]['Close'] < self.data.iloc[-3]['Open'])and (self.data.iloc[-1]['Close'] > self.data.iloc[-3]['Close'])
if a and b and c and d and e and f:
return True
else:
return False
def downside_tasuki_gap(self):
'''
Look for a black candle in a downward price trend followed by another black candle,
but this one gaps lower with no shadow overlap between the two candles.
The final day sees a white candle print on the chart,
one that opens within the body of the second candle and closes within the gap between the first and second candles.
'''
a = self.down_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.black_candle(self.data.iloc[-3]) and self.black_candle(self.data.iloc[-2])
c = self.data.iloc[-3]['Low'] > self.data.iloc[-2]['High']
d = self.white_candle(self.data.iloc[-1])
e = (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Close']) and (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Open'])
f = (self.data.iloc[-1]['Close'] > self.data.iloc[-2]['High']) and (self.data.iloc[-1]['Close'] < self.data.iloc[-3]['Low'])
if a and b and c and d and e and f:
return True
else:
return False
def falling_three_methods(self):
'''
Look for a series of five candles in a downward price trend.
The first day should be a tall black candle followed by three up trending small white candles
(except the middle of the three, which can be either black or white),
followed by another tall black candle with a close below the first day's close.
The three middle candles should remain within the high-low range of the first candle.
'''
a = self.down_price_trend(self.data.iloc[-5], self.data.iloc[-6], self.data.iloc[-8])
b = self.tall_black_candle(self.data.iloc[-5])
c = self.small_white_candle(self.data.iloc[-4]) and self.small_white_candle(self.data.iloc[-2]) and (self.small_black_candle(self.data.iloc[-3]) or self.small_white_candle(self.data.iloc[-3]))
d = self.tall_black_candle(self.data.iloc[-1]) and (self.data.iloc[-1]['Close'] < self.data.iloc[-5]['Close'])
e = (self.data.iloc[-4]['High'] < self.data.iloc[-5]['High']) and (self.data.iloc[-3]['High'] < self.data.iloc[-5]['High']) and (self.data.iloc[-2]['High'] < self.data.iloc[-5]['High'])
f = (self.data.iloc[-4]['Low'] > self.data.iloc[-5]['Low']) and (self.data.iloc[-3]['Low'] > self.data.iloc[-5]['Low']) and (self.data.iloc[-2]['Low'] > self.data.iloc[-5]['Low'])
if a and b and c and d and e and f:
return True
else:
return False
def falling_window(self):
'''
Find a pattern in which yesterday's low is above today's high.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.data.iloc[-2]['Low'] > self.data.iloc[-1]['High']
if a and b:
return True
else:
return False
def hammer(self):
'''
Look for the hammer to appear in a downward price trend and
have a long lower shadow at least two or three times the height of the body with little or no upper shadow.
'''
a = self.down_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
b = (min(self.data.iloc[-1]['Open'], self.data.iloc[-1]['Close']) - self.data.iloc[-1]['Low']) > 2 * self.body_candle(self.data.iloc[-1])
c = self.similar_price(self.data.iloc[-1]['High'], max(self.data.iloc[-1]['Open'], self.data.iloc[-1]['Close']))
if a and b and c:
return True
else:
return False
def inverted_hammer(self):
'''
Look for a tall black candle with a close near the day's low followed by a short candle with a tall upper shadow and little or no lower shadow.
The second candle cannot be a doji
(opening and closing prices cannot be within pennies of each other) and
the open on the second candle must be below the prior candle's close.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_black_candle(self.data.iloc[-2]) and self.similar_price(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Low'])
c = (not self.doji(self.data.iloc[-1])) and (self.small_white_candle(self.data.iloc[-1]) or self.small_black_candle(self.data.iloc[-1]))
d = self.similar_price(self.data.iloc[-1]['Low'], min(self.data.iloc[-1]['Open'], self.data.iloc[-1]['Close']))
e = (self.data.iloc[-1]['High'] - max(self.data.iloc[-1]['Open'], self.data.iloc[-1]['Close'])) > 2 * self.body_candle(self.data.iloc[-1])
f = self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Close']
if a and b and c and d and e and f:
return True
else:
return False
def hanging_man(self):
'''
Look for a small bodied candle atop a long lower shadow in an uptrend.
'''
a = self.up_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
b = self.small_white_candle(self.data.iloc[-1]) or self.small_black_candle(self.data.iloc[-1])
c = (min(self.data.iloc[-1]['Open'], self.data.iloc[-1]['Close']) - self.data.iloc[-1]['Low']) > 2 * self.body_candle(self.data.iloc[-1])
d = self.similar_price(self.data.iloc[-1]['High'], max(self.data.iloc[-1]['Open'], self.data.iloc[-1]['Close']))
if a and b and c and d:
return True
else:
return False
def high_wave(self):
'''
Look for tall upper and lower shadows attached to a small body.
The body is not a doji (meaning that the opening and closing prices must be more than a few pennies apart).
'''
a = self.small_white_candle(self.data.iloc[-1]) or self.small_black_candle(self.data.iloc[-1])
b = not self.doji(self.data.iloc[-1])
c = (self.data.iloc[-1]['High'] - max(self.data.iloc[-1]['Open'], self.data.iloc[-1]['Close'])) > 2 * self.body_candle(self.data.iloc[-1])
d = (min(self.data.iloc[-1]['Open'], self.data.iloc[-1]['Close']) - self.data.iloc[-1]['Low']) > 2 * self.body_candle(self.data.iloc[-1])
if a and b and c and d:
return True
else:
return False
def homing_pigeon(self):
'''
Look for a two line candle in a downward price trend.
The first day should be a tall black body followed by a small black body that fits inside the body of the prior day.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_black_candle(self.data.iloc[-2])
c = self.small_black_candle(self.data.iloc[-1])
d = self.data.iloc[-1]['Close'] > self.data.iloc[-2]['Close']
e = self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Open']
if a and b and c and d and e:
return True
else:
return False
def identical_three_crows(self):
'''
Look for three tall black candles, the last two opening near the prior candle's close.
Some sources require each candle to be similar in size, but this one is rare enough without that restriction.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = (self.tall_black_candle(self.data.iloc[-3])) and (self.tall_black_candle(self.data.iloc[-2])) and (self.tall_black_candle(self.data.iloc[-1]))
c = self.similar_price(self.data.iloc[-2]['Open'], self.data.iloc[-3]['Close']) and self.similar_price(self.data.iloc[-1]['Open'], self.data.iloc[-2]['Close'])
if a and b and c:
return True
else:
return False
def in_neck(self):
'''
Look for a tall black candle in a downward price trend.
The next day, a white candle opens below the black day's low, but closes just into the body of the black candle.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_black_candle(self.data.iloc[-2])
c = self.white_candle(self.data.iloc[-1])
d = (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Low']) and (self.data.iloc[-1]['Close'] > self.data.iloc[-2]['Close']) and (self.data.iloc[-1]['Close'] < (self.data.iloc[-2]['Close']+self.data.iloc[-2]['Open'])/2.)
if a and b and c and d:
return True
else:
return False
def ladder_bottom(self):
'''
Look for a series of 5 candles in a downward price trend.
The first three days should be tall black candles, each with a lower open and close.
The 4th day should be a black candle with an upper shadow,
and the last day should be a white candle that gaps open above the body of the prior day.
'''
a = self.down_price_trend(self.data.iloc[-5], self.data.iloc[-6], self.data.iloc[-8])
b = self.tall_black_candle(self.data.iloc[-5]) and self.tall_black_candle(self.data.iloc[-4]) and self.tall_black_candle(self.data.iloc[-3])
c = (self.data.iloc[-4]['Close'] < self.data.iloc[-5]['Close']) and (self.data.iloc[-3]['Close'] < self.data.iloc[-4]['Close'])
d = (self.data.iloc[-4]['Open'] < self.data.iloc[-5]['Open']) and (self.data.iloc[-3]['Open'] < self.data.iloc[-4]['Open'])
e = self.black_candle(self.data.iloc[-2]) and (self.data.iloc[-2]['High'] > self.data.iloc[-2]['Open'])
f = self.white_candle(self.data.iloc[-1]) and (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Open'])
if a and b and c and d and e and f:
return True
else:
return False
def last_engulfing_bottom(self):
'''
Look for a white candle on the first day in a downward price trend followed by a black candle that engulfs the body of the white candle.
That means the black candle has a body this is above the top and below the bottom of the white candle.
Ignore the shadows.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.white_candle(self.data.iloc[-2]) and self.black_candle(self.data.iloc[-1])
c = (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Close']) and (self.data.iloc[-1]['Close'] < self.data.iloc[-2]['Open'])
if a and b and c:
return True
else:
return False
def last_engulfing_top(self):
'''
Look for a black candle followed by a white candle that overlaps the prior black candle's body.
The white candle should have a body above the prior candle's top and below the prior candle's bottom.
'''
a = self.up_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.white_candle(self.data.iloc[-1]) and self.black_candle(self.data.iloc[-2])
c = (self.data.iloc[-2]['Low'] > self.data.iloc[-1]['Open']) and (self.data.iloc[-2]['High'] < self.data.iloc[-1]['Close'])
if a and b and c:
return True
else:
return False
def matching_low(self):
'''
Look for a black candle with a tall body.
Following that, find a black body with a close (not the low) that matches the prior close.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_black_candle(self.data.iloc[-2]) and self.black_candle(self.data.iloc[-1])
c = self.data.iloc[-1]['Close'] == self.data.iloc[-2]['Close']
if a and b and c:
return True
else:
return False
def mat_hold(self):
'''
Look for a tall white candle to start the pattern.
The next day a small black candle has a higher close.
The third day can be any color but it is also a small candle.
The fourth day is, again, a small black candle and all three candles (days 2 to 4)
show a downward price trend but their bodies remain above the low of the first day.
The last day is another tall white candle with a close above the high of the prior four candles.
'''
a = self.up_price_trend(self.data.iloc[-5], self.data.iloc[-6], self.data.iloc[-8])
b = self.tall_white_candle(self.data.iloc[-5])
c = self.small_black_candle(self.data.iloc[-4]) and (self.data.iloc[-4]['Close'] > self.data.iloc[-5]['Close'])
d = self.small_black_candle(self.data.iloc[-3]) or self.small_white_candle(self.data.iloc[-3])
e = self.small_black_candle(self.data.iloc[-2]) and self.down_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
f = (self.data.iloc[-2]['Close'] > self.data.iloc[-5]['Low']) and (min(self.data.iloc[-3]['Close'], self.data.iloc[-3]['Open'])> self.data.iloc[-5]['Low']) \
and (self.data.iloc[-4]['Close'] > self.data.iloc[-5]['Low'])
g = self.tall_white_candle(self.data.iloc[-1]) and self.data.iloc[-1]['Close'] > max(self.data.iloc[-2]['High'], self.data.iloc[-3]['High'], self.data.iloc[-4]['High'], self.data.iloc[-5]['High'])
if a and b and c and d and e and f and g:
return True
else:
return False
def morning_doji_star(self):
'''
Look for a tall black candle in a downward price trend.
The next day, a doji appears and its body gaps below the prior candle's body.
The final day is a tall white candle whose body gaps above the doji's.
'''
a = self.down_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_black_candle(self.data.iloc[-3]) and self.doji(self.data.iloc[-2])
c = max(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open']) < self.data.iloc[-3]['Close']
d = self.tall_white_candle(self.data.iloc[-1]) and (self.data.iloc[-1]['Open'] > max(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open']))
if a and b and c and d:
return True
else:
return False
def morning_star(self):
'''
Look for a tall black candle in a downward price trend.
Following that, a small bodied candle of any color appears, one whose body gaps below the prior body.
The last day is a tall white candle that gaps above the body of the second candle and closes at least midway into the body of the first day.
'''
a = self.down_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_black_candle(self.data.iloc[-3]) and (self.small_black_candle(self.data.iloc[-2]) or self.small_white_candle(self.data.iloc[-2]))
c = max(self.data.iloc[-2]['Open'], self.data.iloc[-2]['Close']) < self.data.iloc[-3]['Close']
d = self.tall_white_candle(self.data.iloc[-1]) and (self.data.iloc[-1]['Open'] > max(self.data.iloc[-2]['Close'], self.data.iloc[-2]['Open']))
if a and b and c and d:
return True
else:
return False
def on_neck(self):
'''
Look for a tall black candle in a downward price trend. Following that, a white candle has a close that matches (or nearly matches) the prior low.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.tall_black_candle(self.data.iloc[-2])
c = self.white_candle(self.data.iloc[-1]) and self.similar_price(self.data.iloc[-1]['Close'], self.data.iloc[-2]['Low'])
if a and b and c:
return True
else:
return False
def piercing_pattern(self):
'''
Look for a black candle followed by a white one that opens below the black candle's low and closes between the midpoint of the black body and opening price.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.black_candle(self.data.iloc[-2]) and self.white_candle(self.data.iloc[-1])
c = self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Low']
d = (self.data.iloc[-1]['Close'] < self.data.iloc[-2]['Open']) and (self.data.iloc[-1]['Close'] > (self.data.iloc[-2]['Open'] - self.body_candle(self.data.iloc[-2])/2.))
if a and b and c and d:
return True
else:
return False
def rickshaw_man(self):
'''
Look for the opening and closing prices to be within pennies of each other,
unusually tall upper and lower shadows, and the body to be near the middle of the candlestick.
'''
a = self.long_legged_doji(self.data.iloc[-1])
b = self.similar_price(self.data.iloc[-1]['Open'], (self.data.iloc[-1]['High'] + self.data.iloc[-1]['Low'])/2.) or self.similar_price(self.data.iloc[-1]['Close'], (self.data.iloc[-1]['High'] + self.data.iloc[-1]['Low'])/2.)
if a and b:
return True
else:
return False
def rising_three_methods(self):
'''
Look for a tall white candle followed by three small candles that trend lower but close within the high-low range of the first candle.
Candles 2 and 4 are black, but day 3 can be any color.
The final candle in the pattern is a tall white one that closes above the close of the first day.
'''
a = self.up_price_trend(self.data.iloc[-5], self.data.iloc[-6], self.data.iloc[-8])
b = self.tall_white_candle(self.data.iloc[-5])
c = self.down_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
d = self.small_black_candle(self.data.iloc[-4]) and (self.data.iloc[-4]['Close'] < self.data.iloc[-5]['High']) and (self.data.iloc[-4]['Close'] > self.data.iloc[-5]['Low'])
e = (self.small_black_candle(self.data.iloc[-3]) or self.small_white_candle(self.data.iloc[-3])) and (self.data.iloc[-3]['Close'] < self.data.iloc[-5]['High']) and (self.data.iloc[-3]['Close'] > self.data.iloc[-5]['Low'])
f = self.small_black_candle(self.data.iloc[-2]) and (self.data.iloc[-2]['Close'] < self.data.iloc[-5]['High']) and (self.data.iloc[-2]['Close'] > self.data.iloc[-5]['Low'])
g = self.tall_white_candle(self.data.iloc[-1]) and (self.data.iloc[-1]['Close'] > self.data.iloc[-5]['Close'])
if a and b and c and d and e and f and g:
return True
else:
return False
def rising_window(self):
'''
Find a pattern in which yesterday's high is below today's low.
'''
a = self.up_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.data.iloc[-2]['High'] < self.data.iloc[-1]['Low']
if a and b:
return True
else:
return False
def shooting_star_1(self):
'''
Look for a small bodied candle (but not a doji) with little or no lower shadow and
a tall upper shadow at least twice the height of the body.
'''
a = self.up_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
b = self.small_black_candle(self.data.iloc[-1]) or self.small_white_candle(self.data.iloc[-1])
c = self.similar_price(self.data.iloc[-1]['Low'], min(self.data.iloc[-1]['Close'], self.data.iloc[-1]['Open']))
d = (self.data.iloc[-1]['High'] - max(self.data.iloc[-1]['Close'], self.data.iloc[-1]['Open'])) > 2 * self.body_candle(self.data.iloc[-1])
if a and b and c and d:
return True
else:
return False
def shooting_star_2(self):
'''
Look for two candles in an upward price trend.
The first candle is white followed by a small bodied candle with an upper shadow at least three times the height of the body.
The candle has no lower shadow or a very small one and there is a gap between the prices of the two bodies.
The second candle can be any color.
'''
a = self.up_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.white_candle(self.data.iloc[-2])
c = self.small_black_candle(self.data.iloc[-1]) or self.small_white_candle(self.data.iloc[-1])
d = (self.data.iloc[-1]['High'] - max(self.data.iloc[-1]['Close'], self.data.iloc[-1]['Open'])) > 3 * self.body_candle(self.data.iloc[-1])
e = self.similar_price(self.data.iloc[-1]['Low'], min(self.data.iloc[-1]['Close'], self.data.iloc[-1]['Open']))
f = self.data.iloc[-1]['Low'] > self.data.iloc[-2]['Close']
if a and b and c and d and e and f:
return True
else:
return False
def stick_sandwich(self):
'''
Look for a black candle in a falling price trend.
The second candle is white and it trades above the close of the prior day.
The last candle is a black one that closes at or near the close of the first day.
'''
a = self.down_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.black_candle(self.data.iloc[-3]) and self.white_candle(self.data.iloc[-2]) and self.black_candle(self.data.iloc[-1])
c = (self.data.iloc[-2]['Low'] > self.data.iloc[-3]['Close'])
d = self.similar_price(self.data.iloc[-1]['Close'], self.data.iloc[-3]['Close'])
if a and b and c and d:
return True
else:
return False
def takuri_line(self):
'''
A small bodied candle with a lower shadow at least three times the height of the body and little or no upper shadow.
'''
a = self.down_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
b = self.small_black_candle(self.data.iloc[-1]) or self.small_white_candle(self.data.iloc[-1])
c = self.similar_price(self.data.iloc[-1]['High'], max(self.data.iloc[-1]['Close'], self.data.iloc[-1]['Open']))
d = abs(self.data.iloc[-1]['Low'] - min(self.data.iloc[-1]['Close'], self.data.iloc[-1]['Open'])) > 3 * self.body_candle(self.data.iloc[-1])
if a and b and c and d:
return True
else:
return False
def three_black_crows(self):
'''
Look for three tall black candles that appear in an upward price trend.
Candles 2 and 3 of the pattern should open within the body of the prior candle,
and all three should close near their lows, making new lows along the way.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_black_candle(self.data.iloc[-3]) and self.tall_black_candle(self.data.iloc[-2]) and self.tall_black_candle(self.data.iloc[-1])
c = (self.data.iloc[-2]['Open'] > self.data.iloc[-3]['Close']) and (self.data.iloc[-2]['Open'] < self.data.iloc[-3]['Open'])
d = (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Close']) and (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Open'])
e = self.similar_price(self.data.iloc[-3]['Low'], self.data.iloc[-3]['Close']) and self.similar_price(self.data.iloc[-2]['Low'], self.data.iloc[-2]['Close']) and self.similar_price(self.data.iloc[-1]['Low'], self.data.iloc[-1]['Close'])
f = (self.data.iloc[-3]['Low'] > self.data.iloc[-2]['Low']) and (self.data.iloc[-2]['Low'] > self.data.iloc[-1]['Low'])
if a and b and c and d and e and f:
return True
else:
return False
def three_inside_down(self):
'''
Look for a tall white candle in an upward price trend.
Following that, a small black candle appears with the open and close within the body of the first day.
The tops or bottoms of the two bodies can be the same price, but not both.
The last day must close lower, but can be any color.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_white_candle(self.data.iloc[-3])
c = self.small_black_candle(self.data.iloc[-2])
d = (self.data.iloc[-2]['Open'] < self.data.iloc[-3]['Close']) and (self.data.iloc[-2]['Close'] > self.data.iloc[-3]['Open'])
e = (self.data.iloc[-1]['Close'] < self.data.iloc[-2]['Close'])
if a and b and c and d and e:
return True
else:
return False
def three_inside_up(self):
'''
Look for a tall black candle in a downward price trend.
The next day, a small bodied white candle has a body that is within the body of the prior candle.
The tops or bottoms of the bodies can be the same price, but not both.
The last day is a white candle that closes above the prior close.
'''
a = self.down_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_black_candle(self.data.iloc[-3])
c = self.small_white_candle(self.data.iloc[-2])
d = (self.data.iloc[-2]['Open'] > self.data.iloc[-3]['Close']) and (self.data.iloc[-2]['Close'] < self.data.iloc[-3]['Open'])
e = self.white_candle(self.data.iloc[-1]) and (self.data.iloc[-1]['Close'] > self.data.iloc[-2]['Close'])
if a and b and c and d and e:
return True
else:
return False
def three_outside_down(self):
'''
Look for a white candle in an upward price trend.
Following that, a black candle opens higher and closes lower than the prior candle's body.
The last day is a candle with a lower close.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.white_candle(self.data.iloc[-3])
        c = self.black_candle(self.data.iloc[-2]) and (self.data.iloc[-2]['Open'] > self.data.iloc[-3]['Close']) and (self.data.iloc[-2]['Close'] < self.data.iloc[-3]['Open'])
d = self.data.iloc[-1]['Close'] < self.data.iloc[-2]['Close']
if a and b and c and d:
return True
else:
return False
def three_outside_up(self):
'''
Look for a black candle in a downward price trend.
Following that, a white candle opens below the prior body and closes above it, too.
The last day is a candle in which price closes higher, according to Morris who developed the candle.
'''
a = self.down_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.black_candle(self.data.iloc[-3])
        c = self.white_candle(self.data.iloc[-2]) and (self.data.iloc[-2]['Open'] < self.data.iloc[-3]['Close']) and (self.data.iloc[-2]['Close'] > self.data.iloc[-3]['Open'])
d = self.data.iloc[-1]['Close'] > self.data.iloc[-2]['Close']
if a and b and c and d:
return True
else:
return False
def three_stars_in_south(self):
'''
Look for a tall black candle with a long lower shadow to appear in a downward price trend.
The second day should be similar to the first day, but smaller and with a higher low.
The last day is a black marubozu that squeezes inside the high-low range of the prior day.
Good luck finding one.
'''
a = self.down_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_black_candle(self.data.iloc[-3]) and ((self.data.iloc[-3]['Close']-self.data.iloc[-3]['Low']) > self.body_candle(self.data.iloc[-3]))
c = self.tall_black_candle(self.data.iloc[-2]) and ((self.data.iloc[-2]['Close']-self.data.iloc[-2]['Low']) > self.body_candle(self.data.iloc[-2]))
d = self.data.iloc[-2]['Low'] > self.data.iloc[-3]['Low']
e = self.black_marubozu_candle(self.data.iloc[-1]) and (self.data.iloc[-1]['High'] < self.data.iloc[-2]['High']) and (self.data.iloc[-1]['Low'] > self.data.iloc[-2]['Low'])
if a and b and c and d and e:
return True
else:
return False
def three_white_soldiers(self):
'''
Look for three tall white candles, each with a close near the high, higher closes, and
        bodies that overlap (an opening price within the prior candle's body).
'''
a = self.down_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_white_candle(self.data.iloc[-3]) and self.tall_white_candle(self.data.iloc[-2]) and self.tall_white_candle(self.data.iloc[-1])
c = self.similar_price(self.data.iloc[-3]['High'], self.data.iloc[-3]['Close']) and self.similar_price(self.data.iloc[-2]['High'], self.data.iloc[-2]['Close']) and self.similar_price(self.data.iloc[-1]['High'], self.data.iloc[-1]['Close'])
d = (self.data.iloc[-3]['High'] < self.data.iloc[-2]['High']) and (self.data.iloc[-2]['High'] < self.data.iloc[-1]['High'])
e = (self.data.iloc[-2]['Open'] > self.data.iloc[-3]['Open']) and (self.data.iloc[-2]['Open'] < self.data.iloc[-3]['Close'])
f = (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Open']) and (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Close'])
if a and b and c and d and e and f:
return True
else:
return False
def thrusting(self):
'''
Look for a black candle in a downward price trend followed by a white candle that
opens below the prior low but closes near but below the midpoint of the black candle's body.
'''
a = self.down_price_trend(self.data.iloc[-2], self.data.iloc[-3], self.data.iloc[-5])
b = self.black_candle(self.data.iloc[-2]) and self.white_candle(self.data.iloc[-1])
c = (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Low']) and (self.data.iloc[-1]['Close'] < (self.data.iloc[-2]['Open'] - self.body_candle(self.data.iloc[-2])/2.)) and \
(self.data.iloc[-1]['Close'] > (self.data.iloc[-2]['Close'] + self.body_candle(self.data.iloc[-2])/4.))
if a and b and c:
return True
else:
return False
def tweezers_bottom(self):
'''
Look for two candles sharing the same low price.
'''
a = self.down_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
b = self.data.iloc[-1]['Low'] == self.data.iloc[-2]['Low']
if a and b:
return True
else:
return False
def tweezers_top(self):
'''
Look for two adjacent candlesticks with the same (or nearly the same) high price in an uptrend.
'''
a = self.up_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
b = self.similar_price(self.data.iloc[-1]['High'], self.data.iloc[-2]['High'])
if a and b:
return True
else:
return False
def two_black_gapping(self):
'''
Look for a price gap followed by two black candles.
The second black candle should have a high below the prior candle's high.
'''
a = self.down_price_trend(self.data.iloc[-1], self.data.iloc[-2], self.data.iloc[-4])
b = self.black_candle(self.data.iloc[-2]) and self.black_candle(self.data.iloc[-1])
c = self.data.iloc[-2]['High'] < self.data.iloc[-3]['Low']
d = self.data.iloc[-1]['High'] < self.data.iloc[-2]['High']
if a and b and c and d:
return True
else:
return False
def two_crows(self):
'''
Look for a tall white candle in an upward price trend.
Following that, a black candle has a body that gaps above the prior candle's body.
The last day is another black candle, but this one opens within the prior candle's body and closes within the body of the first candle in the pattern.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_white_candle(self.data.iloc[-3])
c = self.black_candle(self.data.iloc[-2]) and (self.data.iloc[-2]['Close'] > self.data.iloc[-3]['Close'])
d = self.black_candle(self.data.iloc[-1]) and (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Open']) and (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Close'])
        e = (self.data.iloc[-1]['Close'] > self.data.iloc[-3]['Open']) and (self.data.iloc[-1]['Close'] < self.data.iloc[-3]['Close'])
if a and b and c and d and e:
return True
else:
return False
def unique_three_river_bottom(self):
'''
Look for a tall bodied black candle in a downward price trend.
Following that, another black body rests inside the prior body, but the lower shadow is below the prior day's low.
The last day is a short bodied white candle that remains below the body of the prior candle.
'''
a = self.down_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_black_candle(self.data.iloc[-3])
c = self.black_candle(self.data.iloc[-2]) and (self.data.iloc[-2]['Low'] < self.data.iloc[-3]['Low'])
        d = (self.data.iloc[-2]['Open'] < self.data.iloc[-3]['Open']) and (self.data.iloc[-2]['Close'] > self.data.iloc[-3]['Close'])
e = self.small_white_candle(self.data.iloc[-1]) and (self.data.iloc[-1]['Close'] < self.data.iloc[-2]['Close'])
if a and b and c and d and e:
return True
else:
return False
def upside_gap_three_methods(self):
'''
Look for two tall white candles in an upward price trend.
There should be a gap between them, including between the shadows.
The last day is a black candle that fills the gap created by the first two days.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_white_candle(self.data.iloc[-3]) and self.tall_white_candle(self.data.iloc[-2])
c = self.data.iloc[-3]['High'] < self.data.iloc[-2]['Low']
d = self.black_candle(self.data.iloc[-1]) and (self.data.iloc[-1]['Close'] < self.data.iloc[-3]['Close']) and (self.data.iloc[-1]['Close'] > self.data.iloc[-3]['Open'])
e = (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Close']) and (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Open'])
if a and b and c and d and e:
return True
else:
return False
def upside_gap_two_crows(self):
'''
Look for a tall white candle in an upward price trend.
Then find a black candle with a body gapping above the prior candle's body.
The last day is another black candle that engulfs the body of the middle day with a close that
remains above the close of the first candle.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.tall_white_candle(self.data.iloc[-3])
c = self.black_candle(self.data.iloc[-2]) and (self.data.iloc[-3]['Close'] < self.data.iloc[-2]['Close'])
d = self.black_candle(self.data.iloc[-1]) and (self.data.iloc[-1]['Close'] < self.data.iloc[-2]['Close']) and (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Open'])
        e = self.data.iloc[-1]['Close'] > self.data.iloc[-3]['Close']
if a and b and c and d and e:
return True
else:
return False
def upside_tasuki_gap(self):
'''
Look for a white candle in an upward price trend.
Following that, find another white candle, but this one gaps higher and that includes a gap between the shadows of the two candles.
The last day is a black candle that opens in the body of the prior candle and closes within the gap created between the first two candles.
'''
a = self.up_price_trend(self.data.iloc[-3], self.data.iloc[-4], self.data.iloc[-6])
b = self.white_candle(self.data.iloc[-3])
c = self.white_candle(self.data.iloc[-2]) and (self.data.iloc[-2]['Low'] > self.data.iloc[-3]['High'])
d = self.black_candle(self.data.iloc[-1]) and (self.data.iloc[-1]['Open'] > self.data.iloc[-2]['Open']) and (self.data.iloc[-1]['Open'] < self.data.iloc[-2]['Close'])
e = (self.data.iloc[-1]['Close'] > self.data.iloc[-3]['Close']) and (self.data.iloc[-1]['Close'] < self.data.iloc[-2]['Open'])
if a and b and c and d and e:
return True
else:
return False
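# Hedged usage sketch (not part of the original module): drive the candle class
# above against the module-level `data` frame loaded at the top of this file.
# Purely illustrative; any symbol with enough history would do.
if __name__ == '__main__':
    detector = candle(data)
    patterns = detector.output()
    print(patterns.T)  # one row per pattern name, True where the pattern matched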
| mit | 3,866,151,280,402,048,000 | 51.540932 | 247 | 0.576738 | false |
practo/r5d4 | r5d4/__init__.py | 1 | 2459 | from __future__ import absolute_import
from flask import Flask, request, Response, json
from werkzeug.exceptions import BadRequest
import r5d4.settings as settings
from r5d4.analytics_browser import browse_analytics
from r5d4.publisher import publish_transaction
from r5d4.utility import json_response
from r5d4.logger import get_activity_log
app = Flask(__name__)
app.config["DEBUG"] = settings.DEBUG
app.config["REDIS_UNIX_SOCKET_PATH"] = settings.REDIS_UNIX_SOCKET_PATH
app.config["REDIS_HOST"] = settings.REDIS_HOST
app.config["REDIS_PORT"] = settings.REDIS_PORT
app.config["CONFIG_DB"] = settings.CONFIG_DB
app.config["DEFAULT_DATA_DB"] = settings.DEFAULT_DATA_DB
app.config["SECRET_KEY"] = settings.SECRET_KEY
activity_log = get_activity_log()
@app.errorhandler(404)
def not_found_handler(error):
error_response = json.dumps({
"status": "Not Found",
"error_message": error.description
}, indent=2)
return Response(status=404,
mimetype='application/json',
response=error_response)
@app.errorhandler(400)
def bad_request_handler(error):
error_response = json.dumps({
"status": "Bad Request",
"error_message": error.description
}, indent=2)
return Response(status=400,
mimetype='application/json',
response=error_response)
@app.errorhandler(503)
def service_unavailable_handler(error):
error_response = json.dumps({
"status": "Service Unavailable",
"error_message": error.description[0],
"error_context": error.description[1]
}, indent=2)
return Response(status=503,
mimetype='application/json',
response=error_response)
@app.route('/analytics/<analytics_name>/', methods=['GET'])
@json_response
def analytics(analytics_name):
return browse_analytics(analytics_name, request.args)
@app.route('/resource/<resource>/', methods=['POST'])
def publish(resource):
payload = request.form["payload"]
tr_type = request.form["tr_type"]
try:
publish_transaction(resource, tr_type, payload)
if activity_log:
activity_log.info("%s\t%s\t%s", tr_type, resource, payload)
except ValueError as e:
raise BadRequest(e)
return Response(status=202, mimetype='application/json',
response=json.dumps({"status": "Accepted"},
indent=2))
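# Hedged client-side sketch (not part of the original module): how a caller might
# exercise the routes above, assuming the app runs on localhost:5000. The resource
# name "orders" and the payload fields are made-up examples.
#
#   import requests
#   requests.post('http://localhost:5000/resource/orders/',
#                 data={'tr_type': 'insert', 'payload': '{"id": 1, "amount": 2}'})
#   requests.get('http://localhost:5000/analytics/daily_sales/')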
| mit | 8,086,123,704,522,078,000 | 32.22973 | 71 | 0.657584 | false |
MartinThoma/pysec | docs/source/conf.py | 1 | 8413 | # -*- coding: utf-8 -*-
#
# pysec documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 9 07:46:05 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pysec'
copyright = u'2015, Martin Thoma'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysecdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pysec.tex', u'pysec Documentation',
u'Martin Thoma', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysec', u'pysec Documentation',
[u'Martin Thoma'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pysec', u'pysec Documentation',
u'Martin Thoma', 'pysec', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| mit | -3,496,438,262,788,808,000 | 30.275093 | 79 | 0.705694 | false |
amlight/ofp_sniffer | ofp_sniffer.py | 1 | 7990 | #!/usr/bin/env python3.6
"""
This code is the AmLight OpenFlow Sniffer
Author: AmLight Dev Team <[email protected]>
"""
import sys
import logging.config
import time
import threading
import yaml
from libs.core.printing import PrintingOptions
from libs.core.sanitizer import Sanitizer
from libs.core.topo_reader import TopoReader
from libs.core.cli import get_params
from libs.core.save_to_file import save_to_file
from libs.core.custom_exceptions import *
from libs.gen.packet import Packet
from apps.oess_fvd import OessFvdTracer
from apps.ofp_stats import OFStats
from apps.ofp_proxies import OFProxy
from apps.influx_client import InfluxClient
from apps.notifications import Notifications
class RunSniffer(object):
"""
The RunSniffer class is the main class for the OpenFlow Sniffer.
This class instantiate all auxiliary classes, captures the packets,
instantiate new OpenFlow messages and triggers all applications.
"""
def __init__(self):
self.printing_options = PrintingOptions()
self.sanitizer = Sanitizer()
self.oft = None
self.stats = None
self.influx = None
self.notifications = None
self.trigger_event = threading.Event()
self.cap = None
self.packet_number = None
self.load_apps = dict()
self.packet_count = 1
self.topo_reader = TopoReader()
self.save_to_file = None
self.ofp_proxy = None
self.load_config()
def load_config(self):
"""
Parses the parameters received and instantiates the
apps requested.
"""
# Get CLI params and call the pcapy loop
self.cap, self.packet_number, \
self.load_apps, sanitizer, \
topo_file, is_to_save = get_params(sys.argv)
self.sanitizer.process_filters(sanitizer)
# Load TopologyReader
self.topo_reader.readfile(topo_file)
# Save to File
self.save_to_file = save_to_file(is_to_save)
# Start Apps
self.ofp_proxy = OFProxy()
if 'oess_fvd' in self.load_apps:
self.oft = OessFvdTracer(self.load_apps['oess_fvd'])
if 'statistics' in self.load_apps:
self.stats = OFStats()
if 'influx' in self.load_apps:
self.influx = InfluxClient(trigger_event=self.trigger_event)
if 'notifications' in self.load_apps:
self.notifications = Notifications(self.load_apps['notifications'])
def run(self):
"""
cap.loop continuously capture packets w/ pcapy. For every
captured packet, self.process_packet method is called.
Exits:
0 - Normal, reached end of file
1 - Normal, user requested with CRTL + C
2 - Error
3 - Interface or file not found
"""
exit_code = 0
# DEBUG:
# self.cap.loop(-1, self.process_packet)
try:
self.cap.loop(-1, self.process_packet)
except EndOfPcapFile:
exit_code = 3
except KeyboardInterrupt:
exit_code = 1
except Exception as exception:
print('Error on packet %s: %s ' % (self.packet_count, exception))
exit_code = 2
finally:
if 'statistics' in self.load_apps:
# If OFP_Stats is running, set a timer
# before closing the app. Useful in cases
# where the ofp_sniffer is reading from a
# pcap file instead of a NIC.
time.sleep(200)
# pass
print('Exiting with code: %s' % exit_code)
# gracefully shut down
if 'influx' in self.load_apps:
self.influx.stop_event.set()
sys.exit(exit_code)
def process_packet(self, header, packet):
"""
Every packet captured by cap.loop is then processed here.
        Packets of at least 54 bytes (Ethernet + IP + TCP headers) are processed.
        A length of 0 means there are no more packets; anything in
        between is treated as a fragment and ignored for now.
Args:
header: header of the captured packet
packet: packet captured from file or interface
"""
if len(packet) >= 54:
# Verify if user asked for just one specific packet
if self.was_packet_number_defined():
if not self.is_the_packet_number_specified():
self.packet_count += 1
return
# DEBUG:
# print("Packet Number: %s" % self.packet_count)
pkt = Packet(packet, self.packet_count, header)
if pkt.reconnect_error:
if isinstance(self.stats, OFStats):
# OFStats counts reconnects
self.stats.process_packet(pkt)
if isinstance(self.notifications, Notifications):
# Send notifications via Slack
self.notifications.send_msg(pkt)
elif pkt.is_openflow_packet:
valid_result = pkt.process_openflow_messages()
if valid_result:
# Apps go here:
if isinstance(self.oft, OessFvdTracer):
# FVD_Tracer does not print the packets
self.oft.process_packet(pkt)
if isinstance(self.ofp_proxy, OFProxy):
# OFP_PROXY associates IP:PORT to DPID
self.ofp_proxy.process_packet(pkt)
if isinstance(self.stats, OFStats):
# OFStats print the packets
self.stats.process_packet(pkt)
if isinstance(self.notifications, Notifications):
# Send notifications via Slack
self.notifications.send_msg(pkt)
if not isinstance(self.oft, OessFvdTracer):
# Print Packets
pkt.print_packet()
if self.influx:
# tell influx to wake up and update immediately
self.trigger_event.set()
del pkt
if self.is_the_packet_number_specified():
# If a specific packet was selected, end here.
raise EndOfPcapFile
        elif len(packet) == 0:
return 3
self.packet_count += 1
def was_packet_number_defined(self):
"""
In case user wants to see a specific packet inside a
specific pcap file, provide file name with the specific
packet number after ":"
-r file.pcap:packet_number
Returns:
True if a packet number was specified
False: if a packet number was not specified
"""
if self.packet_number != 0:
return True
return False
def is_the_packet_number_specified(self):
"""
If user wants to see a specific packet inside a
specific pcap file and the packet_count is that
number, return True. Otherwise, return false
Returns:
True if packet_count matches
False: if packet_count does not match
"""
return True if self.packet_count == self.packet_number else False
def main():
"""
Main function.
Instantiates RunSniffer and run it
"""
try:
logging.config.dictConfig(yaml.safe_load(open('logging.yml', 'r')))
logger = logging.getLogger(__name__)
sniffer = RunSniffer()
logger.info("OFP_Sniffer started.")
sniffer.run()
except ErrorFilterFile as msg:
print(msg)
sys.exit(4)
except FileNotFoundError as msg:
print(msg)
sys.exit(5)
if __name__ == "__main__":
main()
| apache-2.0 | -7,528,601,546,497,908,000 | 31.479675 | 79 | 0.561702 | false |
kitchenbudapest/vr | hud.py | 1 | 4413 | ## INFO ########################################################################
## ##
## plastey ##
## ======= ##
## ##
## Oculus Rift + Leap Motion + Python 3 + C + Blender + Arch Linux ##
## Version: 0.2.0.980 (20150510) ##
## File: hud.py ##
## ##
## For more information about the project, visit ##
## <http://plastey.kibu.hu>. ##
## Copyright (C) 2015 Peter Varo, Kitchen Budapest ##
## ##
## This program is free software: you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License as published by the ##
## Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ##
## See the GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program, most likely a file in the root directory, ##
## called 'LICENSE'. If not, see <http://www.gnu.org/licenses>. ##
## ##
######################################################################## INFO ##
# Import python modules
from collections import deque
#------------------------------------------------------------------------------#
class Text:
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def __init__(self, text_first_object,
text_other_object,
time_getter,
interval):
self._text_first = text_first_object
self._text_other = text_other_object
self._get_time = time_getter
self._interval = interval
self._last_time = time_getter()
self._messages = deque()
self._still_empty = True
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _update(self):
# Write the changed and constructed messages to display
messages = iter(self._messages)
try:
self._text_first.text = next(messages)
self._text_other.text = '\n'.join(messages)
except StopIteration:
self._text_first.text = self._text_other.text = ''
# Update timer
self._last_time = self._get_time()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def clear(self):
self._messages = deque()
self._update()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def update(self):
# If there are any messages left
if len(self._messages):
# If interval passed
if (self._last_time + self._interval) <= self._get_time():
# Remove oldest item
self._messages.pop()
# Update display
self._update()
# If deque just become empty
elif not self._still_empty:
# Switch state flag and update display
self._still_empty = True
self._update()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def write(self, message):
# Add new message and update display
self._messages.appendleft(message)
self._update()
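# Hedged usage sketch (not part of the original module): Text only needs two
# objects exposing a mutable `text` attribute, a clock callable and an interval.
# The _Label stand-in below is an assumption for illustration; in the game the
# real HUD text objects would be passed instead.
if __name__ == '__main__':
    from time import time
    class _Label:
        text = ''
    hud_text = Text(_Label(), _Label(), time, 2.0)
    hud_text.write('hello')
    hud_text.write('world')
    hud_text.update()  # call periodically; drops the oldest message every 2 seconds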
| gpl-3.0 | 2,802,582,969,299,939,300 | 48.58427 | 80 | 0.363245 | false |
metacloud/python-keystoneclient | keystoneclient/tests/v3/test_access.py | 1 | 6026 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from keystoneclient import access
from keystoneclient.openstack.common import timeutils
from keystoneclient.tests.v3 import client_fixtures
from keystoneclient.tests.v3 import utils
TOKEN_RESPONSE = utils.TestResponse({
"headers": client_fixtures.AUTH_RESPONSE_HEADERS
})
UNSCOPED_TOKEN = client_fixtures.UNSCOPED_TOKEN
DOMAIN_SCOPED_TOKEN = client_fixtures.DOMAIN_SCOPED_TOKEN
PROJECT_SCOPED_TOKEN = client_fixtures.PROJECT_SCOPED_TOKEN
class AccessInfoTest(utils.TestCase):
def test_building_unscoped_accessinfo(self):
auth_ref = access.AccessInfo.factory(resp=TOKEN_RESPONSE,
body=UNSCOPED_TOKEN)
self.assertTrue(auth_ref)
self.assertIn('methods', auth_ref)
self.assertIn('catalog', auth_ref)
self.assertFalse(auth_ref['catalog'])
self.assertEqual(auth_ref.auth_token,
'3e2813b7ba0b4006840c3825860b86ed')
self.assertEqual(auth_ref.username, 'exampleuser')
self.assertEqual(auth_ref.user_id, 'c4da488862bd435c9e6c0275a0d0e49a')
self.assertEqual(auth_ref.role_names, [])
self.assertIsNone(auth_ref.project_name)
self.assertIsNone(auth_ref.project_id)
self.assertIsNone(auth_ref.auth_url)
self.assertIsNone(auth_ref.management_url)
self.assertFalse(auth_ref.domain_scoped)
self.assertFalse(auth_ref.project_scoped)
self.assertEqual(auth_ref.user_domain_id,
'4e6893b7ba0b4006840c3845660b86ed')
self.assertEqual(auth_ref.user_domain_name, 'exampledomain')
self.assertIsNone(auth_ref.project_domain_id)
self.assertIsNone(auth_ref.project_domain_name)
self.assertEqual(auth_ref.expires, timeutils.parse_isotime(
UNSCOPED_TOKEN['token']['expires_at']))
def test_will_expire_soon(self):
expires = timeutils.utcnow() + datetime.timedelta(minutes=5)
UNSCOPED_TOKEN['token']['expires_at'] = expires.isoformat()
auth_ref = access.AccessInfo.factory(resp=TOKEN_RESPONSE,
body=UNSCOPED_TOKEN)
self.assertFalse(auth_ref.will_expire_soon(stale_duration=120))
self.assertTrue(auth_ref.will_expire_soon(stale_duration=300))
self.assertFalse(auth_ref.will_expire_soon())
def test_building_domain_scoped_accessinfo(self):
auth_ref = access.AccessInfo.factory(resp=TOKEN_RESPONSE,
body=DOMAIN_SCOPED_TOKEN)
self.assertTrue(auth_ref)
self.assertIn('methods', auth_ref)
self.assertIn('catalog', auth_ref)
self.assertTrue(auth_ref['catalog'])
self.assertEqual(auth_ref.auth_token,
'3e2813b7ba0b4006840c3825860b86ed')
self.assertEqual(auth_ref.username, 'exampleuser')
self.assertEqual(auth_ref.user_id, 'c4da488862bd435c9e6c0275a0d0e49a')
self.assertEqual(auth_ref.role_names, ['admin', 'member'])
self.assertEqual(auth_ref.domain_name, 'anotherdomain')
self.assertEqual(auth_ref.domain_id,
'8e9283b7ba0b1038840c3842058b86ab')
self.assertIsNone(auth_ref.project_name)
self.assertIsNone(auth_ref.project_id)
self.assertEqual(auth_ref.user_domain_id,
'4e6893b7ba0b4006840c3845660b86ed')
self.assertEqual(auth_ref.user_domain_name, 'exampledomain')
self.assertIsNone(auth_ref.project_domain_id)
self.assertIsNone(auth_ref.project_domain_name)
self.assertTrue(auth_ref.domain_scoped)
self.assertFalse(auth_ref.project_scoped)
def test_building_project_scoped_accessinfo(self):
auth_ref = access.AccessInfo.factory(resp=TOKEN_RESPONSE,
body=PROJECT_SCOPED_TOKEN)
self.assertTrue(auth_ref)
self.assertIn('methods', auth_ref)
self.assertIn('catalog', auth_ref)
self.assertTrue(auth_ref['catalog'])
self.assertEqual(auth_ref.auth_token,
'3e2813b7ba0b4006840c3825860b86ed')
self.assertEqual(auth_ref.username, 'exampleuser')
self.assertEqual(auth_ref.user_id, 'c4da488862bd435c9e6c0275a0d0e49a')
self.assertEqual(auth_ref.role_names, ['admin', 'member'])
self.assertIsNone(auth_ref.domain_name)
self.assertIsNone(auth_ref.domain_id)
self.assertEqual(auth_ref.project_name, 'exampleproject')
self.assertEqual(auth_ref.project_id,
'225da22d3ce34b15877ea70b2a575f58')
self.assertEqual(auth_ref.tenant_name, auth_ref.project_name)
self.assertEqual(auth_ref.tenant_id, auth_ref.project_id)
self.assertEqual(auth_ref.auth_url,
('http://public.com:5000/v3',))
self.assertEqual(auth_ref.management_url,
('http://admin:35357/v3',))
self.assertEqual(auth_ref.project_domain_id,
'4e6893b7ba0b4006840c3845660b86ed')
self.assertEqual(auth_ref.project_domain_name, 'exampledomain')
self.assertEqual(auth_ref.user_domain_id,
'4e6893b7ba0b4006840c3845660b86ed')
self.assertEqual(auth_ref.user_domain_name, 'exampledomain')
self.assertFalse(auth_ref.domain_scoped)
self.assertTrue(auth_ref.project_scoped)
| apache-2.0 | 4,539,674,048,013,732,000 | 39.993197 | 78 | 0.656654 | false |
SakuradaJun/django-rest-auth | rest_auth/registration/serializers.py | 1 | 3313 | from django.http import HttpRequest
from rest_framework import serializers
from requests.exceptions import HTTPError
from allauth.socialaccount.helpers import complete_social_login
class SocialLoginSerializer(serializers.Serializer):
access_token = serializers.CharField(required=False)
code = serializers.CharField(required=False)
def validate(self, attrs):
view = self.context.get('view')
request = self.context.get('request')
if not isinstance(request, HttpRequest):
request = request._request
if not view:
raise serializers.ValidationError(
'View is not defined, pass it as a context variable'
)
self.adapter_class = getattr(view, 'adapter_class', None)
if not self.adapter_class:
raise serializers.ValidationError(
'Define adapter_class in view'
)
self.adapter = self.adapter_class()
app = self.adapter.get_provider().get_app(request)
# More info on code vs access_token
# http://stackoverflow.com/questions/8666316/facebook-oauth-2-0-code-and-token
# We have the access_token straight
if('access_token' in attrs):
access_token = attrs.get('access_token')
# We did not get the access_token, but authorization code instead
elif('code' in attrs):
self.callback_url = getattr(view, 'callback_url', None)
self.client_class = getattr(view, 'client_class', None)
if not self.callback_url:
raise serializers.ValidationError(
'Define callback_url in view'
)
if not self.client_class:
raise serializers.ValidationError(
'Define client_class in view'
)
code = attrs.get('code')
provider = self.adapter.get_provider()
scope = provider.get_scope(request)
client = self.client_class(
request,
app.client_id,
app.secret,
self.adapter.access_token_method,
self.adapter.access_token_url,
self.callback_url,
scope
)
token = client.get_access_token(code)
access_token = token['access_token']
token = self.adapter.parse_token({'access_token': access_token})
token.app = app
try:
login = self.adapter.complete_login(
request,
app,
token,
response=access_token,
)
login.token = token
complete_social_login(request, login)
except HTTPError:
raise serializers.ValidationError('Incorrect value')
if not login.is_existing:
login.lookup()
login.save(request, connect=True)
attrs['user'] = login.account.user
return attrs
| mit | -789,387,178,647,441,900 | 32.464646 | 86 | 0.56233 | false |
lord63/zhihudaily | zhihudaily/views/utils.py | 1 | 1116 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from flask import send_file, g, Blueprint
from zhihudaily.configs import Config
from zhihudaily.crawler import Crawler
from zhihudaily._compat import StringIO
utils = Blueprint('utils', __name__)
@utils.before_app_request
def before_request():
g.db = Config.database
g.db.connect()
@utils.after_app_request
def after_request(response):
g.db.close()
return response
@utils.route('/img/<server>/<path:hash_string>')
def image(server, hash_string):
"""Handle image, use redis to cache image."""
image_url = 'https://{0}.zhimg.com/{1}'.format(server, hash_string)
cached = Config.redis_server.get(image_url)
if cached:
buffer_image = StringIO(cached)
buffer_image.seek(0)
else:
r = Crawler().send_request(image_url)
buffer_image = StringIO(r.content)
buffer_image.seek(0)
Config.redis_server.setex(image_url, (60*60*24*4),
buffer_image.getvalue())
return send_file(buffer_image, mimetype='image/jpeg')
| mit | 1,451,385,733,581,734,100 | 25.571429 | 71 | 0.653226 | false |
geometalab/G4SE-Compass | compass-api/G4SE/api/migrations/0004_geoservicemetadata.py | 1 | 3940 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3.dev20161004124613 on 2016-10-10 12:42
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('api', '0003_recordtag'),
]
operations = [
migrations.CreateModel(
name='GeoServiceMetadata',
fields=[
('api_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='uuid')),
('identifier', models.CharField(max_length=255, verbose_name='external identifier')),
('language', models.CharField(choices=[('de', 'de'), ('fr', 'fr'), ('en', 'en')], default='de', max_length=20, verbose_name='language')),
('title', models.CharField(max_length=255, verbose_name='title')),
('abstract', models.TextField(verbose_name='abstract')),
('publication_year', models.IntegerField(verbose_name='publication year')),
('publication_lineage', models.CharField(blank=True, max_length=255, null=True, verbose_name='history')),
('is_latest', models.BooleanField(default=False, max_length=255, verbose_name='latest of series')),
('geography', models.CharField(default='Schweiz', max_length=255, verbose_name='geography')),
('extent', models.CharField(blank=True, help_text='needs follow the form `BOX(x1 y1,x2 y2)`', max_length=255, null=True, verbose_name='extent')),
('geodata_type', models.CharField(choices=[('raster', 'raster'), ('vector', 'vector'), ('other', 'other')], max_length=255, verbose_name='geodata type')),
('source', models.CharField(max_length=2083, verbose_name='source')),
('metadata_link', models.URLField(max_length=2083, verbose_name='metadata link')),
('access_link', models.URLField(max_length=2083, verbose_name='access link')),
('base_link', models.URLField(blank=True, max_length=2083, null=True, verbose_name='access to data')),
('collection', models.CharField(blank=True, max_length=255, null=True, verbose_name='group name')),
('dataset', models.CharField(blank=True, max_length=255, null=True, verbose_name='dataset name')),
('arcgis_layer_link', models.URLField(blank=True, max_length=2083, null=True, verbose_name='ArcGIS layer link')),
('qgis_layer_link', models.URLField(blank=True, max_length=2083, null=True, verbose_name='QGIS layer link')),
('arcgis_symbology_link', models.URLField(blank=True, max_length=2083, null=True, verbose_name='ArcGIS symbology link')),
('qgis_symbology_link', models.URLField(blank=True, max_length=2083, null=True, verbose_name='QGIS symbology link')),
('service_type', models.CharField(blank=True, max_length=255, null=True, verbose_name='service type')),
('crs', models.CharField(max_length=20, verbose_name='coordinate reference system')),
('term_link', models.URLField(max_length=2083, verbose_name='terms of use')),
('proved', models.DateField(blank=True, null=True, verbose_name='proving date')),
('visibility', models.CharField(choices=[('public', 'public'), ('test', 'test'), ('hsr-internal', 'hsr-internal')], default='public', max_length=255, verbose_name='access restriction')),
('login_name', models.CharField(max_length=255, verbose_name='login name')),
('modified', models.DateTimeField(auto_now=True, null=True, verbose_name='last modification')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='created on')),
('imported', models.BooleanField(default=False, editable=False, verbose_name='imported')),
],
),
]
| mit | 7,452,857,091,811,495,000 | 76.254902 | 202 | 0.624365 | false |
teran/bootloader-web | bootloader/tools/api/__init__.py | 1 | 1159 | from django.contrib.contenttypes.models import ContentType
from rest_framework import viewsets
from rest_framework.permissions import IsAdminUser
from tools.models import Agent, Credential
from tools.serializers import AgentSerializer, CredentialSerializer
class CredentialViewSet(viewsets.ModelViewSet):
queryset = Credential.objects.all()
serializer_class = CredentialSerializer
permission_classes = (IsAdminUser,)
def get_queryset(self):
filterq = {}
if 'object' in self.request.query_params.keys():
filterq['content_type'] = ContentType.objects.get(
model=self.request.query_params['object']).pk
if 'object_id' in self.request.query_params.keys():
filterq['object_id'] = self.request.query_params['object_id']
if 'name' in self.request.query_params.keys():
filterq['name'] = self.request.query_params['name']
queryset = Credential.objects.filter(**filterq)
return queryset
class AgentViewSet(viewsets.ModelViewSet):
queryset = Agent.objects.all()
serializer_class = AgentSerializer
permission_classes = (IsAdminUser,)
| gpl-2.0 | 867,047,386,432,015,700 | 34.121212 | 73 | 0.704918 | false |
euccas/CodingPuzzles-Python | leet/source/searchDFS/combinations.py | 1 | 1336 | class Solution:
"""
@param n: Given the range of numbers
@param k: Given the numbers of combinations
@return: All the combinations of k numbers out of 1..n
"""
def combine(self, n, k):
# write your code here
if n is None or k is None:
return []
self.result = []
self.dfs(n, k, 0, [])
return self.result
def dfs(self, n, k, startpos, combination):
        if len(combination) == k:
            self.result.append(combination)
            return
for i in range(startpos, n):
#if len(combination) > k:
# return
self.dfs(n, k, i+1, combination+[i+1])
class Solution1():
"""
Faster than Solution, because no need calculate len(curr_result)
"""
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
if n is None or k is None or k == 0:
return []
result = []
self.dfs(n, k, 1, [], result)
return result
def dfs(self, n, k, start_pos, curr_result, result):
if k == 0:
result.append(curr_result[:])
return
for i in range(start_pos, n+1):
curr_result.append(i)
self.dfs(n, k-1, i+1, curr_result, result)
curr_result.pop()
| mit | -4,516,779,564,583,097,300 | 25.72 | 68 | 0.505988 | false |
gis-rpd/pipelines | lib/pipelines.py | 1 | 34206 | """library functions for pipelines
"""
#--- standard library imports
#
import os
import sys
import subprocess
import logging
import shutil
import smtplib
from email.mime.text import MIMEText
from getpass import getuser
#import socket
import time
from datetime import datetime
from datetime import timedelta
import calendar
import json
import tarfile
import glob
#import argparse
import copy
from collections import deque
#--- third-party imports
#
import yaml
import requests
import dateutil.relativedelta
#--- project specific imports
#
from config import site_cfg
from config import rest_services
from utils import generate_timestamp
from utils import chroms_and_lens_from_fasta
from utils import bed_and_fa_are_compat
import configargparse
__author__ = "Andreas Wilm"
__email__ = "[email protected]"
__copyright__ = "2016 Genome Institute of Singapore"
__license__ = "The MIT License (MIT)"
# only dump() and following do not automatically create aliases
yaml.Dumper.ignore_aliases = lambda *args: True
# global logger
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
'[{asctime}] {levelname:8s} {filename} {message}', style='{'))
logger.addHandler(handler)
# dir relative to Snakefile where configs are to be found
# from address, i.e. users should reply to to this
# instead of rpd@gis to which we send email
# FIXME both to config or external file
RPD_MAIL = "[email protected]"
RPD_SIGNATURE = """
--
Research Pipeline Development Team
Scientific & Research Computing
<{}>
""".format(RPD_MAIL)
# ugly
PIPELINE_ROOTDIR = os.path.join(os.path.dirname(__file__), "..")
assert os.path.exists(os.path.join(PIPELINE_ROOTDIR, "VERSION"))
WORKFLOW_COMPLETION_FLAGFILE = "WORKFLOW_COMPLETE"
DOWNSTREAM_OUTDIR_TEMPLATE = "{basedir}/{user}/{pipelinename}-version-{pipelineversion}/{timestamp}"
def snakemake_log_status(log):
"""
Return exit status and timestamp (isoformat string) as tuple.
Exit status is either "SUCCESS" or "ERROR" or None
    If exit status is None, the timestamp will be the last seen timestamp (or None) and the status is unknown
Parses last lines of log, which could look like
[Fri Jun 17 11:13:16 2016] Exiting because a job execution failed. Look above for error message
[Fri Jul 15 01:29:12 2016] 17 of 17 steps (100%) done
[Thu Nov 10 22:45:27 2016] Nothing to be done.
"""
# this is by design a bit fuzzy
with open(log) as fh:
last_lines = deque(fh, maxlen=60)
        status = None
        last_etime = None
        etime = None
while last_lines: # iterate from end
line = last_lines.pop()
if "Refusing to overwrite existing log bundle" in line:
continue
if line.startswith("["):# time stamp required
estr = line[1:].split("]")[0]
try:
etime = str(datetime.strptime(estr, '%a %b %d %H:%M:%S %Y'))
except:
continue
if not last_etime:
last_etime = etime# first is last. useful for undefined status
if 'steps (100%) done' in line or "Nothing to be done" in line:
status = "SUCCESS"
break
elif 'Exiting' in line or "Error" in line:
status = "ERROR"
break
    # if the status is unknown, fall back to the most recent timestamp seen
    return status, etime if status else last_etime
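# Hedged usage sketch (not part of the original module): typical consumption of
# snakemake_log_status(); the log path is a made-up example.
#
#   status, etime = snakemake_log_status("out/logs/snakemake.log")
#   print("workflow ended with {} at {}".format(status, etime))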
def get_downstream_outdir(requestor, pipeline_name, pipeline_version=None):
"""generate downstream output directory
"""
if is_devel_version():
basedir = site_cfg['downstream_outdir_base']['devel']
else:
basedir = site_cfg['downstream_outdir_base']['production']
if pipeline_version:
pversion = pipeline_version
else:
pversion = get_pipeline_version(nospace=True)
outdir = DOWNSTREAM_OUTDIR_TEMPLATE.format(
basedir=basedir, user=requestor, pipelineversion=pversion,
pipelinename=pipeline_name, timestamp=generate_timestamp())
return outdir
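# Hedged usage sketch (not part of the original module): the returned directory
# follows DOWNSTREAM_OUTDIR_TEMPLATE, e.g. (requestor and pipeline name made up)
#
#   get_downstream_outdir("jdoe", "my-pipeline")
#   # -> <basedir>/jdoe/my-pipeline-version-<version>/<timestamp>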
class PipelineHandler(object):
"""Class that handles setting up and calling pipelines
"""
# output
PIPELINE_CFGFILE = "conf.yaml"
RC_DIR = "rc"
RC_FILES = {
# used to load snakemake
'SNAKEMAKE_INIT' : os.path.join(RC_DIR, 'snakemake_init.rc'),
# used as bash prefix within snakemakejobs
'SNAKEMAKE_ENV' : os.path.join(RC_DIR, 'snakemake_env.rc'),
}
LOG_DIR_REL = "logs"
MASTERLOG = os.path.join(LOG_DIR_REL, "snakemake.log")
SUBMISSIONLOG = os.path.join(LOG_DIR_REL, "submission.log")
# master max walltime in hours
# note, this includes waiting for jobs in q
MASTER_WALLTIME_H = 96
def __init__(self, pipeline_name, pipeline_subdir,
def_args,
cfg_dict,
cluster_cfgfile=None,
logger_cmd=None,
site=None,
master_walltime_h=MASTER_WALLTIME_H):
"""init function
        - pipeline_subdir: where the default configs can be found, i.e. the pipeline subdir
        - def_args: argparse args; only those from the default argparser are handled, i.e. they must be a subset of it
        - logger_cmd: the logger command used in run.sh. bash's 'true' does nothing. If set to None while logging is on, the downstream default with the conf db-id is used.
"""
if is_devel_version():
logger.info("Running in non-production mode")
self.pipeline_name = pipeline_name
self.pipeline_version = get_pipeline_version()# external function
self.pipeline_subdir = pipeline_subdir
self.log_dir_rel = self.LOG_DIR_REL
self.masterlog = self.MASTERLOG
self.submissionlog = self.SUBMISSIONLOG
self.master_q = def_args.master_q
self.slave_q = def_args.slave_q
self.outdir = def_args.outdir
self.restarts = def_args.restarts
self.cfg_dict = copy.deepcopy(cfg_dict)
self.cfg_dict['mail_on_completion'] = not def_args.no_mail
self.cfg_dict['mail_address'] = def_args.mail_address
if def_args.name:
self.cfg_dict['analysis_name'] = def_args.name
if def_args.extra_conf:
for keyvalue in def_args.extra_conf:
assert keyvalue.count(":") == 1, ("Invalid argument for extra-conf")
k, v = keyvalue.split(":")
self.cfg_dict[k] = v
if def_args.modules_cfg:
assert os.path.exists(def_args.modules_cfg)
self.modules_cfgfile = def_args.modules_cfg
if def_args.references_cfg:
assert os.path.exists(def_args.references_cfg)
self.refs_cfgfile = def_args.references_cfg
if cluster_cfgfile:
assert os.path.exists(cluster_cfgfile)
self.cluster_cfgfile = cluster_cfgfile
self.pipeline_cfgfile_out = os.path.join(
self.outdir, self.PIPELINE_CFGFILE)
# RCs
self.snakemake_init_file = os.path.join(
self.outdir, self.RC_FILES['SNAKEMAKE_INIT'])
self.snakemake_env_file = os.path.join(
self.outdir, self.RC_FILES['SNAKEMAKE_ENV'])
if site is None:
try:
site = get_site()
except ValueError:
logger.warning("Unknown site")
site = "local"
# DB logging of execution
if def_args.db_logging in ['n', 'no', 'off']:
# use bash's true, which doesn't do anything
if logger_cmd:
sys.stderr.write("WARN: Got logger command but logging is off\n")
self.logger_cmd = 'true'
elif def_args.db_logging in ['y', 'yes', 'on']:
# if logger_cmd is given use that, otherwise use the default logger which depends on db-id
if not logger_cmd:
assert self.cfg_dict['db-id'], ("Need db-id config value for logging")
# run.sh has a path to snakemake so should contain a path to python3
scr = os.path.join(PIPELINE_ROOTDIR, 'downstream-handlers', 'downstream_started.py')
logger_cmd = "{} -d {} -o {}".format(scr, self.cfg_dict['db-id'], self.outdir)
self.logger_cmd = logger_cmd
else:
raise ValueError(def_args.db_logging)
self.site = site
self.master_walltime_h = master_walltime_h
self.snakefile_abs = os.path.abspath(
os.path.join(pipeline_subdir, "Snakefile"))
assert os.path.exists(self.snakefile_abs)
# cluster configs
if self.cluster_cfgfile:
self.cluster_cfgfile_out = os.path.join(self.outdir, "cluster.yaml")
# else: local
# run template
self.run_template = os.path.join(
PIPELINE_ROOTDIR, "lib", "run.template.{}.sh".format(self.site))
self.run_out = os.path.join(self.outdir, "run.sh")
assert os.path.exists(self.run_template)
# we don't know for sure who's going to actually exectute
# but it's very likely the current user, who needs to be notified
# on qsub kills etc
self.toaddr = email_for_user()
log_path = os.path.abspath(os.path.join(self.outdir, self.masterlog))
self.elm_data = {'pipeline_name': self.pipeline_name,
'pipeline_version': self.pipeline_version,
'site': self.site,
'instance_id': 'SET_ON_EXEC',# dummy
'submitter': 'SET_ON_EXEC',# dummy
'log_path': log_path}
@staticmethod
def write_snakemake_init(rc_file, overwrite=False):
"""write snakemake init rc (loads miniconda and, activate source')
"""
if not overwrite:
assert not os.path.exists(rc_file), rc_file
with open(rc_file, 'w') as fh:
# init first so that modules are present
fh.write("{}\n".format(" ".join(get_init_call())))
fh.write("module load miniconda3\n")
fh.write("source activate {}\n".format(site_cfg['snakemake_env']))
def write_snakemake_env(self, overwrite=False):
"""creates rc file for use as 'bash prefix', which also loads modules defined in cfgfile
"""
if not overwrite:
assert not os.path.exists(self.snakemake_env_file), self.snakemake_env_file
with open(self.snakemake_env_file, 'w') as fh_rc:
fh_rc.write("# used as bash prefix within snakemake\n\n")
fh_rc.write("# make sure module command is defined (non-login shell). see http://lmod.readthedocs.io/en/latest/030_installing.html\n")
fh_rc.write("{}\n".format(" ".join(get_init_call())))
fh_rc.write("# load modules\n")
with open(self.pipeline_cfgfile_out) as fh_cfg:
yaml_data = yaml.safe_load(fh_cfg)
assert "modules" in yaml_data
for k, v in yaml_data["modules"].items():
fh_rc.write("module load {}/{}\n".format(k, v))
fh_rc.write("\n")
fh_rc.write("# unofficial bash strict has to come last\n")
fh_rc.write("set -euo pipefail;\n")
def write_cluster_cfg(self):
"""writes site dependend cluster config
"""
shutil.copyfile(self.cluster_cfgfile, self.cluster_cfgfile_out)
def write_run_template(self):
"""writes run template replacing placeholder with variables defined in
instance
"""
d = {'SNAKEFILE': self.snakefile_abs,
'LOGDIR': self.log_dir_rel,
'MASTERLOG': self.masterlog,
'PIPELINE_NAME': self.pipeline_name,
'MAILTO': self.toaddr,
'DEFAULT_RESTARTS': self.restarts,
'MASTER_WALLTIME_H': self.master_walltime_h,
'DEFAULT_SLAVE_Q': self.slave_q if self.slave_q else "",
'LOGGER_CMD': self.logger_cmd}
with open(self.run_template) as fh:
templ = fh.read()
with open(self.run_out, 'w') as fh:
fh.write(templ.format(**d))
def read_cfgfiles(self):
"""parse default config and replace all RPD env vars
"""
merged_cfg = dict()
rpd_vars = get_rpd_vars()
for cfgkey, cfgfile in [('references', self.refs_cfgfile),
('modules', self.modules_cfgfile)]:
if not cfgfile:
continue
with open(cfgfile) as fh:
try:
d = yaml.safe_load(fh)
if not d:
# allow empty files
continue
cfg = dict(d)
except:
logger.fatal("Loading %s failed", cfgfile)
raise
# to replace rpd vars the trick is to traverse
# dictionary fully and replace all instances
dump = json.dumps(cfg)
for k, v in rpd_vars.items():
dump = dump.replace("${}".format(k), v)
cfg = dict(json.loads(dump))
if cfgkey == 'global':
merged_cfg.update(cfg)
else:
assert cfgkey not in merged_cfg
merged_cfg[cfgkey] = cfg
# determine num_chroms needed by some pipelines
# FIXME ugly because sometimes not needed
if merged_cfg.get('references'):
reffa = merged_cfg['references'].get('genome')
if reffa:
assert 'num_chroms' not in merged_cfg['references']
merged_cfg['references']['num_chroms'] = len(list(
chroms_and_lens_from_fasta(reffa)))
return merged_cfg
def write_merged_cfg(self, force_overwrite=False):
"""writes config file for use in snakemake becaused on default config
"""
master_cfg = self.read_cfgfiles()
master_cfg.update(self.cfg_dict)
b = master_cfg.get('intervals')
# sanity check: bed only makes sense if we have a reference
if b:
f = master_cfg['references'].get('genome')
assert bed_and_fa_are_compat(b, f), (
"{} not compatible with {}".format(b, f))
assert 'ELM' not in master_cfg
master_cfg['ELM'] = self.elm_data
if not force_overwrite:
assert not os.path.exists(self.pipeline_cfgfile_out)
with open(self.pipeline_cfgfile_out, 'w') as fh:
# default_flow_style=None(default)|True(least readable)|False(most readable)
yaml.dump(master_cfg, fh, default_flow_style=False)
def setup_env(self):
"""create run environment
"""
logger.info("Creating run environment in %s", self.outdir)
# create log dir recursively so that parent is created as well
os.makedirs(os.path.join(self.outdir, self.log_dir_rel))
os.makedirs(os.path.join(self.outdir, self.RC_DIR))
if self.site != "local":
self.write_cluster_cfg()
self.write_merged_cfg()
self.write_snakemake_env()
self.write_snakemake_init(self.snakemake_init_file)
self.write_run_template()
def submit(self, no_run=False):
"""submit pipeline run
"""
if self.master_q:
master_q_arg = "-q {}".format(self.master_q)
else:
master_q_arg = ""
if self.site == "local":
logger.warning("Please not that script is run in 'local' mode"
" (which is mainly for debugging)")
cmd = "cd {} && bash {} {} >> {}".format(
os.path.dirname(self.run_out), master_q_arg,
os.path.basename(self.run_out), self.submissionlog)
else:
cmd = "cd {} && {} {} {} >> {}".format(
os.path.dirname(self.run_out), site_cfg['master_submission_cmd'],
master_q_arg, os.path.basename(self.run_out), self.submissionlog)
if no_run:
logger.warning("Skipping pipeline run on request. Once ready, use: %s", cmd)
logger.warning("Once ready submit with: %s", cmd)
else:
logger.info("Starting pipeline: %s", cmd)
#os.chdir(os.path.dirname(run_out))
try:
res = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# if cluster has not compute nodes (e.g. AWS
# autoscaling to 0), UGE will throw an error, but job
# still gets submitted
if 'job is not allowed to run in any queue' in e.output.decode():
logger.warning("Looks like cluster cooled down (no compute nodes available)."
" Job is submitted nevertheless and should start soon.")
else:
raise
submission_log_abs = os.path.abspath(os.path.join(
self.outdir, self.submissionlog))
master_log_abs = os.path.abspath(os.path.join(
self.outdir, self.masterlog))
logger.debug("For submission details see %s", submission_log_abs)
logger.info("The (master) logfile is %s", master_log_abs)
def default_argparser(cfg_dir,
allow_missing_cfgfile=False,
allow_missing_outdir=False,
default_db_logging=False,
with_readunits=False):
"""Create default argparser (use as parent) for pipeline calls. Needs
    to point to the pipeline's config dir
"""
parser = configargparse.ArgumentParser(add_help=False)
sample_group = parser.add_argument_group('Sample/Readunit Input')
if with_readunits:
sample_group.add_argument('-1', "--fq1", nargs="+",
help="FastQ file/s (gzip only)."
" Multiple input files supported (auto-sorted)."
" Note: each file (or pair) gets a unique read-group id."
" Collides with --sample-cfg.")
sample_group.add_argument('-2', "--fq2", nargs="+",
help="FastQ file/s (if paired) (gzip only). See also --fq1")
sample_group.add_argument('-s', "--sample",
help="Sample name. Collides with --sample-cfg.")
sample_group.add_argument('-S', '--sample-cfg',
help="Config-file (YAML) listing samples and readunits."
" Collides with -1, -2 and -s")
parser._optionals.title = "Output"
parser.add_argument('-o', "--outdir", required=not allow_missing_outdir,
help="Output directory (must not exist)")
rep_group = parser.add_argument_group('Reporting')
default = email_for_user()
rep_group.add_argument('--mail', dest='mail_address', default=default,
help="Send completion emails to this address (default: {})".format(default))
rep_group.add_argument('--name',
help="Give this analysis run a name (used in email and report)")
rep_group.add_argument('--no-mail', action='store_true',
help="Don't send mail on completion")
default = 'y' if default_db_logging else 'n'
helpstr = "Log execution in DB (requires db-id): n=no; y=yes"
helpstr += "(only allowed as production user; default={})".format(default)
rep_group.add_argument('--db-logging', choices=('y', 'n'), default=default,
help = helpstr if is_production_user() else configargparse.SUPPRESS)
rep_group.add_argument('-v', '--verbose', action='count', default=0,
help="Increase verbosity")
rep_group.add_argument('-q', '--quiet', action='count', default=0,
help="Decrease verbosity")
q_group = parser.add_argument_group('Run behaviour')
default = get_default_queue('slave')
q_group.add_argument('--slave-q', default=default,
help="Queue to use for slave jobs (default: {})".format(default))
default = get_default_queue('master')
q_group.add_argument('--master-q', default=default,
help="Queue to use for master job (default: {})".format(default))
default = 1
q_group.add_argument('--restarts', type=int, default=default,
help="Number of auto restarts per rule (default={})".format(default))
q_group.add_argument('-n', '--no-run', action='store_true')
cfg_group = parser.add_argument_group('Configuration')
cfg_group.add_argument('--extra-conf', nargs='*', metavar="key:value",
help="Advanced: Extra values added to config (takes precedence over existing values).")
cfg_group.add_argument('--global-cfg', is_config_file=True,
help="Global config file setting commandline options")
for name, descr in [("modules", "modules"),
("references", "reference sequences")]:
cfg_file = os.path.abspath(os.path.join(cfg_dir, "{}.yaml".format(name)))
if not os.path.exists(cfg_file):
if allow_missing_cfgfile:
cfg_file = None
else:
raise ValueError((cfg_file, allow_missing_cfgfile))
cfg_group.add_argument('--{}-cfg'.format(name),
default=cfg_file,
help="Config-file (yaml) for {}. (default: {})".format(descr, cfg_file))
return parser
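# Typical use (sketch; CFG_DIR is a hypothetical name for a pipeline's config
# directory): pass the returned parser as a parent, e.g.
#   parser = configargparse.ArgumentParser(parents=[default_argparser(CFG_DIR)])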
def get_pipeline_version(nospace=False):
"""determine pipeline version as defined by updir file
"""
version_file = os.path.abspath(os.path.join(PIPELINE_ROOTDIR, "VERSION"))
with open(version_file) as fh:
version = fh.readline().strip()
cwd = os.getcwd()
os.chdir(PIPELINE_ROOTDIR)
if os.path.exists(".git"):
commit = None
cmd = ['git', 'rev-parse', '--short', 'HEAD']
try:
res = subprocess.check_output(cmd)
commit = res.decode().strip()
except (subprocess.CalledProcessError, OSError) as _:
pass
if commit:
version = "{} {}".format(version, commit)
if nospace:
version = version.replace(" ", "-")
os.chdir(cwd)
return version
def is_devel_version():
"""checks whether this is a developers version of production
"""
check_file = os.path.abspath(os.path.join(PIPELINE_ROOTDIR, "DEVELOPERS_VERSION"))
#logger.debug("check_file = {}".format(check_file))
return os.path.exists(check_file)
def get_site():
"""Where are we running
"""
return site_cfg['name']
def get_cluster_cfgfile(cfg_dir):
"""returns None for local runs
"""
site = get_site()
if site != "local":
cfg = os.path.join(cfg_dir, "cluster.{}.yaml".format(site))
assert os.path.exists(cfg), ("Missing file {}".format(cfg))
return cfg
def get_init_call():
"""return dotkit init call
"""
cmd = ['source', site_cfg['init']]
if is_devel_version():
cmd = ['RPD_TESTING=1'] + cmd
return cmd
def get_rpd_vars():
"""Read RPD variables set by calling and parsing output from init
"""
cmd = get_init_call()
cmd = ' '.join(cmd) + ' && set | grep "^RPD_"'
try:
res = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        logger.fatal("Couldn't call init %s. Output was: %s", cmd, e.output)
raise
rpd_vars = dict()
for line in res.decode().splitlines():
if line.startswith('RPD_') and '=' in line:
#line = line.replace("export ", "")
#line = ''.join([c for c in line if c not in '";\''])
#logger.debug("line = {}".format(line))
k, v = line.split('=')
rpd_vars[k.strip()] = v.strip()
return rpd_vars
def isoformat_to_epoch_time(ts):
"""
Converts ISO8601 format (analysis_id) into epoch time
"""
dt = datetime.strptime(ts[:-7], '%Y-%m-%dT%H-%M-%S.%f')-\
timedelta(hours=int(ts[-5:-3]),
minutes=int(ts[-2:]))*int(ts[-6:-5]+'1')
epoch_time = calendar.timegm(dt.timetuple()) + dt.microsecond/1000000.0
return epoch_time
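# Expected input shape (assumed from the strptime format above and the
# "+08:00" suffix added by callers): e.g. "2016-05-06T13-14-15.123456+08:00".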
def relative_epoch_time(epoch_time1, epoch_time2):
"""
Relative time difference between two epoch time
"""
dt1 = datetime.fromtimestamp(epoch_time1)
dt2 = datetime.fromtimestamp(epoch_time2)
rd = dateutil.relativedelta.relativedelta(dt1, dt2)
return rd
def relative_isoformat_time(last_analysis):
"""
Relative isoformat_time
"""
analysis_epoch_time = isoformat_to_epoch_time(last_analysis+"+08:00")
epoch_time_now = isoformat_to_epoch_time(generate_timestamp()+"+08:00")
rd = relative_epoch_time(epoch_time_now, analysis_epoch_time)
relative_days = rd.months*30 + rd.days
return relative_days
def get_machine_run_flowcell_id(runid_and_flowcellid):
"""return machine-id, run-id and flowcell-id from full string.
Expected string format is machine-runid_flowcellid
>>> get_machine_run_flowcell_id("HS002-SR-R00224_BC9A6MACXX")
('HS002', 'HS002-SR-R00224', 'BC9A6MACXX')
>>> get_machine_run_flowcell_id("/path/to/seq/HS002-SR-R00224_BC9A6MACXX")
('HS002', 'HS002-SR-R00224', 'BC9A6MACXX')
>>> get_machine_run_flowcell_id("HS002_SR_R00224_BC9A6MACXX")
Traceback (most recent call last):
...
AssertionError: Wrong format: HS002_SR_R00224_BC9A6MACXX
"""
# strip away path
runid_and_flowcellid = runid_and_flowcellid.rstrip("/").split('/')[-1]
assert runid_and_flowcellid.count("_") == 1, (
"Wrong format: {}".format(runid_and_flowcellid))
runid, flowcellid = runid_and_flowcellid.split("_")
machineid = runid.split("-")[0]
return machineid, runid, flowcellid
def get_bcl_runfolder_for_runid(runid_and_flowcellid):
"""returns the bcl Run directory
"""
basedir = site_cfg['bcl2fastq_seqdir_base']
machineid, runid, flowcellid = get_machine_run_flowcell_id(
runid_and_flowcellid)
if machineid.startswith('MS00'):
rundir = "{}/{}/MiSeqOutput/{}_{}".format(basedir, machineid, runid, flowcellid)
return rundir
if machineid.startswith('NG00'):
basedir = site_cfg['bcl2fastq_seqdir_base'].replace("userrig", "novogene")
rundir = "{}/{}/{}_{}".format(basedir, machineid, runid, flowcellid)
return rundir
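    # Run ids from machines other than MS00*/NG00* fall through and
    # implicitly return None here.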
def user_mail_mapper(user_name):
"""Rest service to get user email id from AD mapper
"""
if is_devel_version():
user_email = rest_services['user_mail_mapper']['testing'] + user_name
else:
user_email = rest_services['user_mail_mapper']['production'] + user_name
try:
response = requests.get(user_email)
except requests.exceptions.ConnectionError:
logger.warning("Couldn't connect to user_mail_mapper")
return None
if response.status_code != requests.codes.ok:
response.raise_for_status()
logger.warning("User email mapper failed")
return None
rest_data = response.json()
return rest_data.get('userEmail')
def email_for_user():
"""get email for user (naive)
"""
user_name = getuser()
if user_name == "userrig":
toaddr = "[email protected]"
else:
toaddr = user_mail_mapper(user_name)
if toaddr is None:
toaddr = "{}@gis.a-star.edu.sg".format(user_name)
return toaddr
def is_production_user():
"""true if run as production user
"""
return getuser() == "userrig"
def get_default_queue(master_or_slave):
"""return cluster queue (for current user)
"""
if is_production_user():
user = 'production'
else:
user = 'enduser'
key = 'default_{}_q'.format(master_or_slave)
return site_cfg[key][user]
def send_status_mail(pipeline_name, success, analysis_id, outdir,
extra_text=None, pass_exception=True, to_address=None):
"""
- pipeline_name: pipeline name
- success: bool
- analysis_id: name/identifier for this analysis run
- outdir: directory where results are found
"""
body = "Pipeline {} (version {}) for {} ".format(
pipeline_name, get_pipeline_version(), analysis_id)
if success:
status_str = "completed"
body += status_str
body += "\n\nResults can be found in {}\n".format(outdir)
else:
status_str = "failed"
body += status_str
masterlog = os.path.normpath(os.path.join(outdir, "..", PipelineHandler.MASTERLOG))
body += "\n\nSorry about this."
body += "\n\nThe following log file provides more information: {}".format(masterlog)
if extra_text:
body = body + "\n" + extra_text + "\n"
body += "\n\nThis is an automatically generated email\n"
body += RPD_SIGNATURE
site = get_site()
subject = "Pipeline {} for {} {} (@{})".format(
pipeline_name, analysis_id, status_str, site)
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = RPD_MAIL
if to_address:
msg['To'] = to_address
else:
msg['To'] = email_for_user()
server = smtplib.SMTP(site_cfg['smtp_server'])
try:
server.send_message(msg)
server.quit()
except Exception as err:
logger.fatal("Sending mail failed: %s", err)
if not pass_exception:
raise
def send_mail(subject, body, toaddr=None, ccaddr=None,
pass_exception=True):
"""
Generic mail function
FIXME make toaddr and ccaddr lists
"""
body += "\n"
body += "\n\nThis is an automatically generated email\n"
body += RPD_SIGNATURE
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = RPD_MAIL
if toaddr is None:
msg['To'] = email_for_user()
elif "@" in toaddr:
msg['To'] = toaddr
else:
msg['To'] = toaddr + "@gis.a-star.edu.sg"
if ccaddr:
if "@" not in ccaddr:
ccaddr += "@gis.a-star.edu.sg"
msg['Cc'] = ccaddr
server = smtplib.SMTP(site_cfg['smtp_server'])
try:
server.send_message(msg)
server.quit()
except Exception as err:
logger.fatal("Sending mail failed: %s", err)
if not pass_exception:
raise
def ref_is_indexed(ref, prog="bwa"):
"""checks whether a reference was already indexed by given program"""
if prog == "bwa":
return all([os.path.exists(ref + ext)
for ext in [".pac", ".ann", ".amb", ".sa"]])
elif prog == "samtools":
return os.path.exists(ref + ".fai")
else:
raise ValueError
def generate_window(days=7):
"""returns tuple representing epoch window (int:present, int:past)"""
date_time = time.strftime('%Y-%m-%d %H:%M:%S')
pattern = '%Y-%m-%d %H:%M:%S'
epoch_present = int(time.mktime(time.strptime(date_time, pattern)))*1000
d = datetime.now() - timedelta(days=days)
f = d.strftime("%Y-%m-%d %H:%m:%S")
epoch_back = int(time.mktime(time.strptime(f, pattern)))*1000
return (epoch_present, epoch_back)
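# Example (illustrative): generate_window(days=7) returns two epoch timestamps
# in milliseconds, ("now", "now minus 7 days").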
def path_to_url(out_path):
"""convert path to qlap33 server url"""
# FIXME change for testing, gis, NSCC
if out_path.startswith("/mnt/projects/userrig/solexa/"):
return out_path.replace("/mnt/projects/userrig/solexa/", \
"http://rpd/userrig/runs/solexaProjects/")
else:
#raise ValueError(out_path)
return out_path
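# e.g. (from the mapping above): "/mnt/projects/userrig/solexa/RUN_X"
# -> "http://rpd/userrig/runs/solexaProjects/RUN_X" (RUN_X is a placeholder)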
def mux_to_lib(mux_id, testing=False):
"""returns the component libraries for MUX
"""
lib_list = []
if not testing:
rest_url = rest_services['lib_details']['production'].replace("lib_id", mux_id)
else:
rest_url = rest_services['lib_details']['testing'].replace("lib_id", mux_id)
response = requests.get(rest_url)
if response.status_code != requests.codes.ok:
response.raise_for_status()
rest_data = response.json()
if 'plexes' not in rest_data:
logger.fatal("FATAL: plexes info for %s is not available in ELM \n", mux_id)
sys.exit(1)
for lib in rest_data['plexes']:
lib_list.append(lib['libraryId'])
return lib_list
def bundle_and_clean_logs(pipeline_outdir, result_outdir="out/",
log_dir="logs/", overwrite=False):
"""bundle log files in pipeline_outdir+result_outdir and
pipeline_outdir+log_dir to pipeline_outdir+logs.tar.gz and remove
See http://stackoverflow.com/questions/40602894/access-to-log-files for potential alternatives
"""
for d in [pipeline_outdir,
os.path.join(pipeline_outdir, result_outdir),
os.path.join(pipeline_outdir, log_dir)]:
if not os.path.exists(d):
logger.warning("Missing directory %s. Skipping log bundling.", d)
return
bundle = os.path.join(log_dir, "logs.tar.gz")# relative to pipeline_outdir
if not overwrite and os.path.exists(os.path.join(pipeline_outdir, bundle)):
bundle = os.path.join(log_dir, "logs.{}.tar.gz".format(generate_timestamp()))
assert not os.path.exists(os.path.join(pipeline_outdir, bundle))
orig_dir = os.getcwd()
os.chdir(pipeline_outdir)
# all log files associated with output files
logfiles = glob.glob(os.path.join(result_outdir, "**/*.log"), recursive=True)
# (cluster) log directory
logfiles.extend(glob.glob(os.path.join(log_dir, "*")))
# paranoid cleaning and some exclusion
logfiles = [f for f in logfiles if os.path.isfile(f)
and not f.endswith("snakemake.log")]
with tarfile.open(bundle, "w:gz") as tarfh:
for f in logfiles:
tarfh.add(f)
os.unlink(f)
os.chdir(orig_dir)
def mark_as_completed():
"""Dropping a flag file marking analysis as complete"""
analysis_dir = os.getcwd()
flag_file = os.path.join(analysis_dir, WORKFLOW_COMPLETION_FLAGFILE)
with open(flag_file, 'a') as fh:
fh.write("{}\n".format(generate_timestamp()))
| mit | -8,655,352,702,351,648,000 | 35.350691 | 165 | 0.591095 | false |
gkoelln/youtube-dl | youtube_dl/extractor/svt.py | 1 | 9890 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse_urlparse,
)
from ..utils import (
determine_ext,
dict_get,
int_or_none,
try_get,
urljoin,
compat_str,
)
class SVTBaseIE(InfoExtractor):
_GEO_COUNTRIES = ['SE']
def _extract_video(self, video_info, video_id):
formats = []
for vr in video_info['videoReferences']:
player_type = vr.get('playerType') or vr.get('format')
vurl = vr['url']
ext = determine_ext(vurl)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
vurl, video_id,
ext='mp4', entry_protocol='m3u8_native',
m3u8_id=player_type, fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
vurl + '?hdcore=3.3.0', video_id,
f4m_id=player_type, fatal=False))
elif ext == 'mpd':
if player_type == 'dashhbbtv':
formats.extend(self._extract_mpd_formats(
vurl, video_id, mpd_id=player_type, fatal=False))
else:
formats.append({
'format_id': player_type,
'url': vurl,
})
if not formats and video_info.get('rights', {}).get('geoBlockedSweden'):
self.raise_geo_restricted(
'This video is only available in Sweden',
countries=self._GEO_COUNTRIES)
self._sort_formats(formats)
subtitles = {}
subtitle_references = dict_get(video_info, ('subtitles', 'subtitleReferences'))
if isinstance(subtitle_references, list):
for sr in subtitle_references:
subtitle_url = sr.get('url')
subtitle_lang = sr.get('language', 'sv')
if subtitle_url:
if determine_ext(subtitle_url) == 'm3u8':
# TODO(yan12125): handle WebVTT in m3u8 manifests
continue
subtitles.setdefault(subtitle_lang, []).append({'url': subtitle_url})
title = video_info.get('title')
series = video_info.get('programTitle')
season_number = int_or_none(video_info.get('season'))
episode = video_info.get('episodeTitle')
episode_number = int_or_none(video_info.get('episodeNumber'))
duration = int_or_none(dict_get(video_info, ('materialLength', 'contentDuration')))
age_limit = None
adult = dict_get(
video_info, ('inappropriateForChildren', 'blockedForChildren'),
skip_false_values=False)
if adult is not None:
age_limit = 18 if adult else 0
return {
'id': video_id,
'title': title,
'formats': formats,
'subtitles': subtitles,
'duration': duration,
'age_limit': age_limit,
'series': series,
'season_number': season_number,
'episode': episode,
'episode_number': episode_number,
}
class SVTIE(SVTBaseIE):
_VALID_URL = r'https?://(?:www\.)?svt\.se/wd\?(?:.*?&)?widgetId=(?P<widget_id>\d+)&.*?\barticleId=(?P<id>\d+)'
_TEST = {
        'url': 'http://www.svt.se/wd?widgetId=23991&sectionId=541&articleId=2900353&type=embed&contextSectionId=123&autostart=false',
'md5': '33e9a5d8f646523ce0868ecfb0eed77d',
'info_dict': {
'id': '2900353',
'ext': 'mp4',
'title': 'Stjärnorna skojar till det - under SVT-intervjun',
'duration': 27,
'age_limit': 0,
},
}
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'(?:<iframe src|href)="(?P<url>%s[^"]*)"' % SVTIE._VALID_URL, webpage)
if mobj:
return mobj.group('url')
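    # Note: this helper is presumably used by other extractors (e.g. the
    # generic one) to detect embedded SVT players in third-party pages.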
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
widget_id = mobj.group('widget_id')
article_id = mobj.group('id')
info = self._download_json(
'http://www.svt.se/wd?widgetId=%s&articleId=%s&format=json&type=embed&output=json' % (widget_id, article_id),
article_id)
info_dict = self._extract_video(info['video'], article_id)
info_dict['title'] = info['context']['title']
return info_dict
class SVTPlayBaseIE(SVTBaseIE):
_SVTPLAY_RE = r'root\s*\[\s*(["\'])_*svtplay\1\s*\]\s*=\s*(?P<json>{.+?})\s*;\s*\n'
class SVTPlayIE(SVTPlayBaseIE):
IE_DESC = 'SVT Play and Öppet arkiv'
_VALID_URL = r'https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp)/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.svtplay.se/video/5996901/flygplan-till-haile-selassie/flygplan-till-haile-selassie-2',
'md5': '2b6704fe4a28801e1a098bbf3c5ac611',
'info_dict': {
'id': '5996901',
'ext': 'mp4',
'title': 'Flygplan till Haile Selassie',
'duration': 3527,
'thumbnail': r're:^https?://.*[\.-]jpg$',
'age_limit': 0,
'subtitles': {
'sv': [{
'ext': 'wsrt',
}]
},
},
}, {
# geo restricted to Sweden
'url': 'http://www.oppetarkiv.se/video/5219710/trollflojten',
'only_matching': True,
}, {
'url': 'http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
data = self._parse_json(
self._search_regex(
self._SVTPLAY_RE, webpage, 'embedded data', default='{}',
group='json'),
video_id, fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
if data:
video_info = try_get(
data, lambda x: x['context']['dispatcher']['stores']['VideoTitlePageStore']['data']['video'],
dict)
if video_info:
info_dict = self._extract_video(video_info, video_id)
info_dict.update({
'title': data['context']['dispatcher']['stores']['MetaStore']['title'],
'thumbnail': thumbnail,
})
return info_dict
video_id = self._search_regex(
r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
webpage, 'video id', default=None)
if video_id:
data = self._download_json(
'https://api.svt.se/videoplayer-api/video/%s' % video_id,
video_id, headers=self.geo_verification_headers())
info_dict = self._extract_video(data, video_id)
if not info_dict.get('title'):
info_dict['title'] = re.sub(
r'\s*\|\s*.+?$', '',
info_dict.get('episode') or self._og_search_title(webpage))
return info_dict
class SVTSeriesIE(SVTPlayBaseIE):
_VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)'
_TESTS = [{
'url': 'https://www.svtplay.se/rederiet',
'info_dict': {
'id': 'rederiet',
'title': 'Rederiet',
'description': 'md5:505d491a58f4fcf6eb418ecab947e69e',
},
'playlist_mincount': 318,
}, {
'url': 'https://www.svtplay.se/rederiet?tab=sasong2',
'info_dict': {
'id': 'rederiet-sasong2',
'title': 'Rederiet - Säsong 2',
'description': 'md5:505d491a58f4fcf6eb418ecab947e69e',
},
'playlist_count': 12,
}]
@classmethod
def suitable(cls, url):
return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTSeriesIE, cls).suitable(url)
def _real_extract(self, url):
series_id = self._match_id(url)
qs = compat_parse_qs(compat_urllib_parse_urlparse(url).query)
season_slug = qs.get('tab', [None])[0]
if season_slug:
series_id += '-%s' % season_slug
webpage = self._download_webpage(
url, series_id, 'Downloading series page')
root = self._parse_json(
self._search_regex(
self._SVTPLAY_RE, webpage, 'content', group='json'),
series_id)
season_name = None
entries = []
for season in root['relatedVideoContent']['relatedVideosAccordion']:
if not isinstance(season, dict):
continue
if season_slug:
if season.get('slug') != season_slug:
continue
season_name = season.get('name')
videos = season.get('videos')
if not isinstance(videos, list):
continue
for video in videos:
content_url = video.get('contentUrl')
if not content_url or not isinstance(content_url, compat_str):
continue
entries.append(
self.url_result(
urljoin(url, content_url),
ie=SVTPlayIE.ie_key(),
video_title=video.get('title')
))
metadata = root.get('metaData')
if not isinstance(metadata, dict):
metadata = {}
title = metadata.get('title')
season_name = season_name or season_slug
if title and season_name:
title = '%s - %s' % (title, season_name)
elif season_slug:
title = season_slug
return self.playlist_result(
entries, series_id, title, metadata.get('description'))
| unlicense | -2,224,861,653,834,999,800 | 34.060284 | 133 | 0.516132 | false |
Cosiroc/bleau-database | BleauDataBase/GeoFormat/GPX.py | 2 | 6322 | ####################################################################################################
#
# Bleau Database - A database of the bouldering area of Fontainebleau
# Copyright (C) 2015 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
import logging
try:
from lxml import etree
except ImportError:
logging.warn('lxml module is not available')
etree = None
####################################################################################################
from ..FieldObject import FromJsonMixin
####################################################################################################
class WayPoint(FromJsonMixin):
# < wpt lat="latitudeType [1] ?" lon="longitudeType [1] ?">
# <ele> xsd:decimal </ele> [0..1] ?
# <time> xsd:dateTime </time> [0..1] ?
# <magvar> degreesType </magvar> [0..1] ?
# <geoidheight> xsd:decimal </geoidheight> [0..1] ?
# <name> xsd:string </name> [0..1] ?
# <cmt> xsd:string </cmt> [0..1] ?
# <desc> xsd:string </desc> [0..1] ?
# <src> xsd:string </src> [0..1] ?
# <link> linkType </link> [0..*] ?
# <sym> xsd:string </sym> [0..1] ?
# <type> xsd:string </type> [0..1] ?
# <fix> fixType </fix> [0..1] ?
# <sat> xsd:nonNegativeInteger </sat> [0..1] ?
# <hdop> xsd:decimal </hdop> [0..1] ?
# <vdop> xsd:decimal </vdop> [0..1] ?
# <pdop> xsd:decimal </pdop> [0..1] ?
# <ageofdgpsdata> xsd:decimal </ageofdgpsdata> [0..1] ?
# <dgpsid> dgpsStationType </dgpsid> [0..1] ?
# <extensions> extensionsType </extensions> [0..1] ?
# </wpt>
lat = float
lon = float
ele = float
time = str
magvar = float
geoidheight = float
name = str
cmt = str
desc = str
src = str
link = str
sym = str
type = str
fix = str
sat = int
hdop = float
vdop = float
pdop = float
ageofdgpsdata = float
dgpsid = int
####################################################################################################
class GPX:
##############################################
def __init__(self, gpx_path=None, schema_path=None):
self._waypoints = []
if gpx_path is not None:
self._parse(gpx_path, schema_path)
##############################################
def _parse(self, gpx_path, schema_path=None):
if schema_path is not None:
schema = etree.XMLSchema(file=schema_path)
parser = etree.XMLParser(schema=schema)
else:
parser = None
namespaces = dict(topografix='http://www.topografix.com/GPX/1/1')
tree = etree.parse(gpx_path, parser=parser)
# root = tree.getroot()
# for item in root:
# print(item.tag, tree.getpath(item))
waypoints = []
for waypoint_element in tree.xpath('topografix:wpt', namespaces=namespaces):
d = self._attribute_to_dict(waypoint_element, ('lat', 'lon'))
for element in waypoint_element:
field = etree.QName(element.tag).localname
d[field] = element.text
waypoint = WayPoint(**d)
waypoints.append(waypoint)
self._waypoints = waypoints
##############################################
@staticmethod
def _attribute_to_dict(node, fields):
attributes = node.attrib
return {field:attributes[field] for field in fields}
##############################################
@property
def waypoints(self):
return self._waypoints
##############################################
def add_waypoint(self, waypoint):
self._waypoints.append(waypoint)
##############################################
def add_waypoints(self, waypoints):
self._waypoints.extend(waypoints)
##############################################
def add_new_waypoint(self, **kwargs):
        self.add_waypoint(WayPoint(**kwargs))
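    # Minimal round-trip sketch (hypothetical values and output path):
    #   gpx = GPX()
    #   gpx.add_new_waypoint(lat=48.447, lon=2.637, name='Bas Cuvier')
    #   gpx.write('/tmp/waypoints.gpx')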
##############################################
def write(self, path, encoding='utf-8'):
with etree.xmlfile(path,
encoding=encoding,
compression=None,
close=True,
buffered=True) as xf:
xf.write_declaration() # standalone=True
attributes = {
'version': '1.1',
'creator': 'BleauDataBase',
'xmlns': 'http://www.topografix.com/GPX/1/1',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xsi:schemaLocation': 'http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd',
}
with xf.element('gpx', **attributes):
for waypoint in self._waypoints:
d = waypoint.to_json(only_defined=True)
attributes = {field:str(d[field]) for field in ('lon', 'lat')}
del d['lon']
del d['lat']
with xf.element('wpt', **attributes):
# Fixme: iter ?
# for field in waypoint.__field_names__:
# value = getattr(waypoint, field)
# if value is not None:
for field, value in d.items():
with xf.element(field):
xf.write(str(value))
xf.flush()
| agpl-3.0 | 4,304,305,302,924,749,300 | 33.546448 | 116 | 0.465201 | false |
hugosenari/simplui | simplui/container.py | 1 | 5994 | # ----------------------------------------------------------------------
# Copyright (c) 2009 Tristam MacDonald
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of DarkCoda nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from .widget import Widget
from .geometry import Rect
class Container(Widget):
"""Base class for all GUI containers, also usable by itself"""
def __init__(self, **kwargs):
"""Create a container
Keyword arguments:
name -- unique widget identifier
children -- list of child elements to be added to this container
"""
Widget.__init__(self, **kwargs)
self.children = []
children = kwargs.get('children', [])
for c in children:
self.add(c)
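    # Sketch (hypothetical child widgets):
    #   Container(name='root', children=[some_label, some_button])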
def _get_visible(self):
return self._visible
def _set_visible(self, visible):
Widget._set_visible(self, visible)
for c in self.children:
c.visible = visible
visible = property(_get_visible, _set_visible)
def update_global_coords(self):
Widget.update_global_coords(self)
for c in self.children:
c.update_global_coords()
def update_elements(self):
Widget.update_elements(self)
for c in self.children:
c.update_elements()
def update_theme(self, theme):
Widget.update_theme(self, theme)
for c in self.children:
c.update_theme(theme)
def update_batch(self, batch, group):
Widget.update_batch(self, batch, group)
for c in self.children:
c.update_batch(batch, group)
def update_names(self, oldname=None):
Widget.update_names(self, oldname)
for c in self.children:
c.update_names(oldname)
def remove_names(self):
Widget.remove_names(self)
for c in self.children:
c.remove_names()
def add(self, child):
self.children.append(child)
child.parent = self
self.theme and child.update_theme(self.theme)
child.update_batch(self._batch, self._group)
self.find_root().update_layout()
child.update_names()
def remove(self, child):
child.remove_names()
self.children.remove(child)
child.parent = None
child.update_batch(None, None)
self.find_root().update_layout()
def on_mouse_press(self, x, y, button, modifiers):
Widget.on_mouse_press(self, x, y, button, modifiers)
r = self.clip_rect()
for c in self.children:
if r.intersect(c.bounds()).hit_test(x, y):
c.on_mouse_press(x, y, button, modifiers)
def on_mouse_drag(self, x, y, dx, dy, button, modifiers):
Widget.on_mouse_drag(self, x, y, dx, dy, button, modifiers)
for c in self.children:
c.on_mouse_drag(x, y, dx, dy, button, modifiers)
def on_mouse_release(self, x, y, button, modifiers):
Widget.on_mouse_release(self, x, y, button, modifiers)
r = self.clip_rect()
for c in self.children:
if r.intersect(c.bounds()).hit_test(x, y):
c.on_mouse_release(x, y, button, modifiers)
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
Widget.on_mouse_scroll(self, x, y, scroll_x, scroll_y)
r = self.clip_rect()
for c in self.children:
if r.intersect(c.bounds()).hit_test(x, y):
c.on_mouse_scroll(x, y, scroll_x, scroll_y)
def on_key_press(self, symbol, modifiers):
Widget.on_key_press(self, symbol, modifiers)
for c in self.children:
c.on_key_press(symbol, modifiers)
def on_text(self, text):
Widget.on_text(self, text)
for c in self.children:
c.on_text(text)
def on_text_motion(self, motion, select=False):
Widget.on_text_motion(self, motion, select)
for c in self.children:
c.on_text_motion(motion, select)
def clip_rect(self):
return Rect(self._gx, self._gy, self.w, self.h)
class SingleContainer(Container):
"""Utility base class for containers restricted to a single child"""
def __init__(self, **kwargs):
if 'children' in kwargs:
del kwargs['children']
Container.__init__(self, **kwargs)
self._content = None
def _get_content(self):
return self._content
def _set_content(self, content):
if self._content:
Container.remove(self, self._content)
self._content = content
if self._content:
Container.add(self, self._content)
self.find_root().update_layout()
content = property(_get_content, _set_content)
def add(self, other):
raise UserWarning('add to the content element')
def remove(self, other):
raise UserWarning('remove from the content element')
def determine_size(self):
if self._content:
self._content.determine_size()
self._pref_size = self._content._pref_size
def reset_size(self, size):
Widget.reset_size(self, size)
if self._content:
self._content.reset_size(size)
| bsd-3-clause | -8,756,964,571,835,446,000 | 28.239024 | 72 | 0.685352 | false |
TheLazyHase/dragon_dice_simulator | business/dice/face/special_on_melee/roar.py | 1 | 1035 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Tisserant Pierre
#
# This file is part of Dragon dice simulator.
#
# Dragon dice simulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragon dice simulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Dragon dice simulator. If not, see <http://www.gnu.org/licenses/>.
from business.dice.face import SpecialOnMelee
class Roar(SpecialOnMelee):
@property
def name(self):
return '%s Roar' % self.amount
@property
def get_special(self):
print 'NYI'
| gpl-3.0 | 6,727,823,464,892,241,000 | 35.964286 | 83 | 0.713043 | false |
vicnet/weboob | modules/regionsjob/module.py | 1 | 9219 | # -*- coding: utf-8 -*-
# Copyright(C) 2014 Bezleputh
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
from weboob.tools.backend import Module, BackendConfig
from weboob.capabilities.job import CapJob, BaseJobAdvert
from .browser import RegionsjobBrowser
from weboob.tools.value import Value
__all__ = ['RegionsjobModule']
class RegionsjobModule(Module, CapJob):
NAME = 'regionsjob'
DESCRIPTION = u'regionsjob website'
MAINTAINER = u'Bezleputh'
EMAIL = '[email protected]'
LICENSE = 'AGPLv3+'
VERSION = '1.6'
BROWSER = RegionsjobBrowser
website_choices = OrderedDict([(k, u'%s (%s)' % (v, k)) for k, v in sorted({
'www.centrejob.com': u'CentreJob',
'www.estjob.com': u'EstJob',
'www.nordjob.com': u'NordJob',
'www.ouestjob.com': u'OuestJob',
'www.pacajob.com': u'PacaJob',
'www.parisjob.com': u'ParisJob',
'www.rhonealpesjob.com': u'RhoneAlpesJob',
'www.sudouestjob.com': u'SudOuestJob',
'www.jobtrotter.com': u'JobTrotter',
}.items())])
fonction_choices = OrderedDict([(k, u'%s' % (v)) for k, v in sorted({
'': u'Indifferent',
'Assistanat_admin_accueil': u'Assistanat/Adm.ventes/Accueil',
'BTP_gros_second_oeuvre': u'BTP - Gros Oeuvre/Second Oeuvre',
'Bureau_etude_R_D': u'Bureau d\'Etudes/R & D/BTP archi/conception',
'Commercial_technico_com': u'Commercial - Technico-Commercial',
'Commercial_particulier': u'Commercial auprès des particuliers',
'Commercial_professionnel': u'Commercial auprès des professionnels',
'Commercial_vendeur': u'Commercial-Vendeur en magasin',
'Compta_gestion_finance_audit': u'Compta/Gestion/Finance/Audit',
'Dir_resp_centre_profit': u'Direction/Resp. Co. et Centre de Profit',
'Import_export_inter': u'Import/Export/International',
'Informatique_dev_hard': u'Informatique - Dével. Hardware',
'Informatique_dev': u'Informatique - Développement',
'Informatique_syst_info': u'Informatique - Systèmes d\'Information',
'Informatique_syst_reseaux': u'Informatique - Systèmes/Réseaux',
'Ingenierie_agro_agri': u'Ingénierie - Agro/Agri',
'Ingenierie_chimie_pharma_bio': u'Ingénierie - Chimie/Pharmacie/Bio.',
'Ingenierie_electro_tech': u'Ingénierie - Electro-tech./Automat.',
'Ingenierie_meca_aero': u'Ingénierie - Mécanique/Aéron.',
'Ingenierie_telecom': u'Ingénierie - Telecoms/Electronique',
'Juridique_droit': u'Juridique/Droit',
'Logistique_metiers_transport': u'Logistique/Métiers du Transport',
'Marketing_com_graphisme': u'Marketing/Communication/Graphisme',
'Dir_management_resp': u'Métiers de la distribution - Management/Resp.',
'Metiers_fonction_publique': u'Métiers de la Fonction Publique',
'Negociation_gest_immo': u'Négociation/Gestion immobilière',
'Production_gestion': u'Production - Gestion/Maintenance',
'Production_operateur': u'Production - Opérateur/Manoeuvre',
'Qualite_securite_environnement': u'Qualité/Hygiène/Sécurité/Environnement',
'Restauration_hotellerie_tourisme': u'Restauration/Tourisme/Hôtellerie/Loisirs',
'RH_Personnel_Formation': u'RH/Personnel/Formation',
'Sante_social': u'Santé/Social',
'SAV_Hotline': u'SAV/Hotline/Téléconseiller',
'Services_pers_entreprises': u'Services à la personne/aux entreprises',
}.items())])
secteur_choices = OrderedDict([(k, u'%s' % (v)) for k, v in sorted({
'': u'Indifferent',
'Agri_peche': u'Agriculture/Pêche',
'Banq_assur_finan': u'Banque/Assurance/Finance',
'BTP': u'BTP',
'Distrib_commerce': u'Distribution/Commerce de gros',
'Enseign_forma': u'Enseignement/Formation',
'Immo': u'Immobilier',
'Ind_aero': u'Industrie Aéronautique/Aérospatial',
'Ind_agro': u'Industrie Agro-alimentaire',
'Ind_auto_meca_nav': u'Industrie Auto/Meca/Navale',
'Ind_hightech_telecom': u'Industrie high-tech/Telecom',
'Ind_manufact': u'Industrie Manufacturière',
'Ind_petro': u'Industrie Pétrolière/Pétrochimie',
'Ind_pharma_bio_chim': u'Industrie Pharmaceutique/Biotechn./Chimie',
'Media_internet_com': u'Média/Internet/Communication',
'Resto': u'Restauration',
'Sante_social': u'Santé/Social/Association',
'Energie_envir': u'Secteur Energie/Environnement',
'Inform_SSII': u'Secteur informatique/SSII',
'Serv_public_autre': u'Service public autres',
'Serv_public_collec_terri': u'Service public des collectivités territoriales',
'Serv_public_etat': u'Service public d\'état',
'Serv_public_hosp': u'Service public hospitalier',
'Serv_entreprise': u'Services aux Entreprises',
'Serv_pers_part': u'Services aux Personnes/Particuliers',
'Tourism_hotel_loisir': u'Tourisme/Hôtellerie/Loisirs',
'Transport_logist': u'Transport/Logistique',
}.items())])
experience_choices = OrderedDict([(k, u'%s' % (v)) for k, v in sorted({
' ': u'Indifferent',
'Inf_1': u'- 1 an',
'1_7': u'1 à 7 ans',
'Sup_7': u'+ 7 ans',
}.items())])
contract_choices = OrderedDict([(k, u'%s' % (v)) for k, v in sorted({
'': u'Tous types de contrat',
'CDD': u'CDD',
'CDI': u'CDI',
'Stage': u'Stage',
'Travail_temp': u'Travail temporaire',
'Alternance': u'Alternance',
'Independant': u'Indépendant',
'Franchise': u'Franchise',
}.items())])
qualification_choice = OrderedDict([(k, u'%s' % (v)) for k, v in sorted({
'': u'Indifferent',
'BEP_CAP': u'BEP/CAP',
'Employe_Operateur': u'Employé/Opérateur/Ouvrier Spe/Bac',
'Technicien_B2': u'Technicien/Employé Bac +2',
'Agent_maitrise_B3': u'Agent de maîtrise/Bac +3/4',
'Ingenieur_B5': u'Ingénieur/Cadre/Bac +5',
'Cadre_dirigeant': u'> Bac + 5 (cadre dirigeant)',
}.items())])
enterprise_type_choice = OrderedDict([(k, u'%s' % (v)) for k, v in sorted({
'': u'Tous types d\'entreprises',
'Cabinet_recr': u'Cabinets de recrutement',
'Entreprises': u'Entreprises',
'SSII': u'SSII',
'Travail_temporaire': u'Travail temporaire',
}.items())])
CONFIG = BackendConfig(Value('website', label=u'Region', choices=website_choices),
Value('place', label='Place', masked=False, default=''),
Value('metier', label='Job name', masked=False, default=''),
Value('fonction', label=u'Fonction', choices=fonction_choices, default=''),
Value('secteur', label=u'Secteur', choices=secteur_choices, default=''),
Value('contract', label=u'Contract', choices=contract_choices, default=''),
Value('experience', label=u'Experience', choices=experience_choices, default=''),
Value('qualification', label=u'Qualification', choices=qualification_choice, default=''),
Value('enterprise_type', label=u'Enterprise type',
choices=enterprise_type_choice, default=''))
def create_default_browser(self):
return self.create_browser(self.config['website'].get())
def search_job(self, pattern=''):
return self.browser.search_job(pattern=pattern)
def advanced_search_job(self):
return self.browser.search_job(pattern=self.config['metier'].get(),
fonction=self.config['fonction'].get(),
secteur=self.config['secteur'].get(),
contract=self.config['contract'].get(),
experience=self.config['experience'].get().strip(),
qualification=self.config['qualification'].get(),
enterprise_type=self.config['enterprise_type'].get(),
place=self.config['place'].get())
def get_job_advert(self, _id, advert=None):
return self.browser.get_job_advert(_id, advert)
def fill_obj(self, advert, fields):
return self.get_job_advert(advert.id, advert)
OBJECTS = {BaseJobAdvert: fill_obj}
| lgpl-3.0 | 2,018,595,079,303,210,200 | 47.52381 | 116 | 0.619671 | false |
dwavesystems/dimod | tests/test_variables.py | 1 | 6327 | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc as abc
import decimal
import fractions
import itertools
import unittest
import numpy as np
from parameterized import parameterized_class
from dimod.variables import Variables
class TestAppend(unittest.TestCase):
def test_conflict(self):
variables = Variables()
variables._append(1)
variables._append() # should take the label 0
variables._append()
self.assertEqual(variables, [1, 0, 2])
class TestDuplicates(unittest.TestCase):
def test_duplicates(self):
# should have no duplicates
variables = Variables(['a', 'b', 'c', 'b'])
self.assertEqual(list(variables), ['a', 'b', 'c'])
def test_count(self):
variables = Variables([1, 1, 1, 4, 5])
self.assertEqual(list(variables), [1, 4, 5])
for v in range(10):
if v in variables:
self.assertEqual(variables.count(v), 1)
else:
self.assertEqual(variables.count(v), 0)
def test_len(self):
variables = Variables('aaaaa')
self.assertEqual(len(variables), 1)
def test_unlike_types_eq_hash(self):
zeros = [0, 0.0, np.int8(0), np.float64(0),
fractions.Fraction(0), decimal.Decimal(0)]
for perm in itertools.permutations(zeros, len(zeros)):
variables = Variables(perm)
self.assertEqual(len(variables), len(set(zeros)))
class TestIndex(unittest.TestCase):
def test_permissive(self):
variables = Variables()
with self.assertRaises(ValueError):
variables.index(0)
self.assertEqual(variables.index(0, permissive=True), 0)
self.assertEqual(variables.index(0, permissive=True), 0)
self.assertEqual(variables.index('a', permissive=True), 1)
class TestPop(unittest.TestCase):
def test_empty(self):
with self.assertRaises(IndexError):
Variables()._pop()
def test_simple(self):
variables = Variables('abc')
self.assertEqual(variables._pop(), 'c')
self.assertEqual(variables, 'ab')
class TestPrint(unittest.TestCase):
def test_pprint(self):
import pprint
variables = Variables(range(10))
variables._append('a') # make not range
string = pprint.pformat(variables, width=20)
target = '\n'.join(
["Variables([0,",
" 1,",
" 2,",
" 3,",
" 4,",
" 5,",
" 6,",
" 7,",
" 8,",
" 9,",
" 'a'])"])
self.assertEqual(string, target)
def test_repr_empty(self):
variables = Variables()
self.assertEqual(repr(variables), 'Variables()')
def test_repr_mixed(self):
variables = Variables('abc')
self.assertEqual(repr(variables), "Variables(['a', 'b', 'c'])")
def test_repr_range(self):
self.assertEqual(repr(Variables(range(10))),
'Variables({!r})'.format(list(range(10))))
self.assertEqual(repr(Variables(range(11))), 'Variables(range(0, 11))')
class TestRelabel(unittest.TestCase):
def test_permissive(self):
variables = Variables([0, 1])
# relabels a non-existant variable 2
variables._relabel({0: 'a', 1: 'b', 2: 'c'})
self.assertEqual(variables, Variables('ab'))
def test_swap(self):
variables = Variables([1, 0, 3, 4, 5])
variables._relabel({5: 3, 3: 5})
self.assertEqual(variables, [1, 0, 5, 4, 3])
@parameterized_class(
[dict(name='list', iterable=list(range(5))),
dict(name='string', iterable='abcde'),
dict(name='range', iterable=range(5)),
dict(name='range_reversed', iterable=range(4, -1, -1)),
dict(name='range_start', iterable=range(2, 7)),
dict(name='range_step', iterable=range(0, 10, 2)),
dict(name='mixed', iterable=[0, ('b',), 2.1, 'c', frozenset('d')]),
dict(name='floats', iterable=[0., 1., 2., 3., 4.]),
],
class_name_func=lambda cls, i, inpt: '%s_%s' % (cls.__name__, inpt['name'])
)
class TestIterable(unittest.TestCase):
def test_contains_unhashable(self):
variables = Variables(self.iterable)
self.assertFalse([] in variables)
def test_count_unhashable(self):
variables = Variables(self.iterable)
self.assertEqual(variables.count([]), 0)
def test_index(self):
variables = Variables(self.iterable)
for idx, v in enumerate(self.iterable):
self.assertEqual(variables.index(v), idx)
def test_iterable(self):
variables = Variables(self.iterable)
self.assertEqual(list(variables), list(self.iterable))
def test_equality(self):
variables = Variables(self.iterable)
self.assertEqual(variables, self.iterable)
def test_len(self):
variables = Variables(self.iterable)
self.assertEqual(len(variables), len(self.iterable))
def test_relabel_conflict(self):
variables = Variables(self.iterable)
iterable = self.iterable
# want a relabelling with identity relabels and that maps to the same
# set of labels as the original
target = [iterable[-i] for i in range(len(iterable))]
mapping = dict(zip(iterable, target))
variables._relabel(mapping)
self.assertEqual(variables, target)
def test_relabel_not_hashable(self):
variables = Variables(self.iterable)
mapping = {v: [v] for v in variables}
with self.assertRaises(ValueError):
variables._relabel(mapping)
| apache-2.0 | 2,359,141,378,282,986,000 | 31.446154 | 79 | 0.599178 | false |
micolous/cfsprinter | src/pagerprinter/plugins/skypesms.py | 1 | 2611 | #!/usr/bin/env python
"""
Skype SMS plugin for pagerprinter.
Copyright 2011-2013 Michael Farrell <http://micolous.id.au/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Skype documentation that isn't behind a wall, because Skype have some
funny ideas about the definition of "open source projects", that they
can demand you to sign up for a developer programme when you really
don't need to:
<http://skype4py.sourceforge.net/doc/html/>
Library download:
<http://sourceforge.net/projects/skype4py/files/>
Note that the API is entirely un-pythonic, so be careful. It seems
like .NET coding conventions.
If you're finding yourself sending a lot of messages, sign up for a
proper SMS gateway. It's cheaper and doesn't require Skype to be
running.
"""
from __future__ import absolute_import
from . import BasePlugin
try:
from Skype4Py import Skype, smsMessageTypeOutgoing
except ImportError:
print "NOTICE: skypesms plugin requires Skype4Py to be installed"
print "http://sourceforge.net/projects/skype4py/"
PLUGIN = None
else:
# make our plugin!
class SkypePlugin(BasePlugin):
def __init__(self):
print "Attempting to connect to Skype API. If Python crashes, this"
print "could mean that Skype isn't running at the moment."
print ""
print "(There's no way around this at present -- Skype's Python"
print "libraries suck. It also this seems to crash all the time"
print "on OSX.)"
# connect to skype
self.skype = Skype()
# skype4py is quite un-pythonic, with it's method names.
self.skype.Attach()
# by now skype4py would crash if skype isn't running.
def configure(self, c):
# read in phone numbers we need
self.numbers = [
x.strip()
for x
in c.get('skypesms', 'to').lower().split(',')
]
def execute(self, msg, unit, address, when, printer, print_copies):
# lets just send the whole message verbatim.
sms = self.skype.CreateSms(smsMessageTypeOutgoing, *self.numbers)
sms.Body = "%s: %s - %s" % (when.ctime(), msg, unit)
sms.Send()
PLUGIN = SkypePlugin
| gpl-3.0 | 4,111,728,540,101,125,000 | 32.474359 | 71 | 0.733435 | false |
lmazuel/azure-sdk-for-python | azure-batch/azure/batch/models/application_get_options.py | 1 | 1743 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationGetOptions(Model):
"""Additional parameters for get operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None):
super(ApplicationGetOptions, self).__init__()
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
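# Usage sketch (variable names are illustrative):
#   options = ApplicationGetOptions(timeout=60,
#       client_request_id='9C4D50EE-2D56-4CD3-8152-34347DC9F2B0',
#       return_client_request_id=True)
# The instance is then passed as the additional-parameters object of the
# corresponding application get call.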
| mit | -6,436,945,429,809,833,000 | 43.692308 | 106 | 0.659208 | false |
Open-Transport/synthese | legacy/tools/synthesepy/db_backends_unittest.py | 1 | 1294 | # Database backends unit tests.
# @file db_backends_unittest.py
# @author Sylvain Pasche
#
# This file belongs to the SYNTHESE project (public transportation specialized software)
# Copyright (C) 2002 Hugues Romain - RCSmobility <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import unittest
from synthesepy import db_backends
class DummyEnv(object):
pass
class TestDBBackend(unittest.TestCase):
def test_create_backend(self):
backend = db_backends.create_backend(DummyEnv(), 'dummy://')
self.assertEqual(backend.name, 'dummy')
| gpl-2.0 | -3,998,989,573,430,954,000 | 35.971429 | 91 | 0.727975 | false |
Vgr255/Wolfbot | settings/wolfgame.py | 1 | 23664 | import botconfig
#####################################################################################
PING_WAIT = 300 # seconds #
PING_MIN_WAIT = 30 # amount of time between first !join and !ping can be allowed #
MINIMUM_WAIT = 60 # amount of time the players must wait between the first !join and !start #
EXTRA_WAIT = 20 # amount of time !wait adds before starting the game #
EXTRA_WAIT_JOIN = 0 # Add this many seconds to the waiting time for each !join (unusued yet) #
WAIT_AFTER_JOIN = 10 # Wait at least this many seconds after the last join (still yet to implement) #
MAXIMUM_WAITED = 3 # limit for amount of !wait's #
STATS_RATE_LIMIT = 15 # time between two !stats #
VOTES_RATE_LIMIT = 15 # time between two !votes #
ADMINS_RATE_LIMIT = 300 # time between two !admins #
GSTATS_RATE_LIMIT = 0 # time to wait for each !gamestats #
PSTATS_RATE_LIMIT = 0 # time to wait for each !player #
TIME_RATE_LIMIT = 30 # time to wait for each !time #
SHOTS_MULTIPLIER = .16 # ceil(shots_multiplier * len_players) = bullets given #
MOLOTOV_AMOUNT = .11 # ceil(molotov_ammount * len_players) = molotovs given #
MIN_PLAYERS = 4 # minimum amount of players needed to start a game #
MAX_PLAYERS = 30 # maximum amount of players allowed #
DRUNK_SHOTS_MULTIPLIER = 3 # drunk gets more bullets #
DRUNK_FIRE_MULTIPLIER = 5 # drunk gets way more molotovs. but he can die as easily #
NIGHT_TIME_WARN = 90 # should be less than NIGHT_TIME_LIMIT #
NIGHT_TIME_LIMIT = 120 # night ends after x seconds (default is 120) #
DAY_TIME_LIMIT_WARN = 480 # warns before the day changes #
DAY_TIME_LIMIT_CHANGE = 120 # seconds after DAY_TIME_LIMIT_WARN has passed #
JOIN_TIME_LIMIT = 1800 # amount of time (in seconds) before game is cancelled after first join #
SHORT_DAY_PLAYERS = 6 # Number of players left to have a short day #
SHORT_DAY_LIMIT_WARN = 180 # same as above, except for small days. only set if above is also set #
SHORT_DAY_LIMIT_CHANGE = 120 # same as above, except for small days #
START_WITH_DAY = False # obviously, does game starts with day? #
WOLF_STEALS_GUN = True # if True, gun will be handed to a random wolf/traitor/werecrow when gunner dies #
WOLF_STEALS_FIRE = True # same, but for the arsonist instead #
KILL_IDLE_TIME = 300 # amount of seconds before the player is removed from the game #
WARN_IDLE_TIME = 180 # warns after x seconds, before the player is removed from the game #
PART_GRACE_TIME = 30 # amount of seconds the bot waits before removing when user /parts #
QUIT_GRACE_TIME = 30 # amount of seconds the bot waits before removing when user /quits #
MIN_LOG_PLAYERS = 12 # number of players needed to disable logging (reducing lag) #
MAX_PRIVMSG_TARGETS = 1 # better not touch that... #
LEAVE_STASIS_PENALTY = 0 # number of games user is not allowed to join if they !leave #
IDLE_STASIS_PENALTY = 0 # same, if they idle out #
PART_STASIS_PENALTY = 0 # same but for /part instead #
SELF_LYNCH_ALLOWED = True # can you lynch yourself? #
GOAT_HERDER = True # new role? not sure #
HIDDEN_TRAITOR = True # something about hiding the traitor, making it look like a villager? #
CANT_KILL_TRAITOR = True # Can the wolves kill the traitor? #
CARE_BOLD = False # determines if the bot cares about bolds in channel #
CARE_COLOR = False # same, except for color #
KILL_COLOR = False # does the bot kick you for using color #
KILL_BOLD = False # same, but for bold #
CARE_ADVERTISING = False # warns any message containing a '#' in it (advertising, hashtag, etc) #
KILL_ADVERTISING = False # kicks on advertising #
EXEMPT_ADMINS = True # doesn't kick admins/owners #
BAN_AFTER_KICKS = True # decide whether user will be banned/quieted after being kicked #
TIME_BEFORE_UNSET = 30 # amount of time (in seconds) before user is un-banned/quieted #
BAN_TYPE = "q" # should be either q or b (WITHOUT the +) to decide between ban or quiet #
AUTO_OP_FLAG = True # used to decide whether the bot will send /msg ChanServ op on connect #
AUTO_OP_FAIL = False # if set to True it will send an error to the channel upon connecting #
RAW_JOIN = True # allow to join other chans than the default one #
LOG_CHAN = False # logs activity in botconfig.ADMIN_CHAN #
LOG_AUTO_TOGGLE = True # automatically disables logging if there are too many players #
AUTO_LOG_TOGGLE = False # automatically toggle logging when an admin gets in the admin_chan #
MINIMALIST_LOG = True # only displays sensible commands. only useful if LOG_CHAN = False #
EXT_PING = "" # external pinging in the special channel. leave blank to disable it #
MAX_ERRORS = 4 # max amount of errors that can happen before the bot quits #
USE_IDENT = False # defines if should use ident along with host for !ping and similar #
ALLOW_GIT = True # joins the development channel and automatically fetches commits #
AUTO_OP_DEOP = True # determines if bot ops and deops chanops on start and endgame #
#####################################################################################
LOG_FILENAME = ""
BARE_LOG_FILENAME = ""
# HIT MISS SUICIDE
GUN_CHANCES = ( 5/7 , 1/7 , 1/7 )
DRUNK_GUN_CHANCES = ( 3/7 , 3/7 , 1/7 )
MANSLAUGHTER_CHANCE = 1/5 # ACCIDENTAL HEADSHOT (FATAL)
# SUCCESS MISS SUICIDE
FIRE_CHANCES = ( 3/7 , 3/7 , 1/7 )
DRUNK_FIRE_CHANCES = ( 2/7 , 2/7 , 3/7 )
GUNNER_KILLS_WOLF_AT_NIGHT_CHANCE = 7/10
PYRO_KILLS_WOLF_AT_NIGHT_CHANCE = 4/5
GUARDIAN_ANGEL_DIES_CHANCE = 1/2
DETECTIVE_REVEALED_CHANCE = 2/5
#########################################################################################################################
# ROLE INDEX: PLAYERS SEER WOLF CURSED DRUNK HARLOT TRAITOR GUNNER CROW ANGEL DETECTIVE PYRO ##
#########################################################################################################################
ROLES_GUIDE = { 4 : ( 0 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), ##
5 : ( 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), ##
6 : ( 1 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), ##
7 : ( 1 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), ##
8 : ( 1 , 2 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 ), ##
9 : ( 1 , 2 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 ), ##
10 : ( 1 , 2 , 1 , 1 , 1 , 1 , 1 , 0 , 0 , 0 , 0 ), ##
11 : ( 1 , 2 , 1 , 1 , 1 , 1 , 1 , 0 , 0 , 0 , 0 ), ##
12 : ( 1 , 2 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 , 0 ), ##
13 : ( 1 , 2 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 , 0 ), ##
14 : ( 1 , 2 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ), ##
15 : ( 1 , 2 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ), ##
16 : ( 1 , 2 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ), ##
17 : ( 1 , 2 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ), ##
18 : ( 1 , 3 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ), ##
19 : ( 1 , 3 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ), ##
20 : ( 1 , 3 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ), ##
21 : ( 2 , 4 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ), ##
22 : ( 2 , 4 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ), ##
23 : ( 2 , 4 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ), ##
24 : ( 2 , 4 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ), ##
25 : ( 2 , 4 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 ), ##
26 : ( 2 , 5 , 1 , 1 , 1 , 1 , 2 , 1 , 1 , 1 , 0 ), ##
27 : ( 2 , 5 , 1 , 1 , 1 , 1 , 2 , 1 , 1 , 1 , 0 ), ##
28 : ( 2 , 5 , 1 , 1 , 1 , 1 , 2 , 1 , 1 , 1 , 0 ), ##
29 : ( 2 , 5 , 1 , 1 , 1 , 1 , 2 , 1 , 1 , 1 , 0 ), ##
30 : ( 2 , 5 , 1 , 1 , 1 , 1 , 2 , 1 , 1 , 1 , 0 ), ##
None : ( 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )} ##
#########################################################################################################################
# Notes: It is not needed to have a line for every combination, but it helps when you want to tweak a bit ##
# Notes: If one line is not specified (aka left out, doesn't appear) it will consider the next lower one ##
#########################################################################################################################
GAME_MODES = {}
AWAY = [] # cloaks of people who are away.
PING_IN = [] # cloaks of people who used !in to get in the ping list. works only with botconfig.REVERSE_PING set to True
SIMPLE_NOTIFY = [] # cloaks of people who !simple, who want everything /notice'd
ROLE_INDICES = {0 : "seer",
1 : "wolf",
2 : "cursed villager",
3 : "village drunk",
4 : "harlot",
5 : "traitor",
6 : "gunner",
7 : "werecrow",
8 : "guardian angel",
9 : "detective",
10: "arsonist"}
INDEX_OF_ROLE = dict((v,k) for k,v in ROLE_INDICES.items())
NO_VICTIMS_MESSAGES = ("The body of a young penguin pet is found.",
"A pool of blood and wolf paw prints are found.",
"Traces of wolf fur are found.")
LYNCH_MESSAGES = ("The villagers, after much debate, finally decide on lynching \u0002{0}\u0002, who turned out to be... a \u0002{1}\u0002.",
"Under a lot of noise, the pitchfork-bearing villagers lynch \u0002{0}\u0002, who turned out to be... a \u0002{1}\u0002.",
"The villagers drag the poor \u0002{0}\u0002 to the tree on the edge of the village. The poor guy was... a \u0002{1}\u0002.",
"The mob drags a protesting \u0002{0}\u0002 to the hanging tree. S/He succumbs to the will of the horde, and is hanged. It is discovered (s)he was a \u0002{1}\u0002.",
"Resigned to his/her fate, \u0002{0}\u0002 is led to the gallows. After death, it is discovered (s)he was a \u0002{1}\u0002.")
RULES = (botconfig.CHANNEL + " channel rules:\n"+
"1) Do not share information after death. "+
"2) Do not play with bots or clones. "+
"3) Do not quit unless you need to leave. "+
"4) Do not paste messages from the bot during the game. "+
"5) Do not ping people unless they have played recently.\n"+
"6) Do not advertise another channel or network. "+
"7) Do not take advantage of a player timing out. "+
"8) Using anti-idle messages or /whois idle times \u0002IS\u0002 cheating. "+
"9) If you are unsure whether you can do something or not, ask an operator. "+
"10) Channel and bot operators have the final word.")
is_role = lambda plyr, rol: rol in ROLES and plyr in ROLES[rol]
def plural(role):
if role == "wolf": return "wolves"
elif role == "person": return "people"
else: return role + "s"
def list_players():
pl = []
burnt = []
for burned in BURNED: # burned players' roles still appear, but they mustn't be marked as alive
burnt.append(burned)
for x in ROLES.values():
if x in burnt:
continue
pl.extend(x)
return pl
def list_players_and_roles():
plr = {}
for x in ROLES.keys():
for p in ROLES[x]:
plr[p] = x
return plr
def get_reveal_role(nick):
if HIDDEN_TRAITOR and get_role(nick) == "traitor":
return "villager"
else:
return get_role(nick)
get_role = lambda plyr: list_players_and_roles()[plyr]
def del_player(pname):
prole = get_role(pname)
ROLES[prole].remove(pname)
class InvalidModeException(Exception): pass
def game_mode(name):
def decor(c):
GAME_MODES[name] = c
return c
return decor
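# Usage: decorating a class with @game_mode("<name>") registers it in
# GAME_MODES under that name, as ChangedRolesMode below does for "roles".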
CHANGEABLE_ROLES = { "seers" : INDEX_OF_ROLE["seer"],
"wolves" : INDEX_OF_ROLE["wolf"],
"cursed" : INDEX_OF_ROLE["cursed villager"],
"drunks" : INDEX_OF_ROLE["village drunk"],
"harlots" : INDEX_OF_ROLE["harlot"],
"traitors" : INDEX_OF_ROLE["traitor"],
"gunners" : INDEX_OF_ROLE["gunner"],
"werecrows" : INDEX_OF_ROLE["werecrow"],
"angels" : INDEX_OF_ROLE["guardian angel"],
"detectives" : INDEX_OF_ROLE["detective"],
"arsonists" : INDEX_OF_ROLE["arsonist"]}
# TODO: implement game modes
@game_mode("roles")
class ChangedRolesMode(object):
"""Example: !fgame roles=wolves:1,seers:0,angels:1"""
def __init__(self, arg):
self.ROLES_GUIDE = ROLES_GUIDE.copy()
lx = list(ROLES_GUIDE[None])
pairs = arg.split(",")
pl = list_players()
if not pairs:
raise InvalidModeException("Invalid syntax for mode roles.")
for pair in pairs:
change = pair.split(":")
if len(change) != 2:
raise InvalidModeException("Invalid syntax for mode roles.")
role, num = change
try:
num = int(num)
try:
lx[CHANGEABLE_ROLES[role.lower()]] = num
except KeyError:
raise InvalidModeException(("The role \u0002{0}\u0002 "+
"is not valid.").format(role))
except ValueError:
raise InvalidModeException("A bad value was used in mode roles.")
for k in ROLES_GUIDE.keys():
self.ROLES_GUIDE[k] = tuple(lx)
# Persistence
# Load saved settings
import sqlite3
import os
conn = sqlite3.connect("data.sqlite3", check_same_thread = False)
with conn:
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS away (nick TEXT)') # whoops, i mean cloak, not nick
c.execute('CREATE TABLE IF NOT EXISTS simple_role_notify (cloak TEXT)') # people who understand each role
c.execute('SELECT * FROM away')
for row in c:
AWAY.append(row[0])
c.execute('SELECT * FROM simple_role_notify')
for row in c:
SIMPLE_NOTIFY.append(row[0])
# populate the roles table
c.execute('DROP TABLE IF EXISTS roles')
c.execute('CREATE TABLE roles (id INTEGER PRIMARY KEY AUTOINCREMENT, role TEXT)')
for x in ["villager"]+list(ROLE_INDICES.values()):
c.execute("INSERT OR REPLACE INTO roles (role) VALUES (?)", (x,))
c.execute(('CREATE TABLE IF NOT EXISTS rolestats (player TEXT, role TEXT, '+
'teamwins SMALLINT, individualwins SMALLINT, totalgames SMALLINT, '+
'UNIQUE(player, role))'))
c.execute(('CREATE TABLE IF NOT EXISTS gamestats (size SMALLINT, villagewins SMALLINT, ' +
'wolfwins SMALLINT, totalgames SMALLINT, UNIQUE(size))'))
#def remove_away(clk):
# with conn:
# c.execute('DELETE from away where nick=?', (clk,))
#def add_away(clk):
# with conn:
# c.execute('INSERT into away VALUES (?)', (clk,))
#def add_ping(clk):
# with conn:
# c.execute('INSERT into ping VALUES (?)', (clk,))
#def remove_ping(clk):
# with conn:
# c.execute('DELETE from ping where nick=?', (clk,))
#def remove_simple_rolemsg(clk):
# with conn:
# c.execute('DELETE from simple_role_notify where cloak=?', (clk,))
#def add_simple_rolemsg(clk):
# with conn:
# c.execute('INSERT into simple_role_notify VALUES (?)', (clk,))
def update_role_stats(acc, role, won, iwon):
with conn:
wins, iwins, total = 0, 0, 0
c.execute(("SELECT teamwins, individualwins, totalgames FROM rolestats "+
"WHERE player=? AND role=?"), (acc, role))
row = c.fetchone()
if row:
wins, iwins, total = row
if won:
wins += 1
if iwon:
iwins += 1
total += 1
c.execute("INSERT OR REPLACE INTO rolestats VALUES (?,?,?,?,?)",
(acc, role, wins, iwins, total))
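# Example (illustrative arguments): update_role_stats("someaccount", "seer", True, False)
# fetches the existing (teamwins, individualwins, totalgames) row for that
# player/role pair, bumps teamwins and totalgames, and writes the row back
# with INSERT OR REPLACE.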
def update_game_stats(size, winner):
with conn:
vwins, wwins, total = 0, 0, 0
c.execute("SELECT villagewins, wolfwins, totalgames FROM gamestats "+
"WHERE size=?", (size,))
row = c.fetchone()
if row:
vwins, wwins, total = row
if winner == "wolves":
wwins += 1
elif winner == "villagers":
vwins += 1
total += 1
c.execute("INSERT OR REPLACE INTO gamestats VALUES (?,?,?,?)",
(size, vwins, wwins, total))
def get_player_stats(acc, role):
if role.lower() not in ["villager"] + [v.lower() for k, v in ROLE_INDICES.items()]:
return "No such role: {0}".format(role)
with conn:
c.execute("SELECT player FROM rolestats WHERE player LIKE ? COLLATE NOCASE", (acc,))
player = c.fetchone()
if player:
for row in c.execute("SELECT * FROM rolestats WHERE player=? COLLATE NOCASE AND role=? COLLATE NOCASE", (acc, role)):
msg = "\u0002{0}\u0002 as \u0002{1}\u0002 | Team wins: {2} (%d%%), Individual wins: {3} (%d%%), Total games: {4}".format(*row)
return msg % (round(row[2]/row[4] * 100), round(row[3]/row[4] * 100))
else:
return "No stats for {0} as {1}.".format(player[0], role)
return "{0} has not played any games.".format(acc)
def get_player_totals(acc):
role_totals = []
with conn:
c.execute("SELECT player FROM rolestats WHERE player LIKE ? COLLATE NOCASE", (acc,))
player = c.fetchone()
if player:
c.execute("SELECT role, totalgames FROM rolestats WHERE player=? COLLATE NOCASE", (acc,))
rows = c.fetchall()
total = 0
for row in rows:
total += row[1]
for row in rows:
role_totals.append("\u0002{row[0]}\u0002: {row[1]} ({prct:.2%})".format(row=row, prct=row[1]/total))
return "\u0002{0}\u0002's totals | \u0002{1} total games\u0002 | {2}".format(player[0], total, ", ".join(role_totals))
else:
return "{0} has not played any games.".format(acc)
def get_game_stats(size):
with conn:
for row in c.execute("SELECT * FROM gamestats WHERE size=?", (size,)):
msg = "\u0002{0}\u0002 player games | Village wins: {1} (%d%%), Wolf wins: {2} (%d%%), Total games: {3}".format(*row)
return msg % (round(row[1]/row[3] * 100), round(row[2]/row[3] * 100))
else:
return "No stats for \u0002{0}\u0002 player games.".format(size)
def get_game_totals():
size_totals = []
total = 0
with conn:
for size in range(MIN_PLAYERS, MAX_PLAYERS + 1):
c.execute("SELECT size, totalgames FROM gamestats WHERE size=?", (size,))
row = c.fetchone()
if row:
size_totals.append("\u0002{0}p\u0002: {1}".format(*row))
total += row[1]
if len(size_totals) == 0:
return "No games have been played."
else:
return "Total games ({0}) | {1}".format(total, ", ".join(size_totals))
| bsd-2-clause | 7,256,666,831,428,034,000 | 55.748201 | 185 | 0.440585 | false |
thelectronicnub/redditswapbot | util/flair_sql_import.py | 1 | 1111 | #!/usr/bin/env python2
import sys, os
import json
import argparse
import sqlite3 as lite
con = None
def extant_file(x):
if not os.path.exists(x):
raise argparse.ArgumentTypeError("{0} does not exist".format(x))
return x
def main():
parser = argparse.ArgumentParser(description="Import flairs")
parser.add_argument("-f", "--file", dest="filename", help="json input file", metavar="FILE", type=extant_file, required=True)
args = parser.parse_args()
try:
con = lite.connect('flair.db')
except lite.Error, e:
print "Error %s:" % e.args[0]
sys.exit(1)
curs = con.cursor()
curs.execute('''CREATE TABLE IF NOT EXISTS flair (
username TEXT PRIMARY KEY NOT NULL ,
flair_text TEXT,
flair_css_class TEXT,
lastpost timestamp,
lastpostid TEXT,
lastid TEXT DEFAULT ''
)''')
flair_json = json.load(open(args.filename))
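# Expected JSON shape, inferred from the named parameters below
# (the values shown are only illustrative):
# [{"user": "some_redditor", "flair_text": "10 trades", "flair_css_class": "flair-10"}, ...]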
curs.executemany('INSERT INTO flair (username, flair_text, flair_css_class) VALUES (:user, :flair_text, :flair_css_class)', flair_json)
con.commit()
if con:
con.close()
if __name__ == "__main__":
main()
| gpl-3.0 | -6,648,211,901,216,044,000 | 22.638298 | 139 | 0.648065 | false |
lesina/labs2016 | Laba19/G.py | 1 | 1494 | def makeAdjacencyMatrix():
for i in range(size):
adjacency_matrix.append([1000000] * size)
for i in range(n):
vertex1, vertex2, weight = list(map(int, input().split()))
adjacency_matrix[vertex1][vertex2] = weight
adjacency_matrix[vertex2][vertex1] = weight
def Dexter(size, adjacency_matrix, start = 0):
valid = [True] * size
weight = [1000000] * size
weight[start] = 0
for i in range(size):
min_weight = 1000001
ID_min_weight = -1
for i in range(size):
if valid[i] and weight[i] < min_weight:
min_weight = weight[i]
ID_min_weight = i
for i in range(size):
if weight[ID_min_weight] + adjacency_matrix[ID_min_weight][i] < weight[i]:
weight[i] = weight[ID_min_weight] + adjacency_matrix[ID_min_weight][i]
valid[ID_min_weight] = False
return weight
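# Note: Dexter is a plain O(V^2) Dijkstra - 1000000 stands in for infinity
# and the nearest unvisited vertex is found with a linear scan rather than a
# priority queue. returnPath below back-tracks from a target vertex by
# looking for a predecessor i with weight[target] - edge(i, target) == weight[i].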
def returnPath(retStart):
Path.append(retStart)
newWeight = weight[retStart]
for j in range(size):
for i in range(size):
if (newWeight - adjacency_matrix[i][retStart] == weight[i]):
newWeight -= adjacency_matrix[i][retStart]
retStart = i
Path.append(i)
Path = []
adjacency_matrix = []
size, n, start, end = list(map(int, input().split()))
makeAdjacencyMatrix()
weight = Dexter(size, adjacency_matrix, start)
print(weight[end])
# retStart = int(input())
# returnPath(retStart)
# print(*Path) | gpl-3.0 | 6,825,533,243,467,289,000 | 30.808511 | 86 | 0.591031 | false |
RAPD/RAPD | src/old_agents/rapd_agent_integrate.py | 1 | 129537 | """
RAPD agent for fast integration with XDS
"""
__license__ = """
This file is part of RAPD
Copyright (C) 2011-2018, Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2011-06-29"
__maintainer__ = "David Neau"
__email__ = "[email protected]"
__status__ = "Production"
# This is an active rapd agent
RAPD_AGENT = True
# This handler's request type
AGENT_TYPE = "INTEGRATE"
AGENT_SUBTYPE = "CORE"
# A unique UUID for this handler (uuid.uuid1().hex)
ID = "bd11f4401eaa11e697c3ac87a3333966"
VERSION = "2.0.0"
# Standard imports
from distutils.spawn import find_executable
import logging
import logging.handlers
import math
from multiprocessing import Process
import os
# import os.path
from pprint import pprint
# import shutil
import stat
import subprocess
import sys
import threading
import time
import numpy
# RAPD imports
from subcontractors.xdsme.xds2mos import Xds2Mosflm
from utils.communicate import rapd_send
from subcontractors.stats import AutoStats
import utils.text as text
from utils.text import json
from bson.objectid import ObjectId
import utils.xutils as Utils
# Import smartie.py from the installed CCP4 package
# smartie.py is a python script for parsing log files from CCP4
sys.path.append(os.path.join(os.environ["CCP4"], "share", "smartie"))
import smartie
# Software dependencies
VERSIONS = {
"aimless": (
"version 0.5.23",
"version 0.5.29",
"version 0.5.31",
),
"freerflag": (
"version 2.2",
),
"gnuplot": (
" 5.0 ",
),
"mtz2various": (
"version 1.1",
),
"pointless": (
"version 1.10.19",
"version 1.10.28",
),
"truncate": (
"version 7.0.004",
"version 7.0.024",
"version 7.0.028",
),
"xds": (
"VERSION Nov 1, 2016",
),
"xds_par": (
"VERSION Nov 1, 2016",
),
}
def try_float(number, default="NO DEFAULT"):
"""Attempt to cast to a float, but return string if not"""
try:
return float(number)
except ValueError:
if default != "NO DEFAULT":
return default
else:
return number
def try_int(number, default="NO DEFAULT"):
"""Attempt to cast to an int, but return string if not"""
try:
return int(number)
except ValueError:
if default != "NO DEFAULT":
return default
else:
return number
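# Illustrative behaviour of the two helpers above (values are examples only):
#   try_float("2.15")           -> 2.15
#   try_float("n/a", default=0) -> 0      (falls back to the supplied default)
#   try_int("abc")              -> "abc"  (no default given, the string comes back)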
class RapdAgent(Process):
"""
classdocs
command format
{
"command":"INDEX+STRATEGY",
"directories":
{
"data_root_dir":"" # Root directory for the data session
"work":"" # Where to perform the work
},
"image_data":{}, # Image information
["image2":{},] # 2nd image information
"preferences":{} # Settings for calculations
"return_address":("127.0.0.1", 50000) # Location of control process
}
"""
spacegroup = False
low_res = False
hi_res = False
results = {}
def __init__(self, site, command, tprint=False, logger=False):
"""
Initialize the agent
Keyword arguments
site -- full site settings
command -- dict of all information for this agent to run
"""
# Store tprint for use throughout
if tprint:
self.tprint = tprint
# Dead end if no tprint passed
else:
def func(arg=False, level=False, verbosity=False, color=False):
pass
self.tprint = func
# Get the logger Instance
if logger:
self.logger = logger
else:
self.logger = logging.getLogger("RAPDLogger")
self.logger.debug("__init__")
# Store passed-in variables
self.site = site
self.command = command
self.settings = self.command.get("preferences")
self.controller_address = self.command.get("return_address", False)
self.dirs = self.command["directories"]
self.image_data = self.command.get("data").get("image_data")
self.run_data = self.command.get("data").get("run_data")
self.process_id = self.command["process_id"]
self.logger.debug("self.image_data = %s", self.image_data)
if self.settings.get("start_frame", False):
self.image_data["start"] = self.settings.get("start_frame")
else:
self.image_data["start"] = self.run_data.get("start")
# print "self.image_data[\"start\"]", self.image_data["start"]
if self.settings.get("end_frame", False):
self.image_data["total"] = self.settings.get("end_frame") - self.image_data["start"] + 1
else:
self.image_data["total"] = self.run_data.get("total")
# print "self.image_data[\"total\"]", self.image_data["total"]
self.image_data['image_template'] = self.run_data['image_template']
# Check for 2theta tilt:
if 'twotheta' in self.run_data:
self.image_data['twotheta'] = self.run_data['twotheta']
# self.image_data['start'] = self.settings['request']['frame_start']
# self.image_data['total'] = str( int(self.settings['request']['frame_start'])
# + int(self.settings['request']['frame_finish']) - 1)
if self.settings.get('spacegroup', False):
self.spacegroup = self.settings['spacegroup']
if self.settings.get("hi_res", False):
self.hi_res = self.settings.get("hi_res")
if self.settings.get("low_res", False):
self.low_res = self.settings.get("low_res")
if 'multiprocessing' in self.settings:
self.cluster_use = self.settings['multiprocessing']
if self.cluster_use == 'True':
self.cluster_use = True
elif self.cluster_use == 'False':
self.cluster_use = False
else:
self.cluster_use = False
if 'ram_integrate' in self.settings:
self.ram_use = self.settings['ram_integrate']
if self.ram_use == 'True':
self.ram_use = True
elif self.ram_use == 'False':
self.ram_use = False
if self.ram_use == True:
self.ram_nodes = self.settings['ram_nodes']
# ram_nodes is a list containing three lists.
# ram_nodes[0] is a list containing the name of the nodes where
# data was distributed to.
# ram_nodes[1] is a list of the first frame number for the wedge
# of images copied to the corresponding node.
# ram_nodes[2] is a list of the last frame number for the wedge
# of images copied to the corresponding node.
else:
self.ram_nodes = None
else:
self.ram_use = False
self.ram_nodes = None
if 'standalone' in self.settings:
self.standalone = self.settings['standalone']
if self.standalone == 'True':
self.standalone = True
elif self.standalone == 'False':
self.standalone = False
else:
self.standalone = False
if 'work_dir_override' in self.settings:
if (self.settings['work_dir_override'] == True
or self.settings['work_dir_override'] == 'True'):
self.dirs['work'] = self.settings['work_directory']
if 'beam_center_override' in self.settings:
if (self.settings['beam_center_override'] == True
or self.settings['beam_center_override'] == 'True'):
self.image_data['x_beam'] = self.settings['x_beam']
self.image_data['y_beam'] = self.settings['y_beam']
# Some detectors need the beam center flipped for XDS
if self.settings.get('flip_beam', False):
x = self.image_data['y_beam']
self.image_data['y_beam'] = self.image_data['x_beam']
self.image_data['x_beam'] = x
self.xds_default = []
# Parameters likely to be changed based on beamline setup.
# Directory containing XDS.INP default files for detectors.
#if os.path.isdir('/home/necat/DETECTOR_DEFAULTS'):
# self.detector_directory = '/home/necat/DETECTOR_DEFAULTS/'
#Also check set_detector_data for other detector dependent values!
# XDS parameters for number of JOBS and PROCESSORS.
# Values are beamline specific, depending on computing resources.
# self.jobs is number of nodes XDS can use for colspot and/or integration.
# self.procs is number of processors XDS can use per job.
if self.cluster_use == True:
if self.ram_use == True:
self.jobs = len(self.ram_nodes[0])
self.procs = 8
else:
# Set self.jobs and self.procs based on available cluster resources
self.jobs = 20
self.procs = 8
else:
# Setting self.jobs > 1 provides some speed up on
# multiprocessor machines.
# Should be set based on computer used for processing
self.jobs = 1
self.procs = 4
Process.__init__(self, name="FastIntegration")
self.start()
def run(self):
self.logger.debug('Fastintegration::run')
self.preprocess()
self.process()
#self.postprocess()
def preprocess(self):
"""
Things to do before the main process runs.
1. Change to the correct working directory.
2. Read in detector specific parameters.
"""
self.logger.debug('FastIntegration::preprocess')
if os.path.isdir(self.dirs['work']) == False:
os.makedirs(self.dirs['work'])
os.chdir(self.dirs['work'])
self.xds_default = self.createXDSinp(self.settings['xdsinp'])
def process(self):
"""
Things to do in main process:
1. Run integration and scaling.
2. Report integration results.
3. Run analysis of data set.
"""
self.logger.debug('FastIntegration::process')
if not self.command["command"] in ("INTEGRATE", "XDS"):
self.logger.debug('Program did not request an integration')
self.logger.debug('Now Exiting!')
return
xds_input = self.xds_default
if self.command["command"] == 'XDS':
integration_results = self.xds_total(xds_input)
else:
if os.path.isfile(self.last_image) == True:
if self.ram_use == True:
integration_results = self.ram_total(xds_input)
else:
integration_results = self.xds_total(xds_input)
else:
if self.ram_use == True:
integration_results = self.ram_integrate(xds_input)
elif (self.image_data['detector'] == 'ADSC' or
self.cluster_use == False):
integration_results = self.xds_split(xds_input)
else:
integration_results = self.xds_processing(xds_input)
os.chdir(self.dirs['work'])
if integration_results == 'False':
# Do a quick clean up?
pass
else:
final_results = self.finish_data(integration_results)
# Set up the results for return
self.results['process'] = {'agent_process_id': self.process_id,
'status': 100}
self.results['results'] = final_results
self.logger.debug(self.results)
#self.sendBack2(results)
self.write_json(self.results)
self.print_info()
return
# Skip this for now
analysis = self.run_analysis(final_results['files']['mtzfile'], self.dirs['work'])
analysis = 'Success'
if analysis == 'Failed':
self.logger.debug(analysis)
# Add method for dealing with a failure by run_analysis.
pass
elif analysis == 'Success':
self.logger.debug(analysis)
self.results["status"] = "SUCCESS"
self.logger.debug(self.results)
# self.sendBack2(results)
if self.controller_address:
rapd_send(self.controller_address, self.results)
return
def ram_total(self, xdsinput):
"""
This function controls processing by XDS when the complete data set
is present and distributed to ramdisks on the cluster
"""
self.logger.debug('Fastintegration::ram_total')
first = int(self.image_data['start'])
last = int(self.image_data['start']) + int(self.image_data['total']) -1
data_range = '%s %s' %(first, last)
dir = 'wedge_%s_%s' %(first, last)
xdsdir = os.path.join(self.dirs['work'], dir)
if os.path.isdir(xdsdir) == False:
os.mkdir(xdsdir)
os.chdir(xdsdir)
# Figure out how many images are on the first node.
# If greater than self.procs, simply set up spot ranges with a number
# of images equal to self.procs from the first and last ram nodes.
# If less than self.procs, reduce self.procs and set up spot ranges
# with all of the images on the first and last ram nodes.
Num_images = self.ram_nodes[2][0] - self.ram_nodes[1][0] + 1
if Num_images < self.procs:
self.procs = Num_images
spot_range = self.ram_nodes[1][0] + self.procs - 1
xdsinp = xdsinput[:]
xdsinp.append('JOB=XYCORR INIT COLSPOT IDXREF DEFPIX INTEGRATE CORRECT\n\n')
# Add the spot ranges.
xdsinp.append('SPOT_RANGE=%s %s\n' %(self.ram_nodes[1][0], spot_range))
# Make sure the last ram node has an adequate number of images available.
spot_range = self.ram_nodes[1][-1] + self.procs - 1
if self.ram_nodes[2][-1] < spot_range:
spot_range = self.ram_nodes[2][-1]
xdsinp.append('SPOT_RANGE=%s %s\n' %(self.ram_nodes[1][-1], spot_range))
xdsinp.append('DATA_RANGE=%s\n' % data_range)
self.write_file('XDS.INP', xdsinp)
self.write_forkscripts(self.ram_nodes, self.image_data['osc_range'])
self.xds_ram(self.ram_nodes[0][0])
newinp = self.check_for_xds_errors(xdsdir, xdsinp)
if newinp == False:
self.logger.debug(' Unknown xds error occurred. Please check for cause!')
self.tprint(arg="Unknown xds error occurred. Please check for cause!",
level=10,
color="red")
raise Exception("Unknown XDS error")
return False
else:
# Find a suitable cutoff for resolution
# Returns False if no new cutoff, otherwise returns the value of
# the high resolution cutoff as a float value.
new_rescut = self.find_correct_res(xdsdir, 1.0)
if new_rescut != False:
os.rename('%s/CORRECT.LP' %xdsdir, '%s/CORRECT.LP.nocutoff' %xdsdir)
os.rename('%s/XDS.LOG' %xdsdir, '%s/XDS.LOG.nocutoff' %xdsdir)
newinp[-2] = 'JOB=CORRECT\n\n'
newinp[-3] = 'INCLUDE_RESOLUTION_RANGE=200.0 %.2f\n' % new_rescut
self.write_file('XDS.INP', newinp)
self.xds_ram(self.ram_nodes[0][0])
# Prepare the display of results.
final_results = self.run_results(xdsdir)
# Polish up xds processing by moving GXPARM.XDS to XPARM.XDS
# and rerunning xds.
#
# Don't polish if low resolution, as this tends to blow up.
if new_rescut <= 4.5:
os.rename('%s/GXPARM.XDS' %xdsdir, '%s/XPARM.XDS' %xdsdir)
os.rename('%s/CORRECT.LP' %xdsdir, '%s/CORRECT.LP.old' %xdsdir)
os.rename('%s/XDS.LOG' %xdsdir, '%s/XDS.LOG.old' %xdsdir)
newinp[-2] = 'JOB=INTEGRATE CORRECT\n\n'
newinp[-3] = '\n'
self.write_file('XDS.INP', newinp)
self.xds_ram(self.ram_nodes[0][0])
#Check to see if a new resolution cutoff should be applied
#new_rescut = self.find_correct_res(xdsdir, 1.0)
#if new_rescut != False:
# os.rename('%s/CORRECT.LP' %xdsdir, '%s/CORRECT.LP.nocutoff' %xdsdir)
# os.rename('%s/XDS.LOG' %xdsdir, '%s/XDS.LOG.nocutoff' %xdsdir)
# newinp[-2] = 'JOB=CORRECT !XYCORR INIT COLSPOT IDXREF DEFPIX INTEGRATE CORRECT\n\n'
# newinp[-5] = 'INCLUDE_RESOLUTION_RANGE=200.0 %.2f\n' % new_rescut
# self.write_file('XDS.INP', newinp)
# self.xds_ram(self.ram_nodes[0][0])
# new_rescut = self.find_correct_res(xdsdir, 1.0)
# if new_rescut != False:
# os.rename('%s/CORRECT.LP' %xdsdir, '%s/CORRECT.LP.oldcutoff' %xdsdir)
# os.rename('%s/XDS.LOG' %xdsdir, '%s/XDS.LOG.oldcutoff' %xdsdir)
# newinp[-5] = 'INCLUDE_RESOLUTION_RANGE=200.0 %.2f\n' % new_rescut
# self.write_file('XDS.INP', newinp)
# self.xds_ram(self.ram_nodes[0][0])
final_results = self.run_results(xdsdir)
final_results['status'] = 'SUCCESS'
return final_results
def change_xds_inp(self, xds_input, new_line):
"""Modify the XDS.INP lines with the input line"""
param = new_line.split("=")[0].strip()
xds_output = []
found = False
for line in xds_input:
if param+"=" in line:
xds_output.append(new_line)
found = True
else:
xds_output.append(line)
# Append the line if it is new
if not found:
xds_output.append(new_line)
return xds_output
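# Example of the helper above (a sketch, not part of the pipeline): if
# xds_input already holds a line starting with "JOB=", then
#   change_xds_inp(xds_input, "JOB=DEFPIX INTEGRATE CORRECT \n\n")
# returns a copy with that line replaced; a parameter that does not occur
# yet is appended to the end instead.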
def xds_total(self, xdsinput):
"""
This function controls processing by XDS when the complete data
set is already present on the computer system.
"""
self.logger.debug('Fastintegration::xds_total')
self.tprint(arg="\nXDS processing", level=99, color="blue")
first = int(self.image_data['start'])
last = int(self.image_data['start']) + int(self.image_data['total']) -1
data_range = '%s %s' %(first, last)
self.logger.debug('start = %s, total = %s',
self.image_data['start'],
self.image_data['total'])
self.logger.debug('first - %s, last = %s', first, last)
self.logger.debug('data_range = %s', data_range)
dir = 'wedge_%s_%s' % (first, last)
xdsdir = os.path.join(self.dirs['work'], dir)
if os.path.isdir(xdsdir) == False:
os.mkdir(xdsdir)
xdsinp = xdsinput[:]
if self.low_res or self.hi_res:
if not self.low_res:
low_res = 200.0
else:
low_res = self.low_res
if not self.hi_res:
hi_res = 0.9
else:
hi_res = self.hi_res
xdsinp = self.change_xds_inp(xdsinp,
"INCLUDE_RESOLUTION_RANGE=%.2f %.2f\n" % (low_res, hi_res))
xdsinp = self.change_xds_inp(xdsinp, "MAXIMUM_NUMBER_OF_PROCESSORS=%s\n" % self.procs)
xdsinp = self.change_xds_inp(xdsinp, "MAXIMUM_NUMBER_OF_JOBS=%s\n" % self.jobs)
xdsinp = self.change_xds_inp(xdsinp, "JOB=XYCORR INIT COLSPOT \n\n")
xdsinp = self.change_xds_inp(xdsinp, "DATA_RANGE=%s\n" % data_range)
xdsfile = os.path.join(xdsdir, 'XDS.INP')
self.write_file(xdsfile, xdsinp)
self.tprint(arg=" Searching for peaks",
level=99,
color="white",
newline=False)
self.xds_run(xdsdir)
# Index
xdsinp[-2] = ("JOB=IDXREF \n\n")
self.write_file(xdsfile, xdsinp)
self.tprint(arg=" Indexing",
level=99,
color="white",
newline=False)
self.xds_run(xdsdir)
# Integrate
# Override spacegroup?
if self.spacegroup != False:
# Check consistency of spacegroup, and modify if necessary.
xdsinp = self.find_xds_symm(xdsdir, xdsinp)
else:
xdsinp = self.change_xds_inp(xdsinp, "JOB=DEFPIX INTEGRATE CORRECT \n\n")
self.write_file(xdsfile, xdsinp)
self.tprint(arg=" Integrating",
level=99,
color="white",
newline=False)
self.xds_run(xdsdir)
# If known xds_errors occur, catch them and take corrective action
newinp = self.check_for_xds_errors(xdsdir, xdsinp)
if newinp == False:
self.logger.exception('Unknown xds error occurred. Please check for cause!')
self.tprint(arg="\nXDS error unknown to RAPD has occurred. Please check for cause!",
level=30,
color="red")
# TODO put out failing JSON
raise Exception("XDS error unknown to RAPD has occurred.")
# Prepare the display of results.
prelim_results = self.run_results(xdsdir)
self.tprint("\nPreliminary results summary", 99, "blue")
self.print_results(prelim_results)
# Already have hi res cutoff
if self.hi_res:
new_rescut = self.hi_res
# Find a suitable cutoff for resolution
else:
if self.low_res:
low_res = self.low_res
else:
low_res = 200.0
# Returns False if no new cutoff, otherwise returns the value of
# the high resolution cutoff as a float value.
new_rescut = self.find_correct_res(xdsdir, 1.0)
newinp = self.change_xds_inp(newinp, "JOB= INTEGRATE CORRECT \n\n")
# newinp[-2] = 'JOB= INTEGRATE CORRECT \n\n'
if new_rescut != False:
os.rename('%s/CORRECT.LP' %xdsdir, '%s/CORRECT.LP.nocutoff' %xdsdir)
os.rename('%s/XDS.LOG' %xdsdir, '%s/XDS.LOG.nocutoff' %xdsdir)
newinp = self.change_xds_inp(
newinp,
"%sINCLUDE_RESOLUTION_RANGE=%.2f %.2f\n" % (newinp[-2], low_res, new_rescut))
# newinp[-2] = '%sINCLUDE_RESOLUTION_RANGE=200.0 %.2f\n' % (newinp[-2], new_rescut)
self.write_file(xdsfile, newinp)
self.tprint(arg=" Reintegrating with new resolution cutoff",
level=99,
color="white",
newline=False)
self.xds_run(xdsdir)
# Prepare the display of results.
prelim_results_2 = self.run_results(xdsdir)
self.tprint("\nIntermediate results summary", 99, "blue")
self.print_results(prelim_results_2)
# Polish up xds processing by moving GXPARM.XDS to XPARM.XDS
# and rerunning xds.
#
# If low resolution, don't try to polish the data, as this tends to blow up.
if new_rescut <= 4.5:
os.rename('%s/GXPARM.XDS' %xdsdir, '%s/XPARM.XDS' %xdsdir)
os.rename('%s/CORRECT.LP' %xdsdir, '%s/CORRECT.LP.old' %xdsdir)
os.rename('%s/XDS.LOG' %xdsdir, '%s/XDS.LOG.old' %xdsdir)
#newinp[-2] = 'JOB=INTEGRATE CORRECT !XYCORR INIT COLSPOT IDXREF DEFPIX INTEGRATE CORRECT\n\n'
self.write_file(xdsfile, newinp)
self.tprint(arg=" Polishing",
level=99,
color="white",
newline=False)
self.xds_run(xdsdir)
final_results = self.run_results(xdsdir)
else:
# Check to see if a new resolution cutoff should be applied
new_rescut = self.find_correct_res(xdsdir, 1.0)
if new_rescut != False:
os.rename('%s/CORRECT.LP' %xdsdir, '%s/CORRECT.LP.oldcutoff' %xdsdir)
os.rename('%s/XDS.LOG' %xdsdir, '%s/XDS.LOG.oldcutoff' %xdsdir)
newinp[-2] = '%sINCLUDE_RESOLUTION_RANGE=200.0 %.2f\n' % (newinp[-2], new_rescut)
self.write_file(xdsfile, newinp)
self.tprint(arg=" New resolution cutoff", level=99, color="white", newline=False)
self.xds_run(xdsdir)
final_results = self.run_results(xdsdir)
# Put data onto the command line
self.tprint("\nFinal results summary", 99, "blue")
self.print_results(final_results)
self.print_plots(final_results)
final_results['status'] = 'ANALYSIS'
return final_results
def xds_split(self, xdsinput):
"""
Controls xds processing for unibinned ADSC data
Launches XDS when half the data set has been collected and again once
the complete data set has been collected.
"""
self.logger.debug("FastIntegration::xds_split")
first_frame = int(self.image_data['start'])
half_set = (int(self.image_data['total']) / 2) + first_frame - 1
last_frame = int(self.image_data['start']) + int(self.image_data['total']) - 1
frame_count = first_frame + 1
file_template = os.path.join(self.image_data['directory'], self.image_template)
# Figure out how many digits needed to pad image number.
# First split off the <image number>.<extension> portion of the file_template.
numimg = self.image_template.split('_')[-1]
# Then split off the image number portion.
num = numimg.split('.')[0]
# Then find the length of the number portion
pad = len(num)
replace_string = ''
for i in range(0, pad, 1):
replace_string += '?'
look_for_file = file_template.replace(replace_string,
'%0*d' %(pad, frame_count))
# Maximum wait time for next image is exposure time + 30 seconds.
wait_time = int(math.ceil(float(self.image_data['time']))) + 30
timer = Process(target=time.sleep, args=(wait_time,))
timer.start()
while frame_count < last_frame:
if os.path.isfile(look_for_file) == True:
if timer.is_alive():
timer.terminate()
timer = Process(target=time.sleep, args=(wait_time,))
timer.start()
if frame_count == half_set:
proc_dir = 'wedge_%s_%s' % (first_frame, frame_count)
xds_job = Process(target=self.xds_wedge,
args=(proc_dir, frame_count, xdsinput))
xds_job.start()
frame_count += 1
look_for_file = file_template.replace(replace_string,
'%0*d' %(pad, frame_count))
elif timer.is_alive() == False:
self.logger.debug(' Image %s not found after waiting %s seconds.',
look_for_file,
wait_time)
self.logger.debug(' RAPD assumes the data collection has been aborted.')
self.logger.debug(' Launching a final xds job with last image detected.')
self.image_data['last'] = frame_count - 1
results = self.xds_total(xdsinput)
return results
# If you reach here, frame_count equals the last frame, so look for the
# last frame and then launch xds_total.
while timer.is_alive():
if os.path.isfile(self.last_image):
if xds_job.is_alive():
xds_job.terminate()
results = self.xds_total(xdsinput)
timer.terminate()
break
# If timer expires (ending the above loop) and last frame has not been
# detected, launch xds_total with last detected image.
if os.path.isfile(self.last_image) == False:
if xds_job.is_alive():
xds_job.terminate()
self.image_data['last'] = frame_count - 1
results = self.xds_total(xdsinput)
return results
def xds_processing(self, xdsinput):
"""
Controls processing of data on disks (i.e. not stored in RAM)
by xds. Attempts to process every 10 images up to 100 and then
every 20 images after that. This function should be used for NE-CAT
data collected on ADSC in binned mode
"""
"""
Need to set up a control where every ten frames an XDS processing is launched.
Need to keep track of what's been launched. To avoid launching too many XDS
jobs, if an XDS job is running when next ten frames are collected, don't launch
new wedge but rather wait for next multiple of 10. XDS jobs should be checked for
common errors and rerun if needed. A resolution cutoff should be generated at the
CORRECT stage (pass this cutoff on to next wedge?). Once the data set is complete,
last XDS should be "polished" by moving GXPARM.XDS to XPARM.XDS
As XDS jobs finish, launch whatever generates the GUI display
"""
self.logger.debug('FastIntegration::xds_processing')
first_frame = int(self.image_data['start'])
last_frame = int(self.image_data['start']) + int(self.image_data['total']) - 1
frame_count = first_frame
# Maximum wait time for next image is exposure time + 15 seconds.
#wait_time = int(math.ceil(float(self.image_data['time']))) + 15
# Maximum wait time for next image is exposure time + 60 seconds.
if self.image_data['detector'] == 'PILATUS' or self.image_data['detector'] == 'HF4M':
wait_time = int(math.ceil(float(self.image_data['time']))) + 15
else:
wait_time = int(math.ceil(float(self.image_data['time']))) + 60
try:
wedge_size = int(10 // float(self.image_data['osc_range']))
except:
self.logger.debug('xds_processing:: dynamic wedge size allocation failed!')
self.logger.debug(' Setting wedge size to 10.')
wedge_size = 10
file_template = os.path.join(self.image_data['directory'], self.image_template)
# Figure out how many digits needed to pad image number.
# First split off the <image number>.<extension> portion of the file_template.
numimg = self.image_template.split('_')[-1]
# Then split off the image number portion.
num = numimg.split('.')[0]
# Then find the length of the number portion
pad = len(num)
replace_string = ''
for _ in range(0, pad, 1):
replace_string += '?'
look_for_file = file_template.replace(replace_string,
'%0*d' % (pad, frame_count))
timer = Process(target=time.sleep, args=(wait_time,))
timer.start()
# Create the process xds_job (runs a timer with no delay).
# This is so xds_job exists when it is checked for later on.
# Eventually xds_job is replaced by the actual integration jobs.
xds_job = Process(target=time.sleep, args=(0,))
xds_job.start()
while frame_count < last_frame:
# Look for next look_for_file to see if it exists.
# If it does, check to see if it is a tenth image.
# If it is a tenth image, launch an xds job.
# If it isn't a tenth image, index the look_for_file
# If it doesn't exist, keep checking until the timer process expires.
if os.path.isfile(look_for_file) == True:
# Reset the timer process
if timer.is_alive():
timer.terminate()
timer = Process(target=time.sleep, args=(wait_time,))
timer.start()
# If frame_count is a tenth image, launch and xds job
# remainder = ((frame_count + 1) - first_frame) % wedge_size
# self.logger.debug(' remainder = %s' % remainder)
if xds_job.is_alive() == True:
self.logger.debug(' xds_job.is_alive = True')
if (((frame_count + 1) - first_frame) % wedge_size == 0 and
xds_job.is_alive() == False):
proc_dir = 'wedge_%s_%s' %(first_frame, frame_count)
xds_job = Process(target=self.xds_wedge,
args=(proc_dir, frame_count, xdsinput))
xds_job.start()
# Increment the frame count to look for next image
frame_count += 1
look_for_file = file_template.replace(replace_string,
'%0*d' % (pad, frame_count))
# If next frame does not exist, check to see if timer has expired.
# If timer has expired, assume an abort has occurred.
elif timer.is_alive() == False:
self.logger.debug(' Image %s not found after waiting %s seconds.',
look_for_file,
wait_time)
# There have been a few cases, particularly with the Pilatus's
# Furka file transfer, where an image failed to be copied to disk.
# So check for the next two files before assuming there has
# been an abort.
self.logger.debug(' RAPD assumes the data collection has been aborted.')
self.logger.debug(' RAPD checking for next two subsequent images to be sure.')
frame_count += 1
look_for_file = file_template.replace(replace_string, '%0*d' % (pad, frame_count))
if os.path.isfile(look_for_file) == True:
timer = Process(target=time.sleep, args=(wait_time,))
timer.start()
# Increment the frame count to look for next image
frame_count += 1
look_for_file = file_template.replace(replace_string,
'%0*d' %(pad, frame_count))
else:
self.logger.debug(' RAPD did not find the next image, checking for one more.')
frame_count += 1
look_for_file = file_template.replace(replace_string, '%0*d' %(pad, frame_count))
if os.path.isfile(look_for_file) == True:
timer = Process(target=time.sleep, args=(wait_time,))
timer.start()
frame_count += 1
look_for_file = file_template.replace(
replace_string,
'%0*d' % (pad, frame_count))
else:
self.logger.debug(' RAPD did not find the next image either.')
self.logger.debug(
' Launching a final xds job with last image detected.')
self.image_data['total'] = frame_count - 2 - first_frame
results = self.xds_total(xdsinput)
return results
# If you reach here, frame_count equals the last frame, so look for the
# last frame and then launch xds_total.
while timer.is_alive():
if os.path.isfile(self.last_image):
if xds_job.is_alive():
xds_job.terminate()
results = self.xds_total(xdsinput)
timer.terminate()
break
# If timer expires (ending the above loop) and last frame has not been
# detected, launch xds_total with last detected image.
if os.path.isfile(self.last_image) == False:
if xds_job.is_alive():
xds_job.terminate()
self.image_data['total'] = frame_count - first_frame
results = self.xds_total(xdsinput)
return results
def xds_wedge(self, dir, last, xdsinput):
"""
This function controls processing by XDS for an intermediate wedge
"""
self.logger.debug('Fastintegration::xds_wedge')
self.tprint(arg="\nXDS processing", level=99, color="blue")
first = int(self.image_data['start'])
data_range = '%s %s' % (first, last)
xdsdir = os.path.join(self.dirs['work'], dir)
if os.path.isdir(xdsdir) == False:
os.mkdir(xdsdir)
xdsinp = xdsinput[:]
#xdsinp = self.find_spot_range(first, last, self.image_data['osc_range'],xdsinput[:])
xdsinp.append('MAXIMUM_NUMBER_OF_PROCESSORS=%s\n' % self.procs)
xdsinp.append('MAXIMUM_NUMBER_OF_JOBS=%s\n' % self.jobs)
#xdsinp.append('MAXIMUM_NUMBER_OF_JOBS=1\n')
xdsinp.append('JOB=XYCORR INIT COLSPOT !IDXREF DEFPIX INTEGRATE CORRECT\n\n')
xdsinp.append('DATA_RANGE=%s\n' % data_range)
xdsfile = os.path.join(xdsdir, 'XDS.INP')
self.write_file(xdsfile, xdsinp)
self.tprint(arg=" Searching for peaks wedge", level=99, color="white", newline=False)
self.xds_run(xdsdir)
#xdsinp[-3]=('MAXIMUM_NUMBER_OF_JOBS=%s\n' % self.jobs)
xdsinp[-2] = ('JOB=IDXREF DEFPIX INTEGRATE CORRECT\n\n')
self.write_file(xdsfile, xdsinp)
self.tprint(arg=" Integrating", level=99, color="white", newline=False)
self.xds_run(xdsdir)
# If known xds_errors occur, catch them and take corrective action
newinp = 'check_again'
while newinp == 'check_again':
newinp = self.check_for_xds_errors(xdsdir, xdsinp)
if newinp == False:
self.logger.debug(' Unknown xds error occurred for %s.', dir)
self.logger.debug(' Please check for cause!')
return
else:
# Find a suitable cutoff for resolution
# Returns False if no new cutoff, otherwise returns the value of
# the high resolution cutoff as a float value.
new_rescut = self.find_correct_res(xdsdir, 1.0)
if new_rescut != False:
os.rename('%s/CORRECT.LP' %xdsdir, '%s/CORRECT.LP.nocutoff' %xdsdir)
os.rename('%s/XDS.LOG' %xdsdir, '%s/XDS.LOG.nocutoff' %xdsdir)
newinp[-2] = 'JOB=INTEGRATE CORRECT\n'
newinp[-2] = '%sINCLUDE_RESOLUTION_RANGE=200.0 %.2f\n' % (newinp[-2], new_rescut)
self.write_file(xdsfile, newinp)
self.tprint(arg=" Reintegrating", level=99, color="white", newline=False)
self.xds_run(xdsdir)
results = self.run_results(xdsdir)
return results
def createXDSinp(self, xds_dict):
"""
This function takes the dict holding XDS keywords and values
and converts them into a list of strings that serves as the
basis for writing out an XDS.INP file.
"""
self.logger.debug("FastIntegration::createXDSinp")
# print self.image_data["start"]
# print self.image_data["total"]
last_frame = self.image_data['start'] + self.image_data["total"] - 1
self.logger.debug('last_frame = %s', last_frame)
# print last_frame
# self.logger.debug('detector_type = %s' % detector_type)
background_range = '%s %s' %(int(self.image_data['start']), int(self.image_data['start']) + 4)
x_beam = float(self.image_data['x_beam']) / float(self.image_data['pixel_size'])
y_beam = float(self.image_data['y_beam']) / float(self.image_data['pixel_size'])
#if x_beam < 0 or x_beam > int(xds_dict['NX']):
# raise RuntimeError, 'x beam coordinate outside detector'
#if y_beam < 0 or y_beam > int(xds_dict['NY']):
# raise RuntimeError, 'y beam coordinate outside detector'
if 'image_template' in self.image_data:
self.image_template = self.image_data['image_template']
else:
raise RuntimeError, '"image_template" not defined in input data.'
file_template = os.path.join(self.image_data['directory'], self.image_template)
# Count the number of '?' that need to be padded in a image filename.
pad = file_template.count('?')
# Replace the first instance of '?' with the image number of the last
# frame, zero-padded to the full width of the '?' field.
self.last_image = file_template.replace('?', str(last_frame).zfill(pad), 1)
# Remove the remaining '?'
self.last_image = self.last_image.replace('?', '')
# Repeat the last two steps for the first image's filename.
self.first_image = file_template.replace('?', str(self.image_data['start']).zfill(pad), 1)
self.first_image = self.first_image.replace('?', '')
# Begin constructing the list that will represent the XDS.INP file.
xds_input = ['!===== DATA SET DEPENDENT PARAMETERS =====\n',
'ORGX=%.2f ORGY=%.2f ! Beam Center (pixels)\n' % (x_beam, y_beam),
'DETECTOR_DISTANCE=%.2f ! (mm)\n' %
(float(self.image_data['distance'])),
'OSCILLATION_RANGE=%.2f ! (degrees)\n' %
(float(self.image_data['osc_range'])),
'X-RAY_WAVELENGTH=%.5f ! (Angstroems)\n' %
(float(self.image_data['wavelength'])),
'NAME_TEMPLATE_OF_DATA_FRAMES=%s\n\n' % file_template,
'BACKGROUND_RANGE=%s\n\n' % background_range,
'!===== DETECTOR_PARAMETERS =====\n']
for key, value in xds_dict.iteritems():
# Regions that are excluded are defined with
# various keyword containing the word UNTRUSTED.
# Since multiple regions may be specified using
# the same keyword on XDS but a dict cannot
# have multiple values assigned to a key,
# the following if statements work though any
# of these regions and add them to xdsinput.
if 'UNTRUSTED' in key:
if 'RECTANGLE' in key:
line = 'UNTRUSTED_RECTANGLE=%s\n' %value
elif 'ELLIPSE' in key:
line = 'UNTRUSTED_ELLIPSE=%s\n' %value
elif 'QUADRILATERAL' in key:
line = 'UNTRUSTED_QUADRILATERAL=%s\n' %value
else:
line = "%s=%s\n" % (key, value)
xds_input.append(line)
# If the detector is tilted in 2theta, adjust the value of
# DIRECTION_OF_DETECTOR_Y-AXIS.
# **** IMPORTANT ****
# This adjustment assumes that the 2theta tilt affects only
# the DIRECTION_OF_DETECTOR_Y-AXIS, and not the
# DIRECTION_OF_DETECTOR_X-AXIS.
#
# If 2theta is not inclined, self.image_data should not have the key
# 'twotheta', or have that key set to a value of None.
#
# If 2theta is inclined, it should be give in self.image_data
# with the key 'twotheta' and a value in degrees.
#
if 'twotheta' in self.image_data and self.image_data['twotheta'] != None:
twotheta = math.radians(float(self.image_data['twotheta']))
tilty = math.cos(twotheta)
tiltz = math.sin(twotheta)
xds_input.append('!***** Detector is tilted in 2theta *****\n')
xds_input.append('! 2THETA = %s degrees\n' % self.image_data['twotheta'])
xds_input.append('!*** Resetting DIRECTION_OF_DETECTOR_Y-AXIS ***\n')
xds_input.append('DIRECTION_OF_DETECTOR_Y-AXIS= 0.0 %.4f %.4f\n' %(tilty, tiltz))
xds_input.append('! 0.0 cos(2theta) sin(2theta)\n\n')
# pprint(xds_input)
return xds_input
def write_file(self, filename, file_input):
"""
Writes out file_input as filename.
file_input should be a list containing the desired contents
of the file to be written.
"""
self.logger.debug('FastIntegration::write_file')
self.logger.debug(' Filename = %s' % filename )
# pprint(file_input)
with open (filename, 'w') as file:
file.writelines(file_input)
return
def find_spot_range(self, first, last, osc, input):
"""
Finds up to two spot ranges for peak picking.
Ideally the two ranges each cover 5 degrees of data and
are 90 degrees apart. If the data set is 10 degrees or
less, return a single spot range equal to the entire data
set. If the data set is less than 90 degrees, return two
spot ranges representing the first 5 degrees and the middle
5 degrees of data.
"""
self.logger.debug('FastIntegration::find_spot_range')
self.logger.debug(' first_frame = %s', first)
self.logger.debug(' last_frame = %s', last)
self.logger.debug(' frame_width = %s', osc)
# Determine full oscillation range of the data set.
fullrange = (float(last) - float(first) + 1) * float(osc)
# If the full oscillation range is 10 degrees or less
# return a single spot_range equal to the full data set
if fullrange <= 10:
input.append('SPOT_RANGE=%s %s\n\n' %(first, last))
else:
endspot1 = int(first) + int(5 / float(osc)) - 1
input.append('SPOT_RANGE=%s %s\n\n' %(first, endspot1))
if fullrange < 95:
spot2_start = int((int(last) - int(first) + 1) / 2)
else:
spot2_start = int(90 / float(osc))
spot2_end = spot2_start + int(5 / float(osc)) - 1
input.append('SPOT_RANGE=%s %s\n\n' %(spot2_start, spot2_end))
return input
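    # Illustrative example (assumed values): for first=1, last=900 and
    # osc=0.5 the full range is 450 degrees, so two ranges are appended:
    # 'SPOT_RANGE=1 10' (the first 5 degrees) and 'SPOT_RANGE=180 189'
    # (5 degrees starting at the 90 degree mark).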
def xds_run(self, directory):
"""
Launches the running of xds.
"""
self.logger.debug('FastIntegration::xds_run')
self.logger.debug(' directory = %s', directory)
self.logger.debug(' detector = %s', self.image_data['detector'])
xds_command = 'xds_par'
os.chdir(directory)
# TODO skip processing for now
if self.cluster_use == True:
job = Process(target=BLspec.processCluster,
args=(self, (xds_command, 'XDS.LOG', '8', 'phase2.q')))
else:
job = Process(target=Utils.processLocal,
args=((xds_command, "XDS.LOG"),
self.logger))
job.start()
while job.is_alive():
time.sleep(1)
self.tprint(arg=".", level=99, color="white", newline=False)
self.tprint(arg=" done", level=99, color="white")
os.chdir(self.dirs['work'])
return
def xds_ram(self, first_node):
"""
Launches xds_par via ssh on the first_node.
This ensures that xds runs properly when trying to use
data distributed to the cluster's ramdisks
"""
self.logger.debug('FastIntegration::xds_ram')
my_command = ('ssh -x %s "cd $PWD && xds_par > XDS.LOG"' % first_node)
        self.logger.debug('  %s', my_command)
p = subprocess.Popen(my_command, shell=True, )
p.wait()
return
def find_correct_res(self, directory, isigi):
"""
Looks at CORRECT.LP to find a resolution cutoff, where I/sigma is
approximately 1.5
"""
self.logger.debug(' directory = %s', directory)
self.logger.debug(' isigi = %s', isigi)
self.tprint(arg=" Determining resolution cutoff ",
level=99,
color="white",
newline=False)
new_hi_res = False
correctlp = os.path.join(directory, 'CORRECT.LP')
try:
correct_log = open(correctlp, 'r').readlines()
except IOError as e:
self.logger.debug('Could not open CORRECT.LP')
self.logger.debug(e)
return new_hi_res
flag = 0
IsigI = 0
hires = 0
# Read from the bottom of CORRECT.LP up, looking for the first
        # occurrence of "total", which signals that you've found the
        # last statistics table giving I/sigma values in the file.
for i in range(len(correct_log)-1, 0, -1):
if correct_log[i].strip().startswith('total'):
flag = 1
elif flag == 1:
if len(correct_log[i]) == 1:
new_hi_res = hires
break
line = correct_log[i].split()
if line[0][0].isdigit():
#if line[8] == '-99.00':
# self.logger.debug(' IsigI = -99.00')
# return False
prev_hires = hires
prev_IsigI = IsigI
hires = float(line[0])
try:
IsigI = float(line[8])
except ValueError:
pass
#self.logger.debug(' hires = %s, IsigI = %s' %(hires, IsigI))
if IsigI >= isigi:
# If the first IsigI value greater than 2, break and
# return False as new_hires.
if prev_IsigI == 0:
break
else:
new_hi_res = '%0.2f' % numpy.interp([isigi],
[prev_IsigI, IsigI],
[prev_hires, hires])
# print [isigi]
# print [prev_IsigI, IsigI]
# print [prev_hires, hires]
# print interp([isigi], [prev_IsigI, IsigI], [prev_hires, hires])
break
                else: # If first character in line is not a digit, you've
# read through the entire table, so break.
new_hi_res = hires
break
self.logger.debug(' prev_hires = %s prev_IsigI = %s' % (prev_hires, prev_IsigI))
self.logger.debug(' hires = %s IsigI = %s' %(hires, IsigI))
self.logger.debug(' New cutoff = %s' %new_hi_res)
hi_res = float(new_hi_res)
self.tprint(arg="new cutoff = %4.2f %s" % (hi_res, text.aring),
level=99,
color="white")
return hi_res
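        # Illustrative example (assumed numbers): if the shell at 1.80 A has
        # I/sigma = 1.2 and the next (lower resolution) shell at 1.90 A has
        # I/sigma = 1.9, then a target of isigi = 1.5 interpolates to roughly
        # 1.84 A, which is returned as the new high-resolution cutoff.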
def check_for_xds_errors(self, dir, input):
"""
Examines results of an XDS run and searches for known problems.
"""
self.logger.debug('FastIntegration::check_for_xds_errors')
self.tprint(arg=" Checking XDS output for errors",
level=99,
color="white")
os.chdir(dir)
# Enter a loop that looks for an error, then tries to correct it
# and the reruns xds.
# Loop should continue until all errors are corrected, or only
# an unknown error is detected.
xdslog = open('XDS.LOG', 'r').readlines()
for line in xdslog:
if '! ERROR !' in line:
# An error was found in XDS.LOG, now figure out what it was.
if 'CANNOT CONTINUE WITH A TWO DIMENSION' in line:
self.logger.debug(' Found an indexing error')
self.tprint(arg="\n Found an indexing error",
level=10,
color="red")
# Try to fix by extending the data range
tmp = input[-1].split('=')
                    first, last = tmp[1].split()
                    if int(last) == (int(self.image_data['start']) + int(self.image_data['total']) - 1):
self.logger.debug(' FAILURE: Already using the full data range available.')
return False
else:
input[-1] = 'SPOT_RANGE=%s %s' % (first, (int(last) + 1))
self.write_file('XDS.INP', input)
os.system('mv XDS.LOG initialXDS.LOG')
self.tprint(arg="\n Extending spot range",
level=10,
color="white",
newline=False)
self.xds_run(dir)
return input
elif 'SOLUTION IS INACCURATE' in line or 'INSUFFICIENT PERCENTAGE' in line:
self.logger.debug(' Found inaccurate indexing solution error')
self.logger.debug(' Will try to continue anyway')
self.tprint(arg=" Found inaccurate indexing solution error - trying to continue anyway",
level=30,
color="red")
# Inaccurate indexing solution, can try to continue with DEFPIX,
# INTEGRATE, and CORRECT anyway
self.logger.debug(' The length of input is %s' % len(input))
if 'JOB=DEFPIX' in input[-2]:
self.logger.debug('Error = %s' %line)
self.logger.debug('XDS failed to run with inaccurate indexing solution error.')
self.tprint(arg="\n XDS failed to run with inaccurate indexing solution error.",
level=30,
color="red")
return False
else:
input[-2] = ('JOB=DEFPIX INTEGRATE CORRECT !XYCORR INIT COLSPOT'
+ ' IDXREF DEFPIX INTEGRATE CORRECT\n')
self.write_file('XDS.INP', input)
os.system('mv XDS.LOG initialXDS.LOG')
self.tprint(arg="\n Integrating with suboptimal indexing solution",
level=99,
color="white",
newline=False)
self.xds_run(dir)
return input
elif 'SPOT SIZE PARAMETERS HAS FAILED' in line:
self.logger.debug(' Found failure in determining spot size parameters.')
self.logger.debug(' Will use default values for REFLECTING_RANGE and BEAM_DIVERGENCE.')
self.tprint(arg="\n Found failure in determining spot size parameters.",
level=99,
color="red")
input.append('\nREFLECTING_RANGE=1.0 REFLECTING_RANGE_E.S.D.=0.10\n')
input.append('BEAM_DIVERGENCE=0.9 BEAM_DIVERGENCE_E.S.D.=0.09\n')
self.write_file('XDS.INP', input)
os.system('mv XDS.LOG initialXDS.LOG')
self.tprint(arg=" Integrating after failure in determining spot size parameters",
level=99,
color="white",
newline=False)
self.xds_run(dir)
return input
else:
# Unanticipated Error, fail the error check by returning False.
self.logger.debug('Error = %s' %line)
return False
return input
def write_forkscripts(self, node_list, osc):
"""
Creates two small script files that are run in place of
XDS's forkcolspot and forkintegrate scripts to allow
utilization of data distributed on the cluster's ramdisks.
In order for the forkscripts to work, the forkcolspot and
forkintegrate scripts in the xds directory should be modified
appropriately.
"""
self.logger.debug('FastIntegration::write_forkscripts')
niba0 = 5 // float(osc) # minimum number of images per batch
ntask = len(node_list[0]) # Total number of jobs
nodes = node_list[0] # list of nodes where data is distributed
fframes = node_list[1] # list of first image on each node
lframes = node_list[2] # list of last image on each node
forkc = ['#!/bin/bash\n']
forkc.append('echo "1" | ssh -x %s "cd $PWD && mcolspot_par" &\n'
% nodes[0])
forkc.append('echo "2" | ssh -x %s "cd $PWD && mcolspot_par" &\n'
% nodes[-1])
forkc.append('wait\n')
forkc.append('rm -f mcolspot.tmp')
forki = ['#!/bin/bash\n']
for x in range(0, ntask, 1):
itask = x + 1
nitask = lframes[x] - fframes[x] + 1
if nitask < niba0:
nbatask = 1
else:
nbatask = nitask // niba0
forki.append('echo "%s %s %s %s" | ssh -x %s "cd $PWD && mintegrate_par" &\n'
% (fframes[x], nitask, itask, nbatask, nodes[x]))
forki.append('wait\n')
forki.append('rm -f mintegrate.tmp')
self.write_file('forkc', forkc)
self.write_file('forki', forki)
os.chmod('forkc', stat.S_IRWXU)
os.chmod('forki', stat.S_IRWXU)
return
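    # Illustrative example (hypothetical node list): with osc=0.5 (so
    # niba0=10), a node 'compute-0-1' holding frames 1-60 as the first task
    # would get the forki line
    #   echo "1 60 1 6.0" | ssh -x compute-0-1 "cd $PWD && mintegrate_par" &
    # i.e. first frame, number of frames, task number and number of batches.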
def run_results(self, directory):
"""
Takes the results from xds integration/scaling and prepares
tables and plots for the user interface.
"""
self.logger.debug('FastIntegration::run_results')
os.chdir(directory)
orig_rescut = False
# Run xdsstat on XDS_ASCII.HKL.
xdsstat_log = self.xdsstat()
# Run pointless to convert XDS_ASCII.HKL to mtz format.
mtzfile = self.pointless()
# Run dummy run of aimless to generate various stats and plots.
# i.e. We don't use aimless for actual scaling, it's already done by XDS.
if mtzfile != 'Failed':
aimless_log = self.aimless(mtzfile)
else:
self.logger.debug(' Pointless did not run properly!')
self.logger.debug(' Please check logs and files in %s' %self.dirs['work'])
return('Failed')
# Parse the aimless logfile to look for resolution cutoff.
aimlog = open(aimless_log, "r").readlines()
for line in aimlog:
if 'High resolution limit' in line:
current_resolution = line.split()[-1]
elif 'from half-dataset correlation' in line:
resline = line
elif 'from Mn(I/sd) > 1.50' in line:
resline2 = line
break
res_cut = resline.split('=')[1].split('A')[0].strip()
res_cut2 = resline2.split('=')[1].split('A')[0].strip()
if float(res_cut2) < float(res_cut):
res_cut = res_cut2
# Run aimless with a higher resolution cutoff if the suggested resolution
# is greater than the initial resolution + 0.05.
if (float(res_cut) > float(current_resolution) + 0.05):
# Save information on original resolution suggestions
orig_rescut = resline
# rerun aimless
aimless_log = self.aimless(mtzfile, res_cut)
#graphs, tables, summary = self.parse_aimless(aimless_log)
graphs, summary = self.parse_aimless2(aimless_log)
wedge = directory.split('_')[-2:]
summary['wedge'] = '-'.join(wedge)
# Parse INTEGRATE.LP and add information about mosaicity to summary.
summary['mosaicity'] = float(self.parse_integrateLP())
# Parse CORRECT.LP and add information from that to summary.
summary['ISa'] = float(self.parse_correctLP())
# Parse CORRECT.LP and pull out per wedge statistics
#self.parse_correct()
#scalamtz = mtzfile.replace('pointless','scala')
#scalalog = scalamtz.replace('mtz','log')
scalamtz = mtzfile.replace('pointless', 'aimless')
scalalog = scalamtz.replace('mtz', 'log')
results = {'status': 'WORKING',
'plots': graphs,
'summary': summary,
'mtzfile': scalamtz,
'dir': directory
}
self.logger.debug("Returning results!")
self.logger.debug(results)
# Set up the results for return
self.results['process'] = {
'agent_process_id':self.process_id,
'status':50
}
self.results['results'] = results
self.logger.debug(self.results)
# self.sendBack2(tmp)
if self.controller_address:
rapd_send(self.controller_address, self.results)
return results
def make_plots(self, graphs, tables):
"""
Generates the plots html file.
Keyword arguments
graphs --
tables --
"""
self.logger.debug('FastIntegration::make_plots')
# plotThese contains a list of graph titles that you want plotted
# addition plots may be requested by adding the title (stripped of
# leading and trailing whitespace) to plotThese.
# The plot titles also serve as keys for the tab titles.
plotThese = {
#'Mn(k) & 0k (theta=0) v. batch' : 'Scale vs frame',
#'Relative Bfactor v. batch' : 'Bfactor vs frame',
'Rmerge v Batch for all runs' : 'R vs frame',
#'Imean & RMS Scatter' : 'I vs frame',
'Imean/RMS scatter' : 'I/sd vs frame',
'I/sigma, Mean Mn(I)/sd(Mn(I))' : 'I/sigma',
'Rmerge v Resolution' : 'R vs Res',
'Rmerge, Rfull, Rmeas, Rpim v Resolution' : 'R vs Res',
'Average I,sd and Sigma' : 'I vs Res',
'Average I, RMSdeviation and Sd' : 'I vs Res',
'Completeness v Resolution' : 'Completeness',
'Multiplicity v Resolution' : 'Redundancy',
'Rmeas, Rsym & PCV v Resolution' : 'Rmeas',
'Rpim (precision R) v Resolution' : 'Rpim',
#'Rd vs frame_difference' : 'Rd',
'Anom & Imean CCs v resolution -' : 'Anom Corr',
'Anom & Imean CCs v resolution' : 'CCanom and CC1/2',
'RMS correlation ratio' : 'RCR',
'Rcp v. batch' : 'Rcp v batch'
}
plotfile = ['<html>\n',
'<head>\n',
' <style type="text/css">\n',
' body { background-image: none; }\n',
' .x-label { position:relative; text-align:center; top: 10px; }\n',
' .title { font-size:30px; text-align:center; }\n',
' </style>\n',
' <script type="text/javascript">\n',
'$(function() {\n',
' // Tabs\n',
" $('.tabs').tabs();\n",
' });\n',
' </script>\n',
'</head>\n',
'<body>\n',
' <table>\n',
' <tr>\n',
' <td width="100%">\n',
' <div class="tabs">\n',
' <!-- This is where the tabl labels are defined\n',
' 221 = tab2 (on page) tab2 (full output tab) tab1 -->\n',
' <ul>\n'
]
# Define tab labels for each graph.
for i, graph in enumerate(graphs):
if graph[0] in plotThese:
title = plotThese[graph[0]]
plotfile.append(' <li><a href="#tabs-22%s">%s</a></li>\n'
% (i, title))
plotfile.append(' </ul>\n')
# Define title and x-axis labels for each graph.
for i,graph in enumerate(graphs):
if graph[0] in plotThese:
plotfile.extend([' <div id="tabs-22%s">\n' % i,
' <div class="title"><b>%s</b></div>\n'
% graph[0],
' <div id="chart%s_div" style=' % i,
'"width:800px; height:600px"></div>\n',
' <div class="x-label">%s</div>\n'
% graph[1],
' </div>\n'
])
plotfile.extend([' </div> <!-- End of Tabs -->\n',
' </td>\n',
' </tr>\n',
' </table>\n\n',
'<script id="source" language="javascript" type="text/javascript">\n',
'$(function () {\n'
])
        # varNum is a counter, such that the variables used for plotting
        # will simply be y+varNum (i.e. y0, y1, y2, etc)
# actual labels are stored transiently in varLabel, and added
# as comments next to the variable when it is initialized
varNum = 0
for i,graph in enumerate(graphs):
title, xlabel, ylabels, xcol, ycols, tableNum = graph
if title in plotThese:
varLabel = []
data = []
plotline = ' var '
# graph[2] is the label for the y-values.
#ylabels = graph[2]
for ylabel in ylabels:
varLabel.append(ylabel)
var = 'y%s' %varNum
varNum += 1
data.append(var)
if ylabel == ylabels[-1]:
plotline += ('%s= [];\n' % var)
else:
plotline += ('%s= [], ' % var)
plotfile.append(plotline)
#xcol = int(graph[3])
#ycols = graph[4]
#tableNum = graph[5]
self.logger.debug('table # %s' %tableNum)
for line in tables[tableNum]:
#self.logger.debug('tableNum = %s line=%s line[0]=%s' %(tableNum,line, line[0]))
if line[0] == '$$':
#self.logger.debug("line == '$$' is TRUE")
break
for y, ycol in enumerate(ycols):
#self.logger.debug("ycols == %s" %ycols)
if line[ycol] !='-':
plotfile.append(' %s.push([%s,%s]);\n'
%(data[y], line[xcol], line[ycol]))
plotfile.extend([' var plot%s' % i,
' = $.plot($("#chart%s_div"), [\n' % i
])
for x in range(0, len(data), 1):
plotfile.append(' {data:%s, label:"%s" },\n'
% (data[x], varLabel[x]))
plotfile.extend([' ],\n',
' { lines: {show: true},\n',
' points: {show: false},\n',
" selection: {mode: 'xy' },\n",
' grid: {hoverable: true, clickable: true },\n'
] )
if xlabel == 'Dmin (A)':
plotfile.append(' xaxis: {ticks: [\n')
for line in tables[tableNum]:
if line[0] == '$$':
break
plotfile.append(' [%s,"%s"],\n'
%(line[xcol], line[xcol+1]))
plotfile.append(' ]},\n')
plotfile.append(' });\n\n')
plotfile.extend(['function showTooltip(x, y, contents) {\n',
" $('<div id=tooltip>' + contents + '</div>').css( {\n",
" position: 'absolute',\n",
" display: 'none',\n",
" top: y + 5,\n",
' left: x + 5, \n',
" border: '1px solid #fdd',\n",
" padding: '2px',\n",
" 'background-color': '#fee',\n",
" opacity: 0.80\n"
' }).appendTo("body").fadeIn(200);\n',
' }\n\n',
' var previousPoint = null;\n'
])
for i, graph in enumerate(graphs):
title = graph[0]
xlabel = graph[1]
if title in plotThese:
plotfile.append(' $("#chart%s_div").bind' %str(i) )
plotfile.extend(['("plothover", function (event, pos, item) {\n',
' $("#x").text(pos.x.toFixed(2));\n',
' $("#y").text(pos.y.toFixed(2));\n\n',
'if (true) {\n',
' if (item) {\n',
' if (previousPoint != item.datapoint) {\n',
' previousPoint = item.datapoint;\n\n',
' $("#tooltip").remove();\n',
])
if xlabel == 'Dmin (A)':
plotfile.append(' var x = (Math.sqrt(1/item.datapoint[0])).toFixed(2),\n')
else:
plotfile.append(' var x = item.datapoint[0].toFixed(2),\n')
plotfile.extend([' y = item.datapoint[1].toFixed(2);\n',
' showTooltip(item.pageX, item.pageY,\n',
' item.series.label + " at " + x + " = " + y);\n',
' }\n',
' }\n',
' else {\n',
' $("#tooltip").remove();\n',
' previousPoint = null;\n',
' }\n',
' }\n });\n\n'
])
plotfile.append('});\n</script>\n</body>\n</html>\n')
self.write_file('plot.html', plotfile)
return('plot.html')
def parse_aimless(self, logfile):
"""
Parses the aimless logfile in order to pull out data for graphing
and the results table.
        Relevant values from the summary table are stored in a results
dictionary.
Returns a list of lists called graphs that contains information on
data labels and where to pull data from the nested list called tables.
Returns a nested list called tables, which is a copy of the data
tables in the aimless logfile.
Returns a dict called int_results that contains the information
found in the results summary table of the aimless log file.
"""
log = smartie.parselog(logfile)
# The program expect there to be 10 tables in the aimless log file.
ntables = log.ntables()
if ntables != 10:
#raise RuntimeError, '%s tables found in aimless output, program expected 10.' %ntables
            self.logger.debug('%s tables found in aimless output, program expected 10.' %ntables)
tables = []
for i in range(0,ntables):
data = []
# Ignore the Anisotropy analysis table (it's not always present
# and if you don't ignore it, it causes problems when it is not
# there.)
if 'Anisotropy analysis' in log.tables()[i].title():
pass
else:
for line in log.tables()[i].data().split('\n'):
if line != '':
data.append(line.split())
tables.append(data)
# Pull out information for the summary table.
flag = True
summary = log.keytext(0).message().split('\n')
# For some reason, 'Anomalous flag switched ON' is not always being found.
# so this line creates a blank entry of anomalous_report so that it cannot
# be referenced before assignment.
anomalous_report = ''
for line in summary:
if 'Space group' in line:
space_group = line.strip().split(': ')[-1]
elif 'Average unit cell' in line:
unit_cell = map(float, line.split()[3:])
elif 'Anomalous flag switched ON' in line:
anomalous_report = line
#elif flag == True and 'from half-dataset correlation' in line:
# flag = False
# res_cut = line
int_results={
'bins_low': map(float, summary[3].split()[-3:]),
'bins_high': map(float, summary[4].split()[-3:]),
'rmerge_anom': map(float, summary[6].split()[-3:]),
'rmerge_norm': map(float, summary[7].split()[-3:]),
'rmeas_anom': map(float, summary[8].split()[-3:]),
'rmeas_norm': map(float, summary[9].split()[-3:]),
'rpim_anom': map(float, summary[10].split()[-3:]),
'rpim_norm': map(float, summary[11].split()[-3:]),
'rmerge_top': float(summary[12].split()[-3]),
'total_obs': map(int, summary[13].split()[-3:]),
'unique_obs': map(int, summary[14].split()[-3:]),
'isigi': map(float, summary[15].split()[-3:]),
'cc-half': map(float, summary[16].split()[-3:]),
'completeness': map(float, summary[17].split()[-3:]),
'multiplicity': map(float, summary[18].split()[-3:]),
'anom_completeness': map(float, summary[20].split()[-3:]),
'anom_multiplicity': map(float, summary[21].split()[-3:]),
'anom_correlation': map(float, summary[22].split()[-3:]),
'anom_slope': [float(summary[23].split()[-3])],
'scaling_spacegroup': space_group,
'scaling_unit_cell': unit_cell,
#'text': res_cut,
'text2': anomalous_report
}
# Now create a list for each graph to be plotted.
# This list should have [title, xlabel, ylabels, xcol, ycols, tableNum]
# title is the graph title in the aimless logfile,
# xlabel is the label to be used for the x-axis, ylabels are the labels
# to be used for the data sets in the graph, xcol is the position within
        # the table where the x-values are, ycols are the positions of the y-values,
# and tableNum is the position of the table within the list tables.
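        # For example, the entry
        #   ['Rmerge v Batch for all runs', 'image_number', ['Rmerge', 'SmRmerge'], 0, [5,12], 1]
        # plots columns 5 and 12 of tables[1] against column 0, labelling the
        # two curves 'Rmerge' and 'SmRmerge'.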
graphs = [
['Mn(k) & 0k (theta=0) v. batch', 'image_number', ['Mn(k)', '0k'], 0, [5,6], 0],
['Relative Bfactor v. batch', 'image_number', ['Bfactor'], 0, [4], 0],
['Rmerge v Batch for all runs', 'image_number', ['Rmerge', 'SmRmerge'], 0, [5,12], 1],
['Maximum resolution limit, I/sigma > 1.0', 'image_number', ['MaxRes','SmMaxRes'], 0, [10,13], 1],
['Cumulative multiplicity', 'image_number', ['CMlplc'], 0, [11], 1],
['Imean & RMS Scatter', 'image_number', ['Mn(I)','RMSdev'], 0, [2,3], 1],
['Imean/RMS scatter', 'image_number', ['I/rms'], 0, [4], 1],
['Number of rejects', 'image_number', ['Nrej'], 0, [7], 1],
['Anom & Imean CCs v resolution', 'Dmin (A)', ['CCanom', 'CC1/2'], 1, [3,6], 2],
['RMS correlation ratio', 'Dmin (A)', ['RCRanom'], 1, [5], 2],
#['Imean CCs v resolution', 'Dmin (A)', ['CC_d12', 'CC_d3'], 1, [3,4], 3],
#['Mn(I/sd) v resolution', 'Dmin (A)', ['(I/sd)d12', '(I/sd)d3'], 1, [5,6], 3],
#['Projected Imean CCs v resolution', 'Dmin (A)', ['CCp1', 'CCp3'], 1, [7,8], 3],
['I/sigma, Mean Mn(I)/sd(Mn(I))', 'Dmin (A)', ['I/RMS','Mn(I/sd)'], 1, [12,13], 3],
['Rmerge, Rfull, Rmeas, Rpim v Resolution', 'Dmin (A)', ['Rmerge', 'Rfull', 'Rmeas', 'Rpim'], 1, [3,4,6,7], 3],
['Average I, RMSdeviation and Sd', 'Dmin (A)', ['AvI', 'RMSdev', 'sd'], 1, [9,10,11], 3],
['Fractional bias', 'Dmin (A)', ['FrcBias'], 1, [14], 3],
['Rmerge, Rmeas, Rpim v Resolution', 'Dmin (A)',
['Rmerge', 'RmergeOv', 'Rmeas', 'RmeasOv', 'Rpim', 'RpimOv'], 1, [3,4,7,8,9,10], 4],
['Rmerge v Intensity', 'Imax', ['Rmerge', 'Rmeas', 'Rpim'], 0, [1,3,4], 5],
['Completeness v Resolution', 'Dmin (A)', ['%poss', 'C%poss', 'AnoCmp', 'AnoFrc'], 1, [6,7,9,10], 6],
['Multiplicity v Resolution', 'Dmin (A)', ['Mlpclct', 'AnoMlt'], 1, [8,11], 6],
['Sigma(scatter/SD), within 5 sd', '<I>', ['SdFc'], 1, [7], 7],
['Sigma(scatter/SD, within 5 SD, all and within', '<I>', ['SdF', 'SdFc'], 1, [4,7], 7],
['Rcp v. batch', 'relative frame difference', ['Rcp'], 1, [-1], 8]
]
return(graphs, tables, int_results)
def parse_aimless2(self, logfile):
"""
Parses the aimless logfile in order to pull out data for
graphing and the results summary table.
Relevant values for the summary table are stored in a dict.
Relevant information for creating plots are stored in a dict,
with the following format for each entry (i.e. each plot):
{"<*plot label*>":{
"data":{
"parameters":{<*line parameters*>},
"series":[
{xs : [],
ys : []
}
]
}
"parameters" : {<*plot parameters*>}
}
...
...
}
"""
log = smartie.parselog(logfile)
# Pull out information for the results summary table.
flag = True
summary = log.keytext(0).message().split("\n")
# For some reason "Anomalous flag switched ON" is not always
# found, so the line below creates a blank entry for the
# the variable that should be created when that phrase is
# found, eliminating the problem where the program reports that
# the variable anomalous_report is referenced before assignment.
anomalous_report = ""
for line in summary:
if "Space group" in line:
space_group = line.strip().split(": ")[-1]
elif "Average unit cell" in line:
unit_cell = map(float, line.split()[3:])
elif "Anomalous flag switched ON" in line:
anomalous_report = line
int_results = {
"bins_low": map(float, summary[3].split()[-3:]),
"bins_high": map(float, summary[4].split()[-3:]),
"rmerge_anom": map(float, summary[6].split()[-3:]),
"rmerge_norm": map(float, summary[7].split()[-3:]),
"rmeas_anom": map(float, summary[8].split()[-3:]),
"rmeas_norm": map(float, summary[9].split()[-3:]),
"rpim_anom": map(float, summary[10].split()[-3:]),
"rpim_norm": map(float, summary[11].split()[-3:]),
"rmerge_top": float(summary[12].split()[-3]),
"total_obs": map(int, summary[13].split()[-3:]),
"unique_obs": map(int, summary[14].split()[-3:]),
"isigi": map(float, summary[15].split()[-3:]),
"cc-half": map(float, summary[16].split()[-3:]),
"completeness": map(float, summary[17].split()[-3:]),
"multiplicity": map(float, summary[18].split()[-3:]),
"anom_completeness": map(float, summary[20].split()[-3:]),
"anom_multiplicity": map(float, summary[21].split()[-3:]),
"anom_correlation": map(float, summary[22].split()[-3:]),
"anom_slope": [float(summary[23].split()[-3])],
"scaling_spacegroup": space_group,
"scaling_unit_cell": unit_cell,
"text2": anomalous_report
}
# Smartie can pull table information based on a regular
# expression pattern that matches the table title from
# the aimless log file.
# NOTE : the regular expression must match the beginning
# of the table's title, but does not need to be the entire
# title.
#
# We will use this to pull out the data from tables we are
# interested in.
#
# The beginning of the titles for all common tables in the
# aimless log file are given below, but not all of them
# are currently used to generate a plot.
scales = "=== Scales v rotation"
rfactor = "Analysis against all Batches"
cchalf = "Correlations CC(1/2)"
anisotropy = "Anisotropy analysis"
vresolution = "Analysis against resolution, XDSdataset"
anomalous = "Analysis against resolution, with & without"
intensity = "Analysis against intensity"
completeness = "Completeness & multiplicity"
deviation = "Run 1, standard deviation"
rcp = "Radiation damage"
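        # The strings above are used as regular expression patterns for smartie,
        # e.g. log.tables(rfactor)[0].col("Rmerge") returns the 'Rmerge' column
        # of the first table whose title starts with 'Analysis against all
        # Batches'; this is how the plot data below is extracted.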
plots = {
"Rmerge vs Frame": {
"data" :[
{
"parameters" :
{
"linecolor" : "3",
"linelabel" : "Rmerge",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [int(x) for x in log.tables(rfactor)[0].col("N")],
"ys" : [try_float(x, 0.0) for x in log.tables(rfactor)[0].col("Rmerge")]
} ]
},
{
"parameters" :
{
"linecolor" : "4",
"linelabel" : "SmRmerge",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_int(x) for x in log.tables(rfactor)[0].col("N")],
"ys" : [try_float(x, 0.0) for x in log.tables(rfactor)[0].col("SmRmerge")]
} ]
} ],
"parameters" :
{
"toplabel" : "Rmerge vs Batch for all Runs",
"xlabel" : "Image Number"
}
},
"Imean/RMS scatter" :
{
"data" :
[ {
"parameters" :
{
"linecolor" : "3",
"linelabel" : "I/rms",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [int(x) for x in log.tables(rfactor)[0].col("N")],
"ys" : [try_float(x, 0.0) for x in log.tables(rfactor)[0].col("I/rms")]
} ]
} ],
"parameters" :
{
"toplabel" : "Imean / RMS scatter",
"xlabel" : "Image Number"
}
},
"Anomalous & Imean CCs vs Resolution" :
{
"data" :
[ {
"parameters" :
{
"linecolor" : "3",
"linelabel" : "CCanom",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(cchalf)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(cchalf)[0].col("CCanom")]
} ]
},
{
"parameters" :
{
"linecolor" : "4",
"linelabel" : "CC1/2",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(cchalf)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(cchalf)[0].col("CC1/2")]
} ]
} ],
"parameters" :
{
"toplabel" : "Anomalous & Imean CCs vs. Resolution",
"xlabel" : "Dmid (Angstroms)"
}
},
"RMS correlation ration" :
{
"data" :
[ {
"parameters" :
{
"linecolor" : "3",
"linelabel" : "RCRanom",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(cchalf)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(cchalf)[0].col("RCRanom")]
} ]
} ],
"parameters" :
{
"toplabel" : "RMS correlation ratio",
"xlabel" : "Dmid (Angstroms)"
}
},
"I/sigma, Mean Mn(I)/sd(Mn(I))" :
{
"data" :
[ {
"parameters" :
{
"linecolor" : "3",
"linelabel" : "I/RMS",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("I/RMS")]
} ]
},
{
"parameters" :
{
"linecolor" : "4",
"linelabel" : "Mn(I/sd)",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("Mn(I/sd)")]
} ]
} ],
"parameters" :
{
"toplabel" : "I/sigma, Mean Mn(I)/sd(Mn(I))",
"xlabel" : "Dmid (Angstroms)"
}
},
"Rmerge, Rfull, Rmeas, Rpim vs. Resolution" :
{
"data" :
[ {
"parameters" :
{
"linecolor" : "3",
"linelabel" : "Remerge",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("Rmrg")]
} ]
},
{
"parameters" :
{
"linecolor" : "4",
"linelabel" : "Rfull",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("Rfull")]
} ]
},
{
"parameters" :
{
"linecolor" : "5",
"linelabel" : "Rmeas",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("Rmeas")]
} ]
},
{
"parameters" :
{
"linecolor" : "6",
"linelabel" : "Rpim",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("Rpim")]
} ]
} ],
"parameters" :
{
"toplabel" : "Rmerge, Rfull, Rmeas, Rpim vs. Resolution",
"xlabel" : "Dmid (Angstroms)"
}
},
"Average I, RMS deviation, and Sd" :
{
"data" :
[ {
"parameters" :
{
"linecolor" : "3",
"linelabel" : "Average I",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("1/d^2")],
"ys" : [try_int(x, 0) for x in log.tables(vresolution)[0].col("AvI")]
} ]
},
{
"parameters" :
{
"linecolor" : "4",
"linelabel" : "RMS deviation",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("RMSdev")]
} ]
},
{
"parameters" :
{
"linecolor" : "5",
"linelabel" : "std. dev.",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(vresolution)[0].col("sd")]
} ]
} ],
"parameters" :
{
"toplabel" : "Average I, RMS dev., and std. dev.",
"xlabel" : "Dmid (Ansgstroms)"
}
},
"Completeness" :
{
"data" :
[ {
"parameters" :
{
"linecolor" : "3",
"linelabel" : "%poss",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(completeness)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(completeness)[0].col("%poss")]
} ]
},
{
"parameters" :
{
"linecolor" : "4",
"linelabel" : "C%poss",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(completeness)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(completeness)[0].col("C%poss")]
} ]
},
{
"parameters" :
{
"linecolor" : "5",
"linelabel" : "AnoCmp",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(completeness)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(completeness)[0].col("AnoCmp")]
} ]
},
{
"parameters" :
{
"linecolor" : "6",
"linelabel" : "AnoFrc",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(completeness)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(completeness)[0].col("AnoFrc")]
} ]
} ],
"parameters" :
{
"toplabel" : "Completeness vs. Resolution",
"xlabel" : "Dmid (Angstroms)"
}
},
"Redundancy" :
{
"data" :
[ {
"parameters" :
{
"linecolor" : "3",
"linelabel" : "multiplicity",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(completeness)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(completeness)[0].col("Mlplct")]
} ]
},
{
"parameters" :
{
"linecolor" : "4",
"linelabel" : "anomalous multiplicity",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [try_float(x, 0.0) for x in log.tables(completeness)[0].col("1/d^2")],
"ys" : [try_float(x, 0.0) for x in log.tables(completeness)[0].col("AnoMlt")]
} ]
} ],
"parameters" :
{
"toplabel" : "Redundancy",
"xlabel" : "Dmid (Angstroms)"
}
},
"Radiation Damage" :
{
"data" :
[ {
"parameters" :
{
"linecolor" : "3",
"linelabel" : "Rcp",
"linetype" : "11",
"linewidth" : "3"
},
"series" :
[ {
"xs" : [ int(x) for x in log.tables(rcp)[0].col("Batch")],
"ys" : [try_float(x, 0.0) for x in log.tables(rcp)[0].col("Rcp")]
} ]
} ],
"parameters" :
{
"toplabel" : "Rcp vs. Batch",
"xlabel" : "Relative frame difference"
}
}
}
# Return to the main program.
return (plots, int_results)
def aimless(self, mtzin, resolution=False):
"""
Runs aimless on the data, including the scaling step.
"""
self.logger.debug('FastIntegration::aimless')
self.tprint(arg=" Running Aimless",
level=99,
color="white")
mtzout = mtzin.replace('pointless', 'aimless')
logfile = mtzout.replace('mtz', 'log')
comfile = mtzout.replace('mtz', 'com')
aimless_file = ['#!/bin/tcsh\n',
#'/share/apps/necat/programs/ccp4-6.3.0/ccp4-6.3.0/bin/aimless hklin %s hklout %s << eof > %s\n' % (mtzin, mtzout, logfile),
'aimless hklin %s hklout %s << eof > %s\n' % (mtzin, mtzout, logfile),
'anomalous on\n',
'scales constant\n',
'sdcorrection norefine full 1 0 0 partial 1 0 0\n',
'cycles 0\n']#, Change made on Feb. 20, 2015 to exclude bins resolution
#'bins resolution 10\n']
if resolution != False:
aimless_file.append('resolution %s\n' % resolution)
aimless_file.append('eof')
self.write_file(comfile, aimless_file)
os.chmod(comfile, stat.S_IRWXU)
cmd = './%s' % comfile
# os.system(cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
return logfile
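    # Illustrative example (hypothetical prefix): mtzin 'lyso_pointless.mtz'
    # produces mtzout 'lyso_aimless.mtz', logfile 'lyso_aimless.log' and
    # comfile 'lyso_aimless.com'; the returned value is the logfile name.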
def pointless(self):
"""
Runs pointless on the default reflection file, XDS_ASCII.HKl
to produce an mtz file suitable for input to aimless.
"""
self.logger.debug("FastIntegration::pointless")
self.tprint(arg=" Running Pointless", level=10, color="white")
hklfile = 'XDS_ASCII.HKL'
mtzfile = '_'.join([self.image_data['image_prefix'], 'pointless.mtz'])
logfile = mtzfile.replace('mtz', 'log')
if self.spacegroup:
cmd = ('pointless xdsin %s hklout %s << eof > %s\nSETTING C2\nSPACEGROUP HKLIN\n eof'
% (hklfile, mtzfile, logfile))
else:
cmd = ('pointless xdsin %s hklout %s << eof > %s\n SETTING C2 \n eof'
% (hklfile, mtzfile, logfile))
self.logger.debug("cmd = %s", cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
# sts = os.waitpid(p.pid, 0)[1]
tmp = open(logfile, "r").readlines()
return_value="Failed"
for i in range(-10, -1):
if tmp[i].startswith('P.R.Evans'):
return_value=mtzfile
break
return(return_value)
def parse_xdsstat(self, log, tables_length):
"""
Parses the output of xdsstat (XDSSTAT.LP) to pull out the Rd
information
"""
        self.logger.debug('FastIntegration::parse_xdsstat')
rd_table = []
xdsstat = open(log,'r').readlines()
for line in xdsstat:
if 'DIFFERENCE' in line:
split_line = line.split()
# extract Framediff, R_d, Rd_notfriedel, Rd_friedel.
table_line = [split_line[0], split_line[2], split_line[4], split_line[6] ]
rd_table.append(table_line)
title = 'Rd vs frame_difference'
xlabel = 'Frame Difference'
ylabels = ['Rd', 'Rd_notfriedel', 'Rd_friedel']
xcol = 0
ycols = [1,2,3]
tableNum = tables_length
rd_graph = (title, xlabel, ylabels, xcol, ycols, tableNum)
return(rd_graph, rd_table)
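    # The parsing above assumes XDSSTAT.LP 'DIFFERENCE' rows in which column 0
    # holds the frame difference and columns 2, 4 and 6 hold Rd, Rd_notfriedel
    # and Rd_friedel respectively; this layout is an assumption inferred from
    # the indices used here, not verified against every xdsstat version.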
def xdsstat(self):
"""
Runs xdsstat, a program that extracts some extra statistics
from the results of XDS CORRECT.
In order for this to run, xdsstat should be installed in the user's path.
And a script called xdsstat.sh should also be created and available in the path.
        Information about the availability of xdsstat can be obtained at the xdswiki:
http://strucbio.biologie.uni-konstanz.de/xdswiki/index.php/Xdsstat#Availability
xdsstat.sh is a simple three line shell script:
#!/bin/tcsh
xdsstat << eof > XDSSTAT.LP
XDS_ASCII.HKL
eof
It runs xdsstat on the default reflection file XDS_ASCII.HKL and sends the
output to the file XDSSTAT.LP
"""
self.logger.debug('FastIntegration::xdsstat')
self.tprint(arg=" Running XDSSTAT", level=10, color="white")
# Check to see if xdsstat exists in the path
test = find_executable("xdsstat.sh")
if test == None:
self.logger.debug(' xdsstat.sh is not in the defined PATH')
# Write xdsstat.sh
xdsststsh = ["#!/bin/bash\n",
"xdsstat << eof > XDSSTAT.LP\n",
"XDS_ASCII.HKL\n",
"eof\n"]
self.write_file("xdsstat.sh", xdsststsh)
os.chmod("./xdsstat.sh", stat.S_IRWXU)
try:
job = Process(target=Utils.processLocal, args=(('xdsstat.sh'), self.logger))
job.start()
while job.is_alive():
time.sleep(1)
except IOError as e:
self.logger.debug(' xdsstat.sh failed to run properly')
self.logger.debug(e)
return('Failed')
if os.path.isfile('XDSSTAT.LP'):
return('XDSSTAT.LP')
else:
self.logger.debug(' XDSSTAT.LP does not exist')
return('Failed')
def finish_data(self, results):
"""
Final creation of various files (e.g. an mtz file with R-flag added,
.sca files with native or anomalous data treatment)
"""
in_file = os.path.join(results['dir'], results['mtzfile'])
self.logger.debug('FastIntegration::finish_data - in_file = %s', in_file)
# Truncate the data.
comfile = ['#!/bin/csh\n',
'truncate hklin %s hklout truncated.mtz << eof > truncate.log\n'
% in_file,
'ranges 60\n',
'eof\n']
self.write_file('truncate.sh', comfile)
os.chmod('truncate.sh', stat.S_IRWXU)
p = subprocess.Popen('./truncate.sh',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
# Set the free R flag.
comfile = ['#!/bin/csh\n',
'freerflag hklin truncated.mtz hklout freer.mtz <<eof > freer.log\n',
'END\n',
'eof']
self.write_file('freer.sh', comfile)
os.chmod('freer.sh', stat.S_IRWXU)
p = subprocess.Popen('./freer.sh',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
# Create the merged scalepack format file.
comfile = ['#!/bin/csh\n',
'mtz2various hklin truncated.mtz hklout NATIVE.sca ',
'<< eof > mtz2scaNAT.log\n',
'OUTPUT SCALEPACK\n',
'labin I=IMEAN SIGI=SIGIMEAN\n',
'END\n',
'eof']
self.write_file('mtz2scaNAT.sh', comfile)
os.chmod('mtz2scaNAT.sh', stat.S_IRWXU)
p = subprocess.Popen('./mtz2scaNAT.sh',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
self.fixMtz2Sca('NATIVE.sca')
Utils.fixSCA(self, 'NATIVE.sca')
# Create the unmerged scalepack format file.
comfile = ['#!/bin/csh\n',
'mtz2various hklin truncated.mtz hklout ANOM.sca ',
'<< eof > mtz2scaANOM.log\n',
'OUTPUT SCALEPACK\n',
'labin I(+)=I(+) SIGI(+)=SIGI(+) I(-)=I(-) SIGI(-)=SIGI(-)\n',
'END\n',
'eof']
self.write_file('mtz2scaANOM.sh', comfile)
os.chmod('mtz2scaANOM.sh', stat.S_IRWXU)
p = subprocess.Popen('./mtz2scaANOM.sh',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
self.fixMtz2Sca('ANOM.sca')
Utils.fixSCA(self, 'ANOM.sca')
# Create a mosflm matrix file
correct_file = os.path.join(results['dir'], 'CORRECT.LP')
Xds2Mosflm(xds_file=correct_file, mat_file="reference.mat")
# Clean up the filesystem.
# Move some files around
if os.path.isdir('%s/xds_lp_files' % self.dirs['work']) == False:
os.mkdir('%s/xds_lp_files' % self.dirs['work'])
os.system('cp %s/*.LP %s/xds_lp_files/' % (results['dir'], self.dirs['work']))
tar_name = '_'.join([self.image_data['image_prefix'], str(self.image_data['run_number'])])
results_dir = os.path.join(self.dirs['work'], tar_name)
if os.path.isdir(results_dir) == False:
os.mkdir(results_dir)
prefix = '%s/%s_%s' %(results_dir, self.image_data['image_prefix'],
self.image_data['run_number'])
os.system('cp freer.mtz %s_free.mtz' % prefix)
os.system('cp NATIVE.sca %s_NATIVE.sca' % prefix)
os.system('cp ANOM.sca %s_ANOM.sca' % prefix)
os.system('cp %s/*aimless.log %s_aimless.log' %(results['dir'], prefix))
os.system('cp %s/*aimless.com %s_aimless.com' %(results['dir'], prefix))
os.system('cp %s/*pointless.mtz %s_mergable.mtz' %(results['dir'], prefix))
os.system('cp %s/*pointless.log %s_pointless.log' %(results['dir'], prefix))
os.system('cp %s/XDS.LOG %s_XDS.LOG' %(results['dir'], prefix))
os.system('cp %s/XDS.INP %s_XDS.INP' %(results['dir'], prefix))
os.system('cp %s/CORRECT.LP %s_CORRECT.LP' %(results['dir'], prefix))
os.system('cp %s/INTEGRATE.LP %s_INTEGRATE.LP' %(results['dir'], prefix))
os.system('cp %s/XDSSTAT.LP %s_XDSSTAT.LP' %(results['dir'], prefix))
os.system('cp %s/XDS_ASCII.HKL %s_XDS.HKL' %(results['dir'], prefix))
# Remove any integration directories.
os.system('rm -rf wedge_*')
# Remove extra files in working directory.
os.system('rm -f *.mtz *.sca *.sh *.log junk_*')
# Create a downloadable tar file.
tar_dir = tar_name
tar_name += '.tar.bz2'
tarname = os.path.join(self.dirs['work'], tar_name)
# print 'tar -cjf %s %s' %(tar_name, tar_dir)
# print os.getcwd()
os.chdir(self.dirs['work'])
# print os.getcwd()
os.system('tar -cjf %s %s' %(tar_name, tar_dir))
# Tarball the XDS log files
lp_name = 'xds_lp_files.tar.bz2'
# print "tar -cjf %s xds_lp_files/" % lp_name
os.system("tar -cjf %s xds_lp_files/" % lp_name)
# Remove xds_lp_files directory
os.system('rm -rf xds_lp_files')
# If ramdisks were used, erase files from ram_disks.
if self.ram_use == True and self.settings['ram_cleanup'] == True:
remove_command = 'rm -rf /dev/shm/%s' % self.image_data['image_prefix']
for node in self.ram_nodes[0]:
command2 = 'ssh -x %s "%s"' % (node, remove_command)
p = subprocess.Popen(command2,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
tmp = results
#if shelxc_results != None:
# tmp['shelxc_results'] = shelxc_results
files = {'mergable' : '%s_mergable.mtz' % prefix,
'mtzfile' : '%s_free.mtz' % prefix,
'ANOM_sca' : '%s_ANOM.sca' % prefix,
'NATIVE_sca' : '%s_NATIVE.sca' % prefix,
'scala_log' : '%s_scala.log' % prefix,
'scala_com' : '%s_scala.com' % prefix,
'xds_data' : '%s_XDS.HKL' % prefix,
'xds_log' : '%s_XDS.LOG' % prefix,
'xds_com' : '%s_XDS.INP' % prefix,
'downloadable' : tarname
}
tmp['files'] = files
return(tmp)
def fixMtz2Sca(self, scafile):
"""
Corrects the scalepack file generated by mtz2various by removing
whitespace in the spacegroup name.
"""
self.logger.debug('FastIntegration::fixMtz2Sca scafile = %s' % scafile)
inlines = open(scafile, 'r').readlines()
symline = inlines[2]
newline = (symline[:symline.index(symline.split()[6])]
+ ''.join(symline.split()[6:]) + '\n')
inlines[2] = newline
self.write_file(scafile, inlines)
return
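    # Illustrative example (assumed header layout): the third line's tokens
    # from the seventh onward are joined without spaces, so a space group
    # written as 'P 21 21 21' becomes 'P212121', which scalepack-reading
    # programs expect.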
def run_analysis(self, data_to_analyze, dir):
"""
Runs "pdbquery" and xtriage on the integrated data.
data_to_analyze = the integrated mtzfile
dir = the working integration directory
"""
self.logger.debug('FastIntegration::run_analysis')
self.logger.debug(' data = %s' % data_to_analyze)
self.logger.debug(' dir = %s' % dir)
analysis_dir = os.path.join(dir, 'analysis')
if os.path.isdir(analysis_dir) == False:
os.mkdir(analysis_dir)
run_dict = {'fullname' : self.image_data['fullname'],
# 'fullname' : self.first_image
'total' : self.image_data['total'],
'osc_range' : self.image_data['osc_range'],
'x_beam' : self.image_data['x_beam'],
'y_beam' : self.image_data['y_beam'],
'two_theta' : self.image_data.get("twotheta", 0),
'distance' : self.image_data['distance']
}
pdb_input = []
pdb_dict = {}
pdb_dict['run'] = run_dict
pdb_dict['dir'] = analysis_dir
pdb_dict['data'] = data_to_analyze
pdb_dict["agent_directories"] = self.dirs.get("agent_directories", False)
pdb_dict['control'] = self.controller_address
pdb_dict['process_id'] = self.process_id
pdb_input.append(pdb_dict)
self.logger.debug(' Sending pdb_input to Autostats')
# try:
T = AutoStats(pdb_input, self.logger)
self.logger.debug('I KNOW WHO YOU ARE')
# except:
# self.logger.debug(' Execution of AutoStats failed')
# return('Failed')
return "Success"
# def process_shelxC(self, unitcell, spacegroup, scafile):
# """
# Runs shelxC. Determines an appropriate cutoff for anomalous signal.
# Inserts table of shelxC results into the results summary page.
# """
# self.logger.debug('FastIntegration::process_shelxC')
# command = ('shelxc junk << EOF\nCELL %s\nSPAG %s\nSAD %s\nEOF'
# % (unitcell, spacegroup, scafile) )
# shelx_log = []
# output0 = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
# stderr=subprocess.STDOUT)
# output0.wait()
# for line in output0.stdout:
# shelx_log.append(line.strip())
# self.logger.debug(line)
# results = self.parse_shelxC(shelx_log)
# res = False
# for i,v in enumerate(results['shelx_dsig']):
# dsig = float(v)
# if dsig > 1.0:
# res =results['shelx_res'][i]
# results['shelx_rescut'] = res
# #self.insert_shelx_results(results)
# return results
# def parse_shelxC(self, logfile):
# """
# Parses the shelxc output.
# """
# self.logger.debug('FastIntegration::parse_shelxC')
# shelxc_results={}
# for line in logfile:
# if line.startswith('Resl'):
# if line.split()[2] == '-':
# shelxc_results['shelx_res'] = line.split()[3::2]
# else:
# shelxc_results['shelx_res'] = line.split()[2:]
# #shelxc_results['shelx_res'] = line.split()[3::2]
# shelxc_results['shelx_res'] = line.split()[2:]
# elif line.startswith('N(data)'):
# shelxc_results['shelx_data'] = line.split()[1:]
# elif line.startswith('<I/sig>'):
# shelxc_results['shelx_isig'] = line.split()[1:]
# elif line.startswith('%Complete'):
# shelxc_results['shelx_comp'] = line.split()[1:]
# elif line.startswith('<d"/sig>'):
# shelxc_results['shelx_dsig'] = line.split()[1:]
# return(shelxc_results)
# def insert_shelx_results(self, results):
# """
# Inserts shelxC results into the results summary webpage.
# """
# self.logger.debug('FastIntegration::insert_shelx_results')
#
# htmlfile = open('results.php', 'r').readlines()
# if results['shelx_rescut'] == False:
# text = ('\nAnalysis of ShelxC results finds no resolution shell '
# + 'where d"/sig is greater than 1.0.\n')
# htmlfile.insert(-10, text)
# else:
# text = ('\nAnalsysis of ShelxC results finds d"/sig greater than '
# + '1.0 for at least one resolution shell.\n')
# htmlfile.insert(-10, text)
# shelxc = ('<div align ="center">\n' +
# '<h3 class="green">ShelxC analysis of data</h3>\n' +
# '<table class="integrate">\n' +
# '<tr><th>Resl.</th>')
# for item in results['shelx_res']:
# shelxc += ('<td>%s</td>' % item)
# shelxc += ('</tr>\n<tr class="alt"><th>N(data)</th>')
# for item in results['shelx_data']:
# shelxc += ('<td>%s</td>' % item)
# shelxc +=('</tr>\n<tr><th>IsigI</th>')
# for item in results['shelx_isig']:
# shelxc += ('<td>%s</td>' % item)
# shelxc += ('</tr>\n<tr class="alt"><th>%Complete</th>')
# for item in results['shelx_comp']:
# shelxc += ('<td>%s</td>' % item)
# shelxc += ('</tr>\n<tr><th>d"/sig</th>')
# for item in results['shelx_dsig']:
# shelxc += ('<td>%s</td>' % item)
# shelxc += ('</tr>\n<caption>For zero signal d"/sig should be '
# + 'about 0.80</caption>\n</table></div><br>\n')
# htmlfile.insert(-9, shelxc)
# self.write_file('results.php', htmlfile)
# return
def parse_integrateLP(self):
"""
Parse the INTEGRATE.LP file and extract information
about the mosaicity.
"""
self.logger.debug('FastIntegration::parse_integrateLP')
lp = open('INTEGRATE.LP', 'r').readlines()
for linenum, line in enumerate(lp):
if 'SUGGESTED VALUES FOR INPUT PARAMETERS' in line:
avg_mosaicity_line = lp[linenum + 2]
avg_mosaicity = avg_mosaicity_line.strip().split(' ')[-1]
return(avg_mosaicity)
def parse_correctLP(self):
"""
Parses the CORRECT.LP file to extract information
"""
self.logger.debug('FastIntegration::parse_correctLP')
lp = open('CORRECT.LP', 'r').readlines()
for i, line in enumerate(lp):
if 'ISa\n' in line:
isa_line = lp[i + 1]
break
ISa = isa_line.strip().split()[-1]
return(ISa)
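    # This assumes CORRECT.LP contains a small table whose header line ends
    # with 'ISa' and whose following line ends with the ISa value, e.g.
    # (made-up numbers) '   3.2E+00  1.5E-03   28.45' would give ISa = '28.45'.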
def find_xds_symm(self, xdsdir, xdsinp):
"""
Checks xds results for consistency with user input spacegroup.
If inconsistent, tries to force user input spacegroup on data.
        Returns new input file for integration
"""
sg_num = int(Utils.std2intl[self.spacegroup])
# Change to directory
os.chdir(xdsdir)
new_inp = self.modify_xdsinput_for_symm(xdsinp, sg_num, "IDXREF.LP")
# Make sure we end in the right place
os.chdir(self.dirs['work'])
return new_inp
def modify_xdsinput_for_symm(self, xdsinp, sg_num, logfile):
"""
        Modifies the XDS input to rerun integration in the user input spacegroup
"""
if sg_num == 1:
bravais = 'aP'
        elif 3 <= sg_num <= 4:
            bravais = 'mP'
        elif sg_num == 5:
            bravais = 'mC'
        elif 16 <= sg_num <= 19:
            bravais = 'oP'
        elif 20 <= sg_num <= 21:
            bravais = 'oC'
        elif sg_num == 22:
            bravais = 'oF'
        elif 23 <= sg_num <= 24:
            bravais = 'oI'
        elif 75 <= sg_num <= 78 or 89 <= sg_num <= 96:
            bravais = 'tP'
        elif 79 <= sg_num <= 80 or 97 <= sg_num <= 98:
            bravais = 'tI'
        elif 143 <= sg_num <= 145 or 149 <= sg_num <= 154 or 168 <= sg_num <= 182:
            bravais = 'hP'
        elif sg_num == 146 or sg_num == 155:
            bravais = 'hR'
        elif sg_num == 195 or sg_num == 198 or 207 <= sg_num <= 208 or 212 <= sg_num <= 213:
            bravais = 'cP'
        elif sg_num == 196 or 209 <= sg_num <= 210:
            bravais = 'cF'
        elif sg_num == 197 or sg_num == 199 or sg_num == 211 or sg_num == 214:
            bravais = 'cI'
# Now search IDXREF.LP for matching cell information.
idxref = open(logfile, 'r').readlines()
for line in idxref:
# print line
if bravais in line and '*' in line:
splitline = line.split()
# print splitline
# print splitline[4:]
break
cell = ('%s %s %s %s %s %s' % tuple(splitline[4:]))
xdsinp[-2] = 'JOB=DEFPIX INTEGRATE CORRECT\n\n'
xdsinp.append('SPACE_GROUP_NUMBER=%d\n' % sg_num)
xdsinp.append('UNIT_CELL_CONSTANTS=%s\n' % cell)
# self.write_file('XDS.INP', xdsinp)
return xdsinp
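    # The IDXREF.LP search above assumes lattice-character rows of the
    # (approximate) form
    #   ' *  31  aP  0.0  78.6  78.6  37.8  90.0  90.0  90.0'
    # (illustrative numbers); the starred row matching the requested Bravais
    # type supplies the six unit-cell constants written to XDS.INP.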
def print_results(self, results):
"""Print out results to the terminal"""
if isinstance(results, dict):
# Print summary
summary = results["summary"]
# pprint(summary)
self.tprint(" Spacegroup: %s" % summary["scaling_spacegroup"], 99, "white")
self.tprint(" Unit cell: %5.1f %5.1f %5.1f %5.2f %5.2f %5.2f" %
tuple(summary["scaling_unit_cell"]), 99, "white")
self.tprint(" Mosaicity: %5.3f" % summary["mosaicity"], 99, "white")
self.tprint(" overall inner shell outer shell", 99, "white")
self.tprint(" High res limit %5.2f %5.2f %5.2f" %
tuple(summary["bins_high"]), 99, "white")
self.tprint(" Low res limit %5.2f %5.2f %5.2f" %
tuple(summary["bins_low"]), 99, "white")
self.tprint(" Completeness %5.1f %5.1f %5.1f" %
tuple(summary["completeness"]), 99, "white")
self.tprint(" Multiplicity %4.1f %4.1f %4.1f" %
tuple(summary["multiplicity"]), 99, "white")
self.tprint(" I/sigma(I) %4.1f %4.1f %4.1f" %
tuple(summary["isigi"]), 99, "white")
self.tprint(" CC(1/2) %5.3f %5.3f %5.3f" %
tuple(summary["cc-half"]), 99, "white")
self.tprint(" Rmerge %5.3f %5.3f %5.3f" %
tuple(summary["rmerge_norm"]), 99, "white")
self.tprint(" Anom Rmerge %5.3f %5.3f %5.3f" %
tuple(summary["rmerge_anom"]), 99, "white")
self.tprint(" Rmeas %5.3f %5.3f %5.3f" %
tuple(summary["rmeas_norm"]), 99, "white")
self.tprint(" Anom Rmeas %5.3f %5.3f %5.3f" %
tuple(summary["rmeas_anom"]), 99, "white")
self.tprint(" Rpim %5.3f %5.3f %5.3f" %
tuple(summary["rpim_norm"]), 99, "white")
self.tprint(" Anom Rpim %5.3f %5.3f %5.3f" %
tuple(summary["rpim_anom"]), 99, "white")
self.tprint(" Anom Completeness %5.1f %5.1f %5.1f" %
tuple(summary["anom_completeness"]), 99, "white")
self.tprint(" Anom Multiplicity %4.1f %4.1f %4.1f" %
tuple(summary["anom_multiplicity"]), 99, "white")
self.tprint(" Anom Correlation %5.3f %5.3f %5.3f" %
tuple(summary["anom_correlation"]), 99, "white")
self.tprint(" Anom Slope %5.3f" % summary["anom_slope"][0], 99, "white")
self.tprint(" Observations %7d %7d %7d" %
tuple(summary["total_obs"]), 99, "white")
self.tprint(" Unique Observations %7d %7d %7d\n" %
tuple(summary["unique_obs"]), 99, "white")
def print_plots(self, results):
"""
Display plots on the commandline
Possible titles
plot_titles = [
'I/sigma, Mean Mn(I)/sd(Mn(I))',
'Average I, RMS deviation, and Sd',
'Completeness',
'RMS correlation ration',
'Imean/RMS scatter',
'Rmerge, Rfull, Rmeas, Rpim vs. Resolution',
'Radiation Damage',
'Rmerge vs Frame',
'Redundancy',
'Anomalous & Imean CCs vs Resolution'
]
"""
# Plot as long as JSON output is not selected
if self.settings.get("show_plots", True) and (not self.settings.get("json", False)):
plots = results["plots"]
# Determine the open terminal size
term_size = os.popen('stty size', 'r').read().split()
plot_type = "Rmerge vs Frame"
if plot_type in plots:
plot_data = plots[plot_type]["data"]
# plot_params = plots[plot_type]["parameters"]
# Get each subplot
raw = False
# smoothed = False
for subplot in plot_data:
if subplot["parameters"]["linelabel"] == "Rmerge":
raw = subplot
# Determine plot extent
y_array = numpy.array(raw["series"][0]["ys"])
y_max = y_array.max() * 1.1
y_min = 0 # max(0, (y_array.min() - 10))
x_array = numpy.array(raw["series"][0]["xs"])
x_max = x_array.max()
x_min = x_array.min()
gnuplot = subprocess.Popen(["gnuplot"],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
gnuplot.stdin.write("""set term dumb %d,%d
set title 'Rmerge vs. Batch'
set xlabel 'Image #'
set ylabel 'Rmerge' rotate by 90 \n""" %
(int(term_size[1])-20, 30))
# Create the plot string
plot_string = "plot [%d:%d] [%f:%f] " % (x_min, x_max, y_min, y_max)
plot_string += "'-' using 1:2 title 'Rmerge' with lines\n"
# plot_string += "'-' using 1:2 title 'Smooth' with points\n"
gnuplot.stdin.write(plot_string)
# Run through the data and add to gnuplot
for plot in (raw, ): #smoothed):
# plot = plot_data["data"][i]
xs = plot["series"][0]["xs"]
ys = plot["series"][0]["ys"]
# print xs
# print ys
for i, j in zip(xs, ys):
gnuplot.stdin.write("%f %f\n" % (i, j))
gnuplot.stdin.write("e\n")
# Now plot!
gnuplot.stdin.flush()
time.sleep(2)
gnuplot.terminate()
def print_info(self):
"""
Print information regarding programs utilized by RAPD
"""
self.logger.debug('AutoindexingStrategy::print_info')
# try:
self.tprint(arg="\nRAPD integration uses:", level=99, color="blue")
"""
'\n\nRAPD used the following programs for integrating and scaling the dataset:\n',
' XDS - \n',
' "XDS", W. Kabsch (2010) Acta Cryst. D66, 125-132.\n',
' "Integration, scaling, space-group assignment and post-refinement",',
' W. Kabsch (2010) Acta Cryst. D66, 133-144.\n',
' pointless and aimless - \n',
' "Scaling and assessment of data quality", P.R.',
' Evans (2006) Acta Cryst. D62, 72-82.\n',
' "An introduction to data reduction: space-group',
' determination and intensity statistics,',
' P.R. Evans (2011) Acta Cryst. D67, 282-292\n',
' "How good are my data and what is the resolution?"',
' P.R. Evans and G.N. Murshudov (2013) Acta Cryst. D66,',
' 1204-1214.\n',
' truncate, freerflag, and mtz2various - \n',
' "The CCP4 Suite: Programs for Protein ',
'Crystallography". Acta Cryst. D50, 760-763 \n',
' xdsstat - \n http://strucbio.biologie.',
'uni-konstanz.de/xdswiki/index.php/Xdsstat\n',
'\n</pre></div></div></body>'
]
"""
info_string = """ XDS
"XDS", W. Kabsch (2010) Acta Cryst. D66, 125-132.
"Integration, scaling, space-group assignment and post-refinement",
W. Kabsch (2010) Acta Cryst. D66, 133-144.
Pointless & Aimless
"Scaling and assessment of data quality", P.R. Evans (2006) Acta Cryst.
D62, 72-82.
"An introduction to data reduction: space-group determination and
intensity statistics", P.R. Evans (2011) Acta Cryst. D67, 282-292.
"How good are my data and what is the resolution?", P.R. Evans and
G.N. Murshudov (2013) Acta Cryst. D66, 1204-1214.
"""
self.tprint(arg=info_string, level=99, color="white")
self.logger.debug(info_string)
def write_json(self, results):
"""Write a file with the JSON version of the results"""
json_string = json.dumps(results)
# Output to terminal?
if self.settings["json"]:
print json_string
# Write a file
with open("result.json", 'w') as outfile:
outfile.writelines(json_string)
class DataHandler(threading.Thread):
"""
Handles the data that is received from the incoming clientsocket
Creates a new process by instantiating a subclassed multiprocessing.Process
instance which will act on the information which is passed to it upon
instantiation. That class will then send back results on the pipe
which it is passed and Handler will send that up the clientsocket.
"""
def __init__(self, input, tprint=False, logger=False, verbose=True):
threading.Thread.__init__(self)
self.input = input
self.verbose = verbose
# If the logging instance is passed in...
if logger:
self.logger = logger
else:
# Otherwise get the logger Instance
self.logger = logging.getLogger("RAPDLogger")
self.logger.debug("DataHandler.__init__")
# Store tprint for use throughout
if tprint:
self.tprint = tprint
# Dead end if no tprint passed
else:
def func(arg=False, level=False, verbosity=False, color=False):
pass
self.tprint = func
self.start()
def run(self):
# Create a pipe to allow interprocess communication.
#parent_pipe,child_pipe = Pipe()
# Instantiate the integration case
tmp = RapdAgent(None, self.input, self.tprint, self.logger)
# Print out what would be sent back to the RAPD caller via the pipe
# self.logger.debug parent_pipe.recv()
if __name__ == '__main__':
# Set up logging
LOG_FILENAME = '/gpfs5/users/necat/David/process/temp3/fast_integration.logger'
logger = logging.getLogger('RAPDLogger')
logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(
LOG_FILENAME, maxBytes=1000000, backupCount=5)
formatter = logging.Formatter('%(asctime)s - %(message)s')
handler.setFormatter(formatter)
    logger.addHandler(handler)
# Construct test input
command = 'INTEGRATE'
dirs = { 'images' : \
'/gpfs6/users/necat/test_data/lyso/',
'data_root_dir' : 'gpfs6/users/necat/',
'work' : '/gpfs5/users/necat/David/process/temp3/',
'html' : '/gpfs5/users/necat/David/process/temp3/',
'user' : '/home/dneau/RAPD_testing/test/'}
image_data = {'osc_start' : '0.00',
'osc_range' : '0.10',
'size1' : '2463',
'size2' : '2527',
'image_prefix' : 'lysozym-1',
'beamline' : '24_ID_C',
'ID' : 'lysozym-1_1',
'detector' : 'PILATUS',
'distance' : '380.00',
'x_beam' : '215.1',
'y_beam' : '211.2',
'pixel_size' : '0.172',
'wavelength' : '0.9999',
'run_number' : '1',
'twotheta' : 0.0,
'ccd_image_saturation' : '65535',
                  'directory' : '/gpfs6/users/necat/test_data/lyso/',
'process_id' : '0',
'fullname' : \
'/gpfs6/users/yale/Pyle_Aug11/image/marco/GIIi/mm2-2/mm2-2_1_005.img' }
run_data = {'distance' : '380.0',
'image_prefix' : 'lysozym-1',
'run_number' : '1',
'start' : 1,
'time' : 1.0,
'directory' : '/gpfs6/users/necat/test_data/lyso/',
'total' : 500}
data = {'image_data' : image_data,
'run_data' : run_data}
settings = {'spacegroup' : 'P41212',
'work_directory' : '/home/dneau/RAPD_testing/test/mosflm_test',
'work_dir_override' : 'False',
'anomalous' : 'False',
'multiprocessing' : 'True',
'ram_integrate' : False,
'ram_nodes' : [['compute-0-15', 'compute-0-1', 'compute-0-2', 'compute-0-3', 'compute-0-4',
'compute-0-5','compute-0-6', 'compute-0-7', 'compute-0-8', 'compute-0-9',
'compute-0-10', 'compute-0-11', 'compute-0-12', 'compute-0-13',
'compute-0-14'],
[1, 61, 121, 181, 241, 301, 361, 421, 481, 541, 601, 661, 721, 781, 841],
[60, 120, 180, 240, 300, 360, 420, 480, 540, 600, 660, 720, 780, 840, 900]
],
'ram_cleanup' : False
}
controller_address = ['127.0.0.1' , 50001]
input = [command, dirs, data, settings, controller_address]
# Call the handler.
    T = DataHandler(input, logger=logger)
| agpl-3.0 | 9,169,712,204,212,176,000 | 41.180723 | 148 | 0.501756 | false |
vencejo/LSystem-en-Minecraft- | math3D.py | 1 | 3574 | ''' Program based on the work of Daniel Bates http://www.cl.cam.ac.uk/~db434/
whose source code can be seen at: http://www.cl.cam.ac.uk/~db434/files/setblockdemo.py '''
from math import sin, cos, radians, degrees, sqrt, pow, acos
class coordinate3d:
"""Class used to represent a point in 3D space."""
def __init__(self,x,y,z):
self.x = x
self.y = y
self.z = z
def __add__(self, other):
return coordinate3d(self.x+other.x, self.y+other.y, self.z+other.z)
def __mul__(self, other):
#Multiplicacion por un escalar
return coordinate3d(self.x*other, self.y*other, self.z*other)
def __str__(self):
return str([self.x, self.y, self.z])
def modulo(self):
return sqrt(pow(self.x,2)+pow(self.y,2)+pow(self.z,2))
class transformation:
"""Representation of homogeneous matrices used to apply transformations to
coordinates - using a 4x4 matrix allows shifts as well as scales/rotations.
Transformations can be combined by multiplying them together."""
def __init__(self, matrix):
self.matrix = matrix
def __mul__(self, other):
if isinstance(other, transformation):
return self.compose(other)
elif isinstance(other, coordinate3d):
return self.apply(other)
else:
print "Can't multiply transformation by {0}".format(type(other))
def compose(self, other):
"""Compose this transformation with another, returning a new transformation."""
newmatrix = [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
for i in range(4):
for j in range(4):
for k in range(4):
newmatrix[i][k] += self.matrix[i][j]*other.matrix[j][k]
return transformation(newmatrix)
def apply(self, point):
"""Apply this transformation to a coordinate, returning a new coordinate."""
return coordinate3d(
self.matrix[0][0]*point.x + self.matrix[0][1]*point.y + self.matrix[0][2]*point.z + self.matrix[0][3],
self.matrix[1][0]*point.x + self.matrix[1][1]*point.y + self.matrix[1][2]*point.z + self.matrix[1][3],
self.matrix[2][0]*point.x + self.matrix[2][1]*point.y + self.matrix[2][2]*point.z + self.matrix[2][3])
## Transformation functions
def identity():
return transformation([[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]])
def shift(x,y,z):
"""Move by a given offset."""
return transformation([[1,0,0,x],
[0,1,0,y],
[0,0,1,z],
[0,0,0,1]])
def rotationx(angle):
"""Rotate about the x axis by the given number of degrees."""
angle = radians(angle)
return transformation([[1, 0, 0, 0],
[0, cos(angle), sin(angle), 0],
[0, -sin(angle), cos(angle), 0],
[0, 0, 0, 1]])
def rotationy(angle):
"""Rotate about the y axis by the given number of degrees."""
angle = radians(angle)
return transformation([[ cos(angle), 0, sin(angle), 0],
[ 0, 1, 0, 0],
[-sin(angle), 0, cos(angle), 0],
[ 0, 0, 0, 1]])
def rotationz(angle):
"""Rotate about the z axis by the given number of degrees."""
angle = radians(angle)
return transformation([[ cos(angle), sin(angle), 0, 0],
[-sin(angle), cos(angle), 0, 0],
[ 0, 0, 1, 0],
[ 0, 0, 0, 1]])
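# Example usage (illustrative values only): compose a translation with a rotation
# and apply the resulting transformation to a point.
if __name__ == '__main__':
    p = coordinate3d(1, 0, 0)
    t = shift(0, 5, 0) * rotationz(90)   # rotate about z first, then translate
    print(t * p)                         # the transformed point
    print((t * p).modulo())              # its distance from the origin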
| gpl-2.0 | 2,883,782,418,861,403,600 | 35.10101 | 110 | 0.547566 | false |
EDUlib/edx-platform | common/djangoapps/student/migrations/0031_auto_20200317_1122.py | 1 | 1461 | # Generated by Django 1.11.29 on 2020-03-17 11:22
import django.core.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('student', '0030_userprofile_phone_number'),
]
operations = [
migrations.CreateModel(
name='AccountRecoveryConfiguration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('csv_file', models.FileField(help_text='It expect that the data will be provided in a csv file format with first row being the header and columns will be as follows: username, email, new_email', upload_to='', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['csv'])])),
('changed_by', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
)
]
| agpl-3.0 | -8,898,915,915,143,417,000 | 47.7 | 355 | 0.622861 | false |
awacha/cct | cct/processinggui/project/subtractor.py | 1 | 13120 | import logging
import re
import typing
from multiprocessing.managers import SyncManager
from multiprocessing.pool import Pool
from PyQt5 import QtCore
from .backgroundrunner import JobRecord, BackgroundRunner
from ...core.processing.subtractingjob import SubtractingJob
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class SubtractionJobRecord(JobRecord):
ValidMethods = ['None', 'Constant', 'Interval', 'Power-law']
samplename: str
backgroundname: typing.Optional[str]
_scalingmethod: str # 'None', 'Constant', 'Interval', 'Power-law'
scalingparameters: typing.Any
def __init__(self, lockmanager: SyncManager, samplename: str,
backgroundname: typing.Optional[str] = None, scalingmethod: str = 'None',
scalingparameters: typing.Any = None):
super().__init__(lockmanager)
self.samplename = samplename
self.backgroundname = backgroundname
self.scalingmethod = scalingmethod
self.scalingparameters = scalingparameters
@property
def scalingmethod(self) -> str:
return self._scalingmethod
@scalingmethod.setter
def scalingmethod(self, newvalue: str):
if newvalue not in ['None', 'Constant', 'Interval', 'Power-law']:
raise ValueError('Invalid scaling method: "{}" (type: {})'.format(newvalue, type(newvalue)))
self._scalingmethod = newvalue
if newvalue == 'None':
self.scalingparameters = None
elif newvalue == 'Constant':
self.scalingparameters = 0
elif newvalue == 'Interval':
self.scalingparameters = (0, 0, 10) # qmin, qmax, qcount
elif newvalue == 'Power-law':
self.scalingparameters = (0, 0, 10, None) # qmin, qmax, qcount, exponent
else:
assert False
def formatParameters(self) -> str:
if self._scalingmethod == 'None':
return '--'
elif self._scalingmethod == 'Constant':
return '{:.6f}'.format(self.scalingparameters)
elif self._scalingmethod == 'Interval':
return '[{:.3f}, {:.3f}]'.format(*self.scalingparameters)
elif self._scalingmethod == 'Power-law':
return '[{:.3f}, {:.3f}]'.format(*self.scalingparameters)
else:
raise ValueError('Invalid scaling method: {}'.format(self._scalingmethod))
def submit(self, jobid: int, pool: Pool, project: "Project"):
super().submit(jobid, pool, project)
if self.backgroundname is None:
return
self.asyncresult = pool.apply_async(
SubtractingJob.run,
kwds={'jobid': jobid,
'h5writerLock': project.h5Lock,
'killswitch': self.killswitch,
'resultsqueue': self.messageQueue,
'h5file': project.config.hdf5,
'samplename': self.samplename,
'backgroundname': self.backgroundname,
'subtractmode': self.scalingmethod,
'subtractparameters': self.scalingparameters
})
def reap(self, project: "Project"):
self.lastProcessingResult = self.asyncresult.get()
self.statusmessage = 'Finished in {:.2f} seconds.'.format(self.lastProcessingResult.time_total)
self.asyncresult = None
super().reap(project)
def __repr__(self) -> str:
if self.scalingmethod == 'None':
subtractparameters = '()'
elif self.scalingmethod == 'Constant':
subtractparameters = '{:.16g}'.format(self.scalingparameters)
elif self.scalingmethod == 'Interval':
subtractparameters = '(qmin={:.16g}, qmax={:.16g}, qcount={:d})'.format(*self.scalingparameters)
elif self.scalingmethod == 'Power-law':
subtractparameters = '(qmin={:.16g}, qmax={:.16g}, qcount={:d}, exponent={})'.format(
self.scalingparameters[0],
self.scalingparameters[1],
self.scalingparameters[2],
                '{:.16g}'.format(self.scalingparameters[3]) if self.scalingparameters[3] is not None else 'None'
)
else:
raise ValueError('Unknown scaling method: {}'.format(self.scalingmethod))
return "SubtractionJobRecord(sample='{}', background='{}', mode='{}', params='{}')".format(
self.samplename,
self.backgroundname if self.backgroundname is not None else '-- None --',
self.scalingmethod,
subtractparameters)
@classmethod
def fromString(cls, string: str, lockManager: SyncManager):
m = re.match(r"SubtractionJobRecord\(sample='(?P<samplename>.+)', background='(?P<backgroundname>.+)'"
r", mode='(?P<mode>.+)', params='(?P<params>.+)'\)", string)
if m is None:
raise ValueError('Error interpreting linearized subtraction job record: {}'.format(string))
samplename = m['samplename']
backgroundname = m['backgroundname']
if backgroundname == '-- None --':
backgroundname = None
mode = m['mode']
if mode not in ['None', 'Constant', 'Interval', 'Power-law']:
raise ValueError('Unknown subtraction mode: {}'.format(mode))
if mode == 'None':
if m['params'] != '()':
raise ValueError('Subtraction mode "None" does not need any parameters.')
params = None
elif mode == 'Constant':
params = float(m['params'])
elif mode == 'Interval':
m1 = re.match(r'\(qmin=(?P<qmin>.+), qmax=(?P<qmax>.+), qcount=(?P<qcount>.+)\)', m['params'])
params = (float(m1['qmin']), float(m1['qmax']), int(m1['qcount']))
elif mode == 'Power-law':
            m1 = re.match(r'\(qmin=(?P<qmin>.+), qmax=(?P<qmax>.+), qcount=(?P<qcount>.+), exponent=(?P<exponent>.+)\)',
m['params'])
params = (float(m1['qmin']), float(m1['qmax']), int(m1['qcount']),
None if m1['exponent'] == 'None' else float(m1['exponent']))
else:
assert False
return cls(lockManager, samplename, backgroundname, mode, params)
class Subtractor(BackgroundRunner):
_columnnames = ['Sample', 'Background', 'Scaling method', 'Scaling parameters', 'Result'] # fill this
    _jobs: typing.List[SubtractionJobRecord]
def __init__(self, project: "Project"):
super().__init__(project)
self.fromConfig()
def data(self, index: QtCore.QModelIndex, role: int = QtCore.Qt.DisplayRole) -> typing.Any:
if role == QtCore.Qt.DisplayRole:
if index.column() == 0:
return self._jobs[index.row()].samplename
elif index.column() == 1:
return self._jobs[index.row()].backgroundname if self._jobs[
index.row()].backgroundname is not None else '-- None --'
elif index.column() == 2:
return self._jobs[index.row()].scalingmethod
elif index.column() == 3:
return self._jobs[index.row()].formatParameters()
elif index.column() == 4:
return self._jobs[index.row()].statusmessage
elif role == QtCore.Qt.EditRole:
if index.column() == 3:
return self._jobs[index.row()].scalingparameters
return super().data(index, role)
def setData(self, index: QtCore.QModelIndex, value: typing.Any, role: int = None) -> bool:
if index.column() == 3 and role == QtCore.Qt.EditRole:
self._jobs[index.row()].scalingparameters = value
self.dataChanged.emit(self.index(index.row(), index.column()), self.index(index.row(), index.column()))
self.toConfig()
return True
elif index.column() == 1 and role == QtCore.Qt.EditRole:
self._jobs[index.row()].backgroundname = value
self.dataChanged.emit(self.index(index.row(), index.column()), self.index(index.row(), index.column()))
self.toConfig()
return True
elif index.column() == 2 and role == QtCore.Qt.EditRole:
self._jobs[index.row()].scalingmethod = value
            self.dataChanged.emit(self.index(index.row(), index.column()), self.index(index.row(), self.columnCount() - 1))
self.toConfig()
return True
return super().setData(index, value, role)
def flags(self, index: QtCore.QModelIndex) -> QtCore.Qt.ItemFlag:
# edit this to your needs
if index.column() == 0:
return QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsUserCheckable
elif index.column() == 1:
return QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled
elif index.column() == 2:
if self[index].backgroundname is not None:
return QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled
else:
return QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsSelectable
elif index.column() == 3:
if self[index].backgroundname is not None and self[index].scalingmethod != 'None':
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsEditable
else:
return QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsSelectable
elif index.column() == 4:
return QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
def removeRow(self, row: int, parent: QtCore.QModelIndex = None, updateConfig: bool = True) -> bool:
return self.removeRows(row, 1, parent, updateConfig=updateConfig)
def removeRows(self, row: int, count: int, parent: QtCore.QModelIndex = None, updateConfig: bool = True) -> bool:
if isinstance(parent, QtCore.QModelIndex) and parent.isValid():
raise ValueError('This is a flat model')
self.beginRemoveRows(QtCore.QModelIndex(), row, row + count)
for i in reversed(range(row, row + count)):
del self._jobs[i]
self.endRemoveRows()
if updateConfig:
self.toConfig()
return True
def add(self, samplename: str, updateConfig: bool = True):
# edit this to your needs
self.beginInsertRows(QtCore.QModelIndex(), self.rowCount(), self.rowCount() + 1)
self._jobs.append(SubtractionJobRecord(self.project.multiprocessingmanager, samplename))
self.endInsertRows()
if updateConfig:
self.toConfig()
def __contains__(self, item: str):
return bool([d for d in self._jobs if d.samplename == item])
def samplenames(self) -> typing.List[str]:
return sorted(set([d.samplename for d in self._jobs]))
def __getitem__(self, item):
return self._jobs[item] if not isinstance(item, QtCore.QModelIndex) else self._jobs[item.row()]
def _recreateJobs(self):
# we don't need this.
samplenames = sorted(self.project.headerList.samples())
self.fromConfig()
# add missing samples
for sn in samplenames:
if sn not in self:
self.add(sn, updateConfig=False)
# remove invalid samples
for invalidjob in [j for j in self._jobs if j.samplename not in samplenames]:
rowindex = self._jobs.index(invalidjob)
self.beginRemoveRows(QtCore.QModelIndex(), rowindex, rowindex)
self._jobs.remove(invalidjob)
self.endRemoveRows()
# set background name to None where the original background name is now invalid
for invalidbg in [j for j in self._jobs if j.backgroundname not in samplenames]:
rowindex = self._jobs.index(invalidbg)
invalidbg.backgroundname = None
self.dataChanged.emit(self.index(rowindex, 0), self.index(rowindex, self.columnCount()))
self.toConfig()
def updateList(self):
"""Update the list with new sample names and remove invalid ones."""
self._recreateJobs()
def toConfig(self):
logger.debug('Subtractor.toConfig')
if len(list(self.project.headerList.samples())) == 0:
logger.debug('Not clobbering config')
return # do not clobber the config
self.project.config.subtraction = '; '.join([repr(j) for j in self._jobs])
def fromConfig(self):
self.beginResetModel()
self._jobs = []
for string in self.project.config.subtraction.split(';'):
if not string:
continue
self._jobs.append(SubtractionJobRecord.fromString(string.strip(), self.project.multiprocessingmanager))
self.endResetModel()
def configItemChanged(self, section: str, itemname: str, newvalue: typing.Any):
if itemname == 'subtraction':
self.fromConfig()
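# Illustrative round-trip sketch (hypothetical sample names; a multiprocessing
# SyncManager instance is assumed as the lock manager). repr() and fromString()
# are what Subtractor.toConfig()/fromConfig() use to persist the job list:
#
#   rec = SubtractionJobRecord(manager, 'sample1', 'empty_beam', 'Interval', (0.5, 1.2, 10))
#   rec.formatParameters()                          # -> '[0.500, 1.200]'
#   SubtractionJobRecord.fromString(repr(rec), manager)   # rebuilds an equivalent record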
| bsd-3-clause | -7,995,335,344,592,896,000 | 46.194245 | 136 | 0.603049 | false |
sunnynarayan/Complaint-Redressal | crs/wardenOffice/views.py | 1 | 12845 | from django.shortcuts import render
from django.shortcuts import render, redirect
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.template import RequestContext, loader
from django.contrib import auth
from django.core.context_processors import csrf
from django.contrib.auth.models import User
from django.contrib.auth import authenticate,login,logout
from django.contrib.sessions.models import Session
import hashlib
import datetime
from login.models import *
import re
from student.views import *
def isWardenOffice(request):
user_type = request.session.get("user_type",'')
if user_type != "wardenOffice":
return False
else:
return True
def wardenOfficeComplainView(request):
if not (isWardenOffice(request)):
return redirect('/crs/')
uid=request.session.get('uid')
    # PublicComplainObjects = Complainlink.objects.all().filter(wardenid = uid).filter(studid = 0);
# query1 = 'SELECT * FROM complainLink WHERE woID = ' + str(uid) + ' AND studID = 0'
# query2 = 'SELECT * FROM complainLink WHERE woID = ' + str(uid) + ' AND studID != 0'
query1 = 'SELECT * FROM `complain`, complainLink WHERE (complain.status = 2 OR complain.status = 22 OR complain.status=12 OR complain.status=3 OR complain.status=23 OR complain.status=13) AND (complainLink.woID = ' + str(uid) + ' AND complainLink.studID = 0) AND complain.cid = complainLink.CID'
query2 = 'SELECT * FROM `complain`, complainLink WHERE (complain.status = 2 OR complain.status=22 OR complain.status=12 OR complain.status=3 OR complain.status=23 OR complain.status=13) AND (complainLink.woID = ' + str(uid) + ' AND complainLink.studID != 0) AND complain.cid = complainLink.CID'
PublicComplainObjects = Complainlink.objects.raw(query1)
PrivateComplainObjects = Complainlink.objects.raw(query2)
# PrivateComplainObjects=Complainlink.objects.all().filter(wardenid = uid).exclude(studid = 0);
Privatelist=[];
Publiclist=[];
for num in PrivateComplainObjects:
numCid=num.cid
Privatelist.append(Complain.objects.get(cid=numCid)); #username in fac table
for num in PublicComplainObjects:
numCid=num.cid
Publiclist.append(Complain.objects.get(cid=numCid))
return render_to_response('wardenOffice/wardenHome.html',{'list1' : Publiclist, 'list2':Privatelist, 'msg': request.session.get('name')});
def wardenOfficeViewComplain(complainObject):
# indexF = request.GET.get('CID')
# index = int(indexF)
# qry = "SELECT * FROM complain a, complainLink b WHERE b.CID = " + str(index) + " AND (b.secID = " + str(request.session.get('uid')) + " OR b.studID = 0 ) AND b.CID = a.cid"
# complainObject = Complain.objects.raw(qry)
# return render_to_response("secretary/complainDetail.html", {'item': complainObject[0]})
comment = []
documents = []
try:
documents.extend(Document.objects.get(cid=complainObject[0].cid))
except:
pass
try:
comment.extend(Comment.objects.filter(cid = complainObject[0].cid))
except:
pass
return render_to_response("wardenOffice/complainDetail.html", {'item': complainObject[0],'documents':documents,'comment':comment})
def wardenOfficeHome(request):
if not (isWardenOffice(request)):
return redirect('/crs/')
return render_to_response('wardenOffice/wardenHome.html', {'msg' : request.session.get('name') });
def forwardToWarden(request):
if not (isWardenOffice(request)):
return redirect('/crs/')
complainArray=request.POST.getlist('complain')
length = len(complainArray)
for x in range(0,length):
comid = complainArray[x]
ClO =Complainlink.objects.get(cid=comid)
hostel=(Complain.objects.get(cid=comid)).hostel
wardenId = (Warden.objects.get(hostel=hostel)).fid
ClO.wardenid = wardenId
obj=Complain.objects.get(cid=ClO.cid)
ClO.save()
if obj.status==2:
obj.status=3
obj.save()
elif obj.status==12:
obj.status=13
obj.save()
else:
obj.status=23
obj.save()
# complainObj.wardenID = wardenID
# complainObj.save()
return redirect('../wardenComplain');
def getHostelType(hostelstr):
if hostelstr == "Ashoka":
return 1
elif hostelstr == "Aryabhatta":
return 2
elif hostelstr == "Chanakya1":
return 3
elif hostelstr == "Chanakya2":
return 4
elif hostelstr == "GBH":
return 5
else:
return 0
def isAddressed(address):
if address == "Addressed":
return 0
elif address == "New":
return 1
else:
return 2
def complainType(typec):
if typec=="Mess":
return 1
elif typec=="Environment":
return 2
elif typec=="Technical":
return 3
elif typec=="Maintenance":
return 4
elif typec=="Mess":
return 5
else:
return 6
def showHostelWiseComplain(request,hostel,isadd):
if not (isWardenOffice(request)):
return redirect('/crs/')
uid=request.session.get('uid')
hostelType = getHostelType(hostel)
isadd=isAddressed(isadd)
if hostelType == 0:
return HttpResponse('error')
if isadd==1:
query1 = 'SELECT * FROM `complain`, complainLink WHERE (complain.status = 2 OR complain.status = 22 OR complain.status=12 OR complain.status=3 OR complain.status=23 OR complain.status=13) AND (complainLink.woID = ' + str(uid) + ' AND complainLink.studID = 0) AND complain.cid = complainLink.CID AND complain.hostel = ' + str(hostelType)
query2 = 'SELECT * FROM `complain`, complainLink WHERE (complain.status = 2 OR complain.status=22 OR complain.status=12 OR complain.status=3 OR complain.status=23 OR complain.status=13) AND (complainLink.woID = ' + str(uid) + ' AND complainLink.studID != 0) AND complain.cid = complainLink.CID AND complain.hostel = ' + str(hostelType)
elif isadd==0:
query1 = 'SELECT * FROM `complain`, complainLink WHERE complain.status=0 AND (complainLink.woID = ' + str(uid) + ' AND complainLink.studID = 0) AND complain.cid = complainLink.CID AND complain.hostel = ' + str(hostelType)
query2 = 'SELECT * FROM `complain`, complainLink WHERE complain.status=0 AND (complainLink.woID = ' + str(uid) + ' AND complainLink.studID != 0) AND complain.cid = complainLink.CID AND complain.hostel = ' + str(hostelType)
else:
return HttpResponse('error')
PublicComplainObjects = Complainlink.objects.raw(query1)
PrivateComplainObjects = Complainlink.objects.raw(query2)
# PrivateComplainObjects=Complainlink.objects.all().filter(wardenid = uid).exclude(studid = 0);
Privatelist=[];
Publiclist=[];
for num in PrivateComplainObjects:
numCid=num.cid
Privatelist.append(Complain.objects.get(cid=numCid)); #username in fac table
for num in PublicComplainObjects:
numCid=num.cid
Publiclist.append(Complain.objects.get(cid=numCid));
return render_to_response('wardenOffice/wardenHome.html',{'list1' : Publiclist, 'list2':Privatelist, 'msg': request.session.get('name')});
def showHostelTypeWiseComplain(request,hostel,typeComplain):
if not (isWardenOffice(request)):
return redirect('/crs/')
uid=request.session.get('uid')
hostelType = getHostelType(hostel)
typec = complainType(typeComplain)
if hostelType == 0 or typec==6:
return HttpResponse('error')
query1 = "SELECT * FROM complain, complainLink WHERE (complain.status = 2 OR complain.status = 22 OR complain.status=12 OR complain.status=3 OR complain.status=23 OR complain.status=13) AND (complainLink.woID = " + str(uid) + " AND complainLink.studID = 0) AND complain.cid = complainLink.CID AND complain.hostel = " + str(hostelType) + " AND complain.type = " + str(typec)
query2 = 'SELECT * FROM complain, complainLink WHERE (complain.status = 2 OR complain.status=22 OR complain.status=12 OR complain.status=3 OR complain.status=23 OR complain.status=13) AND (complainLink.woID = ' + str(uid) + ' AND complainLink.studID != 0) AND complain.cid = complainLink.CID AND complain.hostel = ' + str(hostelType) + ' AND complain.type = ' + str(typec)
PublicComplainObjects = Complainlink.objects.raw(query1)
PrivateComplainObjects = Complainlink.objects.raw(query2)
    # PrivateComplainObjects=Complainlink.objects.all().filter(wardenid = uid).exclude(studid = 0);
Privatelist=[];
Publiclist=[];
for num in PrivateComplainObjects:
numCid=num.cid
Privatelist.append(Complain.objects.get(cid=numCid)); #username in fac table
for num in PublicComplainObjects:
numCid=num.cid
Publiclist.append(Complain.objects.get(cid=numCid));
return render_to_response('wardenOffice/wardenHome.html',{'list1' : Publiclist, 'list2':Privatelist, 'msg': request.session.get('name')});
def showHostelAdUnadWiseComplain(request,hostel,typec,isadd):
if not (isWardenOffice(request)):
return redirect('/crs/')
uid=request.session.get('uid')
hostelType = getHostelType(hostel)
typec=complainType(typec)
addressed=isAddressed(isadd)
if hostelType==0 or typec == 6 or addressed == 2:
return HttpResponse('error1')
if addressed==1:
query1 = 'SELECT * FROM complain, complainLink WHERE (complain.status = 2 OR complain.status = 22 OR complain.status=12 OR complain.status=3 OR complain.status=23 OR complain.status=13) AND (complainLink.woID = ' + str(uid) + ' AND complainLink.studID = 0) AND complain.cid = complainLink.CID AND complain.hostel = ' + str(hostelType) + ' AND complain.type = ' + str(typec)
query2 = 'SELECT * FROM `complain`, complainLink WHERE (complain.status = 2 OR complain.status=22 OR complain.status=12 OR complain.status=3 OR complain.status=23 OR complain.status=13) AND (complainLink.woID = ' + str(uid) + ' AND complainLink.studID != 0) AND complain.cid = complainLink.CID AND complain.hostel = ' + str(hostelType) + ' AND complain.type = ' + str(typec)
elif addressed==0:
query1 = 'SELECT * FROM `complain`, complainLink WHERE complain.status=0 AND (complainLink.woID = ' + str(uid) + ' AND complainLink.studID = 0) AND complain.cid = complainLink.CID AND complain.hostel = ' + str(hostelType) + ' AND complain.type = ' + str(typec)
query2 = 'SELECT * FROM `complain`, complainLink WHERE complain.status=0 AND (complainLink.woID = ' + str(uid) + ' AND complainLink.studID != 0) AND complain.cid = complainLink.CID AND complain.hostel = ' + str(hostelType) + ' AND complain.type = ' + str(typec)
else:
return HttpResponse('error2')
PublicComplainObjects = Complainlink.objects.raw(query1)
PrivateComplainObjects = Complainlink.objects.raw(query2)
# PrivateComplainObjects=Complainlink.objects.all().filter(wardenid = uid).exclude(studid = 0);
Privatelist=[];
Publiclist=[];
for num in PrivateComplainObjects:
numCid=num.cid
Privatelist.append(Complain.objects.get(cid=numCid)); #username in fac table
for num in PublicComplainObjects:
numCid=num.cid
Publiclist.append(Complain.objects.get(cid=numCid));
return render_to_response('wardenOffice/wardenHome.html',{'list1' : Publiclist, 'list2':Privatelist, 'msg': request.session.get('name')});
def showHostelSecWiseInfo(request,hostel):
if not (isWardenOffice(request)):
return redirect('/crs/')
uid=request.session.get('uid')
hostelType = getHostelType(hostel)
if hostelType == 0:
return HttpResponse('error')
obj1=Secretary.objects.filter(hostel=hostelType)
stud=[]
for sec in obj1:
stud.append(Student.objects.get(uid=sec.uid))
# obj=Student.objects.filter()
# return HttpResponse(obj)
return render_to_response('wardenOffice/viewSecretary.html',{'list1':obj1,'list2':stud})
def showHostelStudWiseInfo(request,hostel):
if not (isWardenOffice(request)):
return redirect('/crs/')
uid=request.session.get('uid')
hostelType = getHostelType(hostel)
if hostelType == 0:
return HttpResponse('error')
obj=Student.objects.filter(hostel=hostelType)
# return HttpResponse(obj)
return render_to_response('wardenOffice/viewStudent.html',{'list':obj})
def viewSecretary(request):
if not (isWardenOffice(request)):
return redirect('/crs/')
# try:
uid=request.session.get('uid')
ashokaseclist=[];
aryabhattaseclist=[];
chanakya1seclist=[];
chanakya2seclist=[];
test=[1,2,3,4];
for num in test:
ashokaseclist.append(Secretary.objects.filter(hostel = 0).filter(type = num));
aryabhattaseclist.append(Secretary.objects.filter(hostel = 1).filter(type = num));
chanakya1seclist.append(Secretary.objects.filter(hostel = 2).filter(type = num));
chanakya2seclist.append(Secretary.objects.filter(hostel = 3).filter(type = num));
return render_to_response('wardenOffice/wardenOfficeViewComplain.html',{'list1':ashokaseclist, 'list2' :aryabhattaseclist,'list3':chanakya1seclist,'list4':chanakya2seclist});
# except:
# return render_to_response('login/loginPage.html');
# def ForwardComplain(request):
# try:
# uid=request.session.get('uid');
#
# except:
# return render_to_response('login/loginPage.html');
# Create your views here.
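# Note: Django's raw() also accepts query parameters, which avoids assembling the
# SQL strings above by hand, e.g. (illustrative):
#   Complainlink.objects.raw(
#       'SELECT * FROM complainLink WHERE woID = %s AND studID = 0', [uid])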
| mit | -603,925,967,535,629,800 | 45.224265 | 376 | 0.720125 | false |
tingelst/pymanopt | pymanopt/manifolds/complexcircle.py | 1 | 2567 | from __future__ import division
import warnings
import numpy as np
import numpy.linalg as la
import numpy.random as rnd
from pymanopt.manifolds.manifold import Manifold
class ComplexCircle(Manifold):
"""
The manifold of complex numbers with unit-modulus.
Description of vectors z in C^n (complex) such that each component z(i)
has unit modulus. The manifold structure is the Riemannian submanifold
structure from the embedding space R^2 x ... x R^2, i.e., the complex
circle is identified with the unit circle in the real plane. This
implementation is based on complexcirclefactory.m from the Manopt MATLAB
package.
"""
def __init__(self, n=1):
if n == 1:
self._name = "Complex circle S^1"
else:
self._name = "Complex circle (S^1)^{:d}".format(n)
self._n = n
def __str__(self):
return self._name
@property
def dim(self):
return self._n
def inner(self, z, v, w):
return v.conj().dot(w).real
def norm(self, x, v):
return la.norm(v)
def dist(self, x, y):
return la.norm(np.arccos((x.conj() * y).real))
@property
def typicaldist(self):
return np.pi * np.sqrt(self._n)
def proj(self, z, u):
return u - (u.conj() * z).real * z
tangent = proj
def ehess2rhess(self, z, egrad, ehess, zdot):
return self.proj(z, (z * egrad.conj()).real * zdot)
def exp(self, z, v):
        y = np.zeros(self._n, dtype=complex)
abs_v = np.abs(v)
mask = abs_v > 0
not_mask = np.logical_not(mask)
y[mask] = (z[mask] * np.cos(abs_v[mask]) +
v[mask] * (np.sin(abs_v[mask]) / abs_v[mask]))
y[not_mask] = z[not_mask]
return y
def retr(self, z, v):
return self._normalize(z + v)
def log(self, x1, x2):
v = self.proj(x1, x2 - x1)
abs_v = np.abs(v)
di = np.arccos((x1.conj() * x2).real)
factors = di / abs_v
factors[di <= 1e-6] = 1
return v * factors
def rand(self):
n = self._n
return self._normalize(rnd.randn(n) + 1j * rnd.randn(n))
def randvec(self, z):
v = rnd.randn(self._n) * (1j * z)
return v / self.norm(z, v)
def transp(self, x1, x2, d):
return self.proj(x2, d)
def pairmean(self, z1, z2):
return self._normalize(z1 + z2)
@staticmethod
def _normalize(x):
"""
Normalize the entries of x element-wise by their absolute values.
"""
return x / np.abs(x)
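# Small usage sketch (illustrative; requires pymanopt to be importable for the
# Manifold base class): take a geodesic step of length 0.1 and check the distance.
if __name__ == "__main__":
    circle = ComplexCircle(3)
    z = circle.rand()              # random unit-modulus point on (S^1)^3
    v = circle.randvec(z)          # unit-norm tangent vector at z
    y = circle.exp(z, 0.1 * v)     # move along the geodesic
    print(circle.dist(z, y))       # should be close to 0.1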
| bsd-3-clause | -8,596,242,755,595,871,000 | 24.929293 | 76 | 0.558239 | false |
DorianDepriester/mtex2abaqus | MTEX2abaqus/AbaqusImport.py | 1 | 2602 | import string
import csv
import os
from abaqusConstants import *
from part import *
from material import *
from section import *
from assembly import *
from load import *
from mesh import *
from visualization import *
def importEBSD(inpFileName):
while True:
fileName, file_extension = os.path.splitext(inpFileName)
# Load grain properties
try:
file = open(fileName+'.csv', "r")
reader = csv.DictReader(file,delimiter='\t',lineterminator='\n',quoting = csv.QUOTE_NONNUMERIC)
phase=[];Xx=[];Xy=[];Xz=[];Yx=[];Yy=[];Yz=[]
for row in reader:
phase.append(row['Phase'],)
Xx.append(row['Xx'],)
Xy.append(row['Xy'],)
Xz.append(row['Xz'],)
Yx.append(row['Yx'],)
Yy.append(row['Yy'],)
Yz.append(row['Yz'],)
file.close()
except IOError:
print 'Error:',fileName+'.csv','not found.'
break
mdbName=os.path.basename(fileName)
# Import INP file
try:
mdb.ModelFromInputFile(name=mdbName,inputFileName=inpFileName)
pk=mdb.models[mdbName].parts.keys()
partName=pk[0]
except IndexError:
print 'Error:',fileName+'.inp','not found.'
break
# Set the new part visible
p1 = mdb.models[mdbName].parts[partName]
session.viewports['Viewport: 1'].setValues(displayedObject=p1)
# Copy sets from assembly to part
a=mdb.models[mdbName].rootAssembly
sets=a.sets
sets_list=sets.keys()
p = mdb.models[mdbName].parts[partName]
for grainID in sets_list:
set_i=sets[grainID]
if grainID.startswith('GRAIN'):
IDs=[j.label for j in set_i.elements]
p.SetFromElementLabels(elementLabels=IDs,name=grainID)
# Assign properties to elements
phaseList=set(phase)
for i in list(phaseList):
mdb.models[mdbName].Material(name=i)
for i in range(0,len(phase)):
sectionID='GRAIN_{:d}'.format(i+1)
mdb.models[mdbName].HomogeneousSolidSection(name=sectionID, material=phase[i],thickness=None)
region = p.sets[sectionID]
p.SectionAssignment(region=region, sectionName=sectionID,offset=0.0,offsetType=MIDDLE_SURFACE, offsetField='',thicknessAssignment=FROM_SECTION)
datumName='ORIENT_{:d}'.format(i+1)
p.DatumCsysByThreePoints(name=datumName, coordSysType=CARTESIAN, origin=(.0,.0,.0), point1=(Xx[i], Xy[i], Xz[i]), point2=(Yx[i], Yy[i], Yz[i]))
id=p.features[datumName].id
orientation = p.datums[id]
p.MaterialOrientation(region=region,orientationType=SYSTEM,axis=AXIS_3,localCsys=orientation,fieldName='',additionalRotationType=ROTATION_NONE, angle=0.0,additionalRotationField='', stackDirection=STACK_3)
break | mit | -6,537,968,607,731,280,000 | 31.818182 | 208 | 0.684858 | false |
xor-xor/electre_diviz | ElectreTriClassAssignments/ElectreTriClassAssignments.py | 1 | 4490 | #!/usr/bin/env python
"""
ElectreTriClassAssignments - computes assignments according to the Electre TRI
method. It generates separate outputs for the conjuctive ('pessimistic') and
disjunctive ('optimistic') assignments.
Usage:
ElectreTriClassAssignments.py -i DIR -o DIR
Options:
-i DIR Specify input directory. It should contain the following files:
alternatives.xml
classes.xml
classes_profiles.xml
outranking.xml
-o DIR Specify output directory. Files generated as output:
assignments_conjuctive.xml
assignments_disjunctive.xml
--version Show version.
-h --help Show this screen.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import traceback
from docopt import docopt
from common import assignments_to_xmcda, create_messages_file, get_dirs, \
get_error_message, get_input_data, get_relation_type, write_xmcda, Vividict
__version__ = '0.2.0'
def assign_class(alternatives, categories_rank, categories_profiles,
outranking):
# sort categories by their rank, but we want the worst one on the 'left'
# - hence 'reverse=True'
categories = [i[0] for i in sorted(categories_rank.items(),
key=lambda x: x[1], reverse=True)]
exploitation = Vividict()
for alternative in alternatives:
# conjuctive ('pessimistic' - from 'best' to 'worst')
conjuctive_idx = 0
for profile_idx, profile in list(enumerate(categories_profiles))[::-1]:
relation = get_relation_type(alternative, profile, outranking)
if relation in ('indifference', 'preference'):
conjuctive_idx = profile_idx + 1
break
else:
continue
# disjunctive ('optimistic' - from 'worst' to 'best')
disjunctive_idx = len(categories_profiles)
for profile_idx, profile in enumerate(categories_profiles):
relation = get_relation_type(profile, alternative, outranking)
if relation == 'preference':
disjunctive_idx = profile_idx
break
else:
continue
exploitation[alternative] = (categories[conjuctive_idx],
categories[disjunctive_idx])
return exploitation
def main():
try:
args = docopt(__doc__, version=__version__)
output_dir = None
input_dir, output_dir = get_dirs(args)
filenames = [
# every tuple below == (filename, is_optional)
('alternatives.xml', False),
('classes.xml', False),
('classes_profiles.xml', False),
('outranking.xml', False),
]
params = [
'alternatives',
'categories_profiles',
'categories_rank',
'outranking',
]
d = get_input_data(input_dir, filenames, params,
comparison_with='boundary_profiles')
assignments = assign_class(d.alternatives, d.categories_rank,
d.categories_profiles, d.outranking)
# uncomment this if you want output combined as a single file (and
# remember to import assignments_as_intervals_to_xmcda):
# xmcda_intervals = assignments_as_intervals_to_xmcda(assignments)
# write_xmcda(xmcda_intervals,
# os.path.join(output_dir, 'assignments_intervals.xml'))
assignments_con = {i[0]: i[1][0] for i in assignments.iteritems()}
xmcda_con = assignments_to_xmcda(assignments_con)
write_xmcda(xmcda_con, os.path.join(output_dir,
'assignments_conjuctive.xml'))
assignments_dis = {i[0]: i[1][1] for i in assignments.iteritems()}
xmcda_dis = assignments_to_xmcda(assignments_dis)
write_xmcda(xmcda_dis, os.path.join(output_dir,
'assignments_disjunctive.xml'))
create_messages_file(None, ('Everything OK.',), output_dir)
return 0
    except Exception as err:
err_msg = get_error_message(err)
log_msg = traceback.format_exc()
print(log_msg.strip())
create_messages_file((err_msg, ), (log_msg, ), output_dir)
return 1
if __name__ == '__main__':
sys.exit(main())
| mit | 3,253,285,074,870,855,700 | 37.376068 | 82 | 0.58686 | false |
zlohner/TournamentOrganizer | model/player.py | 1 | 2904 | #!/usr/bin/env python3
WIN_MATCH_POINTS = 3
LOSE_MATCH_POINTS = 0
DRAW_MATCH_POINTS = 1
import sys
import random
import numpy
class Player(object):
def __init__(self, name, user):
self.name = name
self.user = user
self.match_wins = 0
self.match_losses = 0
self.match_draws = 0
self.game_wins = 0
self.game_losses = 0
self.game_draws = 0
self.opponents = set()
self.byes = 0
self.sort_constant = random.randint(1, sys.maxsize)
def add_record(self, record):
game_wins, game_losses, game_draws = record
self.game_wins += game_wins
self.game_losses += game_losses
self.game_draws += game_draws
def record_win(self, record):
self.add_record(record)
self.match_wins += 1
self.user.match_wins += 1
def record_loss(self, record):
self.add_record(record)
self.match_losses += 1
self.user.match_losses += 1
def record_draw(self, record):
self.add_record(record)
self.match_draws += 1
self.user.match_draws += 1
def match_points(self):
return \
WIN_MATCH_POINTS * self.match_wins + \
DRAW_MATCH_POINTS * self.match_draws + \
LOSE_MATCH_POINTS * self.match_losses
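	# e.g. a 3-1-2 record (3 wins, 1 loss, 2 draws): 3*3 + 2*1 + 1*0 = 11 match points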
def match_win_percent(self):
matches = self.match_wins + self.match_losses + self.match_draws
if matches == 0:
return 0
else:
return float(self.match_wins) / float(matches)
def game_win_percent(self):
games = self.game_wins + self.game_losses + self.game_draws
if games == 0:
return 0
else:
return float(self.game_wins) / float(games)
def played(self, player):
return player in self.opponents
def __eq__(self, other):
return other != None and self.name == other.name
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
val = 0
for c in self.name:
val += ord(c)
val *= 31
return val
def __lt__(self, other):
self_OMWP = 1
other_OMWP = 1
if len(self.opponents) > 0:
self_OMWP = numpy.mean([opp.match_win_percent() for opp in self.opponents])
if len(other.opponents) > 0:
other_OMWP = numpy.mean([opp.match_win_percent() for opp in other.opponents])
self_GWP = self.game_win_percent()
other_GWP = other.game_win_percent()
if self.match_points() > other.match_points():
return True
elif self.match_points() == other.match_points() \
and self_OMWP > other_OMWP:
return True
elif self.match_points() == other.match_points() \
and self_OMWP == other_OMWP \
and self_GWP > other_GWP:
return True
elif self.match_points() == other.match_points() \
and self_OMWP == other_OMWP \
and self_GWP == other_GWP \
and self.sort_constant < other.sort_constant:
return True
else:
return False
def record_str(self):
return str(self.match_wins) + '-' + str(self.match_losses) + '-' + str(self.match_draws)
def formatted(self):
return self.name + '\t\t ' + self.record_str()
def __str__(self):
return '(' + self.name + ' - ' + self.record_str() + ')'
| gpl-3.0 | 1,534,049,206,389,733,400 | 23.610169 | 90 | 0.65668 | false |
AltSchool/django-allauth | allauth/account/adapter.py | 1 | 19322 | from __future__ import unicode_literals
import hashlib
import json
import time
import warnings
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import (
authenticate,
get_backends,
login as django_login,
logout as django_logout,
)
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.password_validation import validate_password
from django.contrib.sites.shortcuts import get_current_site
from django.core.cache import cache
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import resolve_url
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from allauth.compat import force_str, ugettext_lazy as _
from ..utils import (
build_absolute_uri,
email_address_exists,
generate_unique_username,
get_user_model,
import_attribute,
)
from . import app_settings
class DefaultAccountAdapter(object):
error_messages = {
'username_blacklisted':
_('Username can not be used. Please use other username.'),
'username_taken':
AbstractUser._meta.get_field('username').error_messages['unique'],
'too_many_login_attempts':
_('Too many failed login attempts. Try again later.'),
'email_taken':
_("A user is already registered with this e-mail address."),
}
def __init__(self, request=None):
self.request = request
def stash_verified_email(self, request, email):
request.session['account_verified_email'] = email
def unstash_verified_email(self, request):
ret = request.session.get('account_verified_email')
request.session['account_verified_email'] = None
return ret
def stash_user(self, request, user):
request.session['account_user'] = user
def unstash_user(self, request):
return request.session.pop('account_user', None)
def is_email_verified(self, request, email):
"""
Checks whether or not the email address is already verified
beyond allauth scope, for example, by having accepted an
invitation before signing up.
"""
ret = False
verified_email = request.session.get('account_verified_email')
if verified_email:
ret = verified_email.lower() == email.lower()
return ret
def format_email_subject(self, subject):
prefix = app_settings.EMAIL_SUBJECT_PREFIX
if prefix is None:
site = get_current_site(self.request)
prefix = "[{name}] ".format(name=site.name)
return prefix + force_str(subject)
def get_from_email(self):
"""
        This is a hook that can be overridden to programmatically
set the 'from' email address for sending emails
"""
return settings.DEFAULT_FROM_EMAIL
def render_mail(self, template_prefix, email, context):
"""
Renders an e-mail to `email`. `template_prefix` identifies the
e-mail that is to be sent, e.g. "account/email/email_confirmation"
"""
subject = render_to_string('{0}_subject.txt'.format(template_prefix),
context)
# remove superfluous line breaks
subject = " ".join(subject.splitlines()).strip()
subject = self.format_email_subject(subject)
from_email = self.get_from_email()
bodies = {}
for ext in ['html', 'txt']:
try:
template_name = '{0}_message.{1}'.format(template_prefix, ext)
bodies[ext] = render_to_string(template_name,
context).strip()
except TemplateDoesNotExist:
if ext == 'txt' and not bodies:
# We need at least one body
raise
if 'txt' in bodies:
msg = EmailMultiAlternatives(subject,
bodies['txt'],
from_email,
[email])
if 'html' in bodies:
msg.attach_alternative(bodies['html'], 'text/html')
else:
msg = EmailMessage(subject,
bodies['html'],
from_email,
[email])
msg.content_subtype = 'html' # Main content is now text/html
return msg
def send_mail(self, template_prefix, email, context):
msg = self.render_mail(template_prefix, email, context)
msg.send()
def get_login_redirect_url(self, request):
"""
Returns the default URL to redirect to after logging in. Note
that URLs passed explicitly (e.g. by passing along a `next`
GET parameter) take precedence over the value returned here.
"""
assert request.user.is_authenticated
url = getattr(settings, "LOGIN_REDIRECT_URLNAME", None)
if url:
warnings.warn("LOGIN_REDIRECT_URLNAME is deprecated, simply"
" use LOGIN_REDIRECT_URL with a URL name",
DeprecationWarning)
else:
url = settings.LOGIN_REDIRECT_URL
return resolve_url(url)
def get_logout_redirect_url(self, request):
"""
Returns the URL to redirect to after the user logs out. Note that
this method is also invoked if you attempt to log out while no users
is logged in. Therefore, request.user is not guaranteed to be an
authenticated user.
"""
return resolve_url(app_settings.LOGOUT_REDIRECT_URL)
def get_email_confirmation_redirect_url(self, request):
"""
The URL to return to after successful e-mail confirmation.
"""
if request.user.is_authenticated:
if app_settings.EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL:
return \
app_settings.EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL
else:
return self.get_login_redirect_url(request)
else:
return app_settings.EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL
def is_open_for_signup(self, request):
"""
Checks whether or not the site is open for signups.
Next to simply returning True/False you can also intervene the
regular flow by raising an ImmediateHttpResponse
"""
return True
def new_user(self, request):
"""
Instantiates a new User instance.
"""
user = get_user_model()()
return user
def populate_username(self, request, user):
"""
Fills in a valid username, if required and missing. If the
username is already present it is assumed to be valid
(unique).
"""
from .utils import user_username, user_email, user_field
first_name = user_field(user, 'first_name')
last_name = user_field(user, 'last_name')
email = user_email(user)
username = user_username(user)
if app_settings.USER_MODEL_USERNAME_FIELD:
user_username(
user,
username or self.generate_unique_username([
first_name,
last_name,
email,
username,
'user']))
def generate_unique_username(self, txts, regex=None):
return generate_unique_username(txts, regex)
def save_user(self, request, user, form, commit=True):
"""
Saves a new `User` instance using information provided in the
signup form.
"""
from .utils import user_username, user_email, user_field
data = form.cleaned_data
first_name = data.get('first_name')
last_name = data.get('last_name')
email = data.get('email')
username = data.get('username')
user_email(user, email)
user_username(user, username)
if first_name:
user_field(user, 'first_name', first_name)
if last_name:
user_field(user, 'last_name', last_name)
if 'password1' in data:
user.set_password(data["password1"])
else:
user.set_unusable_password()
self.populate_username(request, user)
if commit:
# Ability not to commit makes it easier to derive from
# this adapter by adding
user.save()
return user
def clean_username(self, username, shallow=False):
"""
Validates the username. You can hook into this if you want to
(dynamically) restrict what usernames can be chosen.
"""
for validator in app_settings.USERNAME_VALIDATORS:
validator(username)
# TODO: Add regexp support to USERNAME_BLACKLIST
username_blacklist_lower = [ub.lower()
for ub in app_settings.USERNAME_BLACKLIST]
if username.lower() in username_blacklist_lower:
raise forms.ValidationError(
self.error_messages['username_blacklisted'])
# Skipping database lookups when shallow is True, needed for unique
# username generation.
if not shallow:
from .utils import filter_users_by_username
if filter_users_by_username(username).exists():
user_model = get_user_model()
username_field = app_settings.USER_MODEL_USERNAME_FIELD
error_message = user_model._meta.get_field(
username_field).error_messages.get('unique')
if not error_message:
error_message = self.error_messages['username_taken']
raise forms.ValidationError(
error_message,
params={
'model_name': user_model.__name__,
'field_label': username_field,
}
)
return username
def clean_email(self, email):
"""
Validates an email value. You can hook into this if you want to
(dynamically) restrict what email addresses can be chosen.
"""
return email
def clean_password(self, password, user=None):
"""
Validates a password. You can hook into this if you want to
        restrict the allowed password choices.
"""
min_length = app_settings.PASSWORD_MIN_LENGTH
if min_length and len(password) < min_length:
raise forms.ValidationError(_("Password must be a minimum of {0} "
"characters.").format(min_length))
validate_password(password, user)
return password
def validate_unique_email(self, email):
if email_address_exists(email):
raise forms.ValidationError(self.error_messages['email_taken'])
return email
def add_message(self, request, level, message_template,
message_context=None, extra_tags=''):
"""
Wrapper of `django.contrib.messages.add_message`, that reads
the message text from a template.
"""
if 'django.contrib.messages' in settings.INSTALLED_APPS:
try:
if message_context is None:
message_context = {}
message = render_to_string(message_template,
message_context).strip()
if message:
messages.add_message(request, level, message,
extra_tags=extra_tags)
except TemplateDoesNotExist:
pass
def ajax_response(self, request, response, redirect_to=None, form=None,
data=None):
resp = {}
status = response.status_code
if redirect_to:
status = 200
resp['location'] = redirect_to
if form:
if request.method == 'POST':
if form.is_valid():
status = 200
else:
status = 400
else:
status = 200
resp['form'] = self.ajax_response_form(form)
if hasattr(response, 'render'):
response.render()
resp['html'] = response.content.decode('utf8')
if data is not None:
resp['data'] = data
return HttpResponse(json.dumps(resp),
status=status,
content_type='application/json')
def ajax_response_form(self, form):
form_spec = {
'fields': {},
'field_order': [],
'errors': form.non_field_errors()
}
for field in form:
field_spec = {
'label': force_str(field.label),
'value': field.value(),
'help_text': force_str(field.help_text),
'errors': [
force_str(e) for e in field.errors
],
'widget': {
'attrs': {
k: force_str(v)
for k, v in field.field.widget.attrs.items()
}
}
}
form_spec['fields'][field.html_name] = field_spec
form_spec['field_order'].append(field.html_name)
return form_spec
def login(self, request, user):
# HACK: This is not nice. The proper Django way is to use an
# authentication backend
if not hasattr(user, 'backend'):
from .auth_backends import AuthenticationBackend
backends = get_backends()
backend = None
for b in backends:
if isinstance(b, AuthenticationBackend):
# prefer our own backend
backend = b
break
elif not backend and hasattr(b, 'get_user'):
                    # Pick the first valid one
backend = b
backend_path = '.'.join([backend.__module__,
backend.__class__.__name__])
user.backend = backend_path
django_login(request, user)
def logout(self, request):
django_logout(request)
def confirm_email(self, request, email_address):
"""
Marks the email address as confirmed on the db
"""
email_address.verified = True
email_address.set_as_primary(conditional=True)
email_address.save()
def set_password(self, user, password):
user.set_password(password)
user.save()
def get_user_search_fields(self):
user = get_user_model()()
return filter(lambda a: a and hasattr(user, a),
[app_settings.USER_MODEL_USERNAME_FIELD,
'first_name', 'last_name', 'email'])
def is_safe_url(self, url):
from django.utils.http import is_safe_url
return is_safe_url(url, allowed_hosts=None)
def get_email_confirmation_url(self, request, emailconfirmation):
"""Constructs the email confirmation (activation) url.
Note that if you have architected your system such that email
confirmations are sent outside of the request context `request`
can be `None` here.
"""
url = reverse(
"account_confirm_email",
args=[emailconfirmation.key])
ret = build_absolute_uri(
request,
url)
return ret
def send_confirmation_mail(self, request, emailconfirmation, signup):
current_site = get_current_site(request)
activate_url = self.get_email_confirmation_url(
request,
emailconfirmation)
ctx = {
"user": emailconfirmation.email_address.user,
"activate_url": activate_url,
"current_site": current_site,
"key": emailconfirmation.key,
}
if signup:
email_template = 'account/email/email_confirmation_signup'
else:
email_template = 'account/email/email_confirmation'
self.send_mail(email_template,
emailconfirmation.email_address.email,
ctx)
def respond_user_inactive(self, request, user):
return HttpResponseRedirect(
reverse('account_inactive'))
def respond_email_verification_sent(self, request, user):
return HttpResponseRedirect(
reverse('account_email_verification_sent'))
def _get_login_attempts_cache_key(self, request, **credentials):
site = get_current_site(request)
login = credentials.get('email', credentials.get('username', ''))
login_key = hashlib.sha256(login.encode('utf8')).hexdigest()
return 'allauth/login_attempts@{site_id}:{login}'.format(
site_id=site.pk,
login=login_key)
def pre_authenticate(self, request, **credentials):
if app_settings.LOGIN_ATTEMPTS_LIMIT:
cache_key = self._get_login_attempts_cache_key(
request, **credentials)
login_data = cache.get(cache_key, None)
if login_data:
dt = timezone.now()
current_attempt_time = time.mktime(dt.timetuple())
if (len(login_data) >= app_settings.LOGIN_ATTEMPTS_LIMIT and
current_attempt_time < (
login_data[-1] +
app_settings.LOGIN_ATTEMPTS_TIMEOUT)):
raise forms.ValidationError(
self.error_messages['too_many_login_attempts'])
def authenticate(self, request, **credentials):
"""Only authenticates, does not actually login. See `login`"""
from allauth.account.auth_backends import AuthenticationBackend
self.pre_authenticate(request, **credentials)
AuthenticationBackend.unstash_authenticated_user()
user = authenticate(request, **credentials)
alt_user = AuthenticationBackend.unstash_authenticated_user()
user = user or alt_user
if user and app_settings.LOGIN_ATTEMPTS_LIMIT:
cache_key = self._get_login_attempts_cache_key(
request, **credentials)
cache.delete(cache_key)
else:
self.authentication_failed(request, **credentials)
return user
def authentication_failed(self, request, **credentials):
if app_settings.LOGIN_ATTEMPTS_LIMIT:
cache_key = self._get_login_attempts_cache_key(
request, **credentials
)
data = cache.get(cache_key, [])
dt = timezone.now()
data.append(time.mktime(dt.timetuple()))
cache.set(cache_key, data, app_settings.LOGIN_ATTEMPTS_TIMEOUT)
def is_ajax(self, request):
return request.is_ajax()
def get_adapter(request=None):
return import_attribute(app_settings.ADAPTER)(request)
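# Customisation sketch (module/class names below are illustrative, not part of
# allauth): subclass DefaultAccountAdapter, override the hooks you need, and point
# the ACCOUNT_ADAPTER setting at the subclass so get_adapter() returns it.
#
#   # myproject/adapter.py
#   class ClosedSignupAdapter(DefaultAccountAdapter):
#       def is_open_for_signup(self, request):
#           return False   # refuse self-service signups
#
#   # settings.py
#   ACCOUNT_ADAPTER = 'myproject.adapter.ClosedSignupAdapter'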
| mit | -2,770,081,460,482,284,500 | 36.518447 | 78 | 0.568782 | false |
albertoconnor/apisdk | relateiq/lists.py | 1 | 3641 | from .riq_obj import RIQObject
from .riq_base import RIQBase
from .listitems import ListItems
from .listitem import ListItem
# TODO: Add version, externalId, category
# TODO: Payload exception if missing required fields
class List(RIQObject,RIQBase) :
# Object Attributes
_id = None
_modifiedDate = None
_title = None
_listType = None
_fields = None
_size = None
def __init__(self, _id=None, title=None, modifiedDate=None, fields=None, data=None) :
if data != None :
self.parse(data)
elif self.id(_id) != None :
self.get()
self.title(title)
self.modifiedDate(modifiedDate)
self.fields(fields)
self.ListItems = ListItems(self)
@classmethod
def node(cls) :
return 'lists'
def parse(self,data) :
self.id(data.get('id',None))
self.modifiedDate(data.get('modifiedDate',None))
self.title(data.get('title',None))
self.listType(data.get('listType',None))
self.fields(data.get('fields',None))
self.size(data.get('size', None))
return self
# Data Payload
def payload(self) :
payload = {
'title' : self.title(),
'fields' : self.fields()
}
if self.id() != None :
payload['id'] = self.id()
return payload
# Hybrid
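    # (call with an argument to set the attribute, call with no argument to
    # read the current value)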
def id(self,value=None) :
if value != None :
self._id = value
return self._id
def modifiedDate(self,value=None) :
if value != None :
self._modifiedDate = value
return self._modifiedDate
def title(self,value=None) :
if value != None :
self._title = value
return self._title
def listType(self,value=None) :
if value != None :
self._listType = value
return self._listType
def fields(self,value=None) :
if value != None :
self._fields = value
return self._fields or []
def size(self, value=None):
if value != None:
self._size = value
return self._size
# Sub Endpoints
def ListItem(self,*args,**kwargs) :
kwargs['parent'] = self
return ListItem(*args,**kwargs)
# Lookup Functions
# Convert a field name to a field key (eg "Status" --> "0")
def fieldKey(self,name) :
#if the "name" is already a key, just return it
for field in self.fields() :
if field.get('id',None) == name :
return name
#otherwise, find the field whose "name" is name, and return that field's id
for field in self.fields() :
if field.get('name',None) == name :
return field.get('id',name)
#print "[WARN] Field is a Linked Field and has no Schema in List: " + name
return name
def fieldValue(self,key,value=None) :
for field in self.fields() :
if field.get('id',None) == key :
return key
for field in self.fields() :
if field.get('display',None) == key :
return field.get('id',key)
return key
def fieldOption(self,key,value=None) :
for field in self.fields() :
if field.get('id',None) == key :
return key
for field in self.fields() :
if field.get('display',None) == key :
return field.get('id',key)
return key
def fieldMap(self):
return {field["id"]: field for field in self._fields}
def fetchListSize(self):
self.get({"includeSize" : True})
return self.size() | apache-2.0 | -4,409,821,769,299,002,400 | 27.677165 | 89 | 0.552046 | false |
hgn/hippod | hippod/report_generator.py | 1 | 27079 | import os
import shutil
import markdown
import tempfile
import datetime
import json
import re
import tempfile
import logging
import glob
import hippod.api_shared
import hippod.error_object
log = logging.getLogger()
class ReportGenerator(object):
LAST_ACHIEVEMENTS = 1
FILTER_BY_ANCHOR = 2
FILTER_BY_CHOICE = 3
PDF = 4
@staticmethod
def generate(app, outputs, report_filter, report_meta):
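        # Overall flow: collect the matching (container, subcontainer,
        # achievement) tuples, copy their descriptions/images/achievement data
        # into a temporary directory, then render everything into one
        # timestamped PDF via pandoc.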
reports_path = app['REPORT_PATH']
tmp_path = os.path.join(app['DB_ROOT_PATH'], 'tmp')
if not os.path.isdir(tmp_path):
os.mkdir(tmp_path)
list_of_lists = ReportGenerator.ReportGeneratorCollector.search(app, report_filter, report_meta)
date = str(datetime.datetime.now().replace(second=0, microsecond=0).isoformat(sep='-'))
doc_name = '{}-report.pdf'.format(date)
pdf_out_path = os.path.join(reports_path, doc_name)
rpd = ReportGenerator.ReportGeneratorDocument(list_of_lists, tmp_path)
stored_data = rpd.store_data_in_tmp(app)
rpd.generate_pdf(app, pdf_out_path, stored_data)
class ReportGeneratorDocument(object):
def __init__(self, list_of_lists, tmp_path):
self.list_of_lists = list_of_lists
# self.tmp_path = tempfile.TemporaryFile()
self.tmp_path = tmp_path
# def __del__(self):
# shutil.rmtree(self.tmp_path)
        @staticmethod
        def get_format_snippet(app, data_id):
db_path = app['DB_DATA_PATH']
attr_path = os.path.join(db_path, data_id, 'attr.db')
with open(attr_path, 'r') as f:
content = json.load(f)
snippet_format = content['mime-type'].split('-')[-1]
return snippet_format
def get_dst_path(self, data_type, sub_dir, name):
if data_type == '.png':
dst_path = os.path.join(sub_dir, '{}.png'.format(name))
elif data_type == '.jpg':
dst_path = os.path.join(sub_dir, '{}.jpg'.format(name))
elif data_type == '.jpeg':
dst_path = os.path.join(sub_dir, '{}.jpeg'.format(name))
elif data_type == '.gif':
dst_path = os.path.join(sub_dir, '{}.gif'.format(name))
else:
                # FIXME: not sure, but this function should return. If it
                # does not, dst_path is undefined and will definitely crash
                # some lines later!
log.error("data type not supported: {}".format(data_type))
return None
return dst_path
def copy_to_dst(self, src_path, dst_path):
with open(src_path, 'rb') as file:
data = file.read()
decoded = hippod.hasher.decode_base64_data(data)
with open(dst_path, 'wb') as file:
file.write(decoded)
with open(dst_path, 'wb') as dst:
shutil.copyfile(src_path, dst_path)
def store_data(self, app, data, sub_dir):
src_path = os.path.join(app['DB_DATA_PATH'], data['data-id'], 'blob.bin')
src_path_snippet = os.path.join(app['DB_SNIPPET_PATH'], '{}.png'.format(data['data-id']))
if not os.path.isdir(sub_dir):
os.mkdir(sub_dir)
# check whether data is a image or description or snippet
if 'type' not in data:
head, tail = os.path.split(data['name'])
name, data_type = os.path.splitext(tail)
dst_path = self.get_dst_path(data_type, sub_dir, name)
if not dst_path:
return None
self.copy_to_dst(src_path, dst_path)
elif data['type'] == 'description':
dst_path = os.path.join(sub_dir, 'description.md')
with open(dst_path, 'wb') as dst:
shutil.copyfile(src_path, dst_path)
elif data['type'] == 'snippet':
# in case of snippet root of src is snippet_db
if 'name' in data:
head, tail = os.path.split(data['name'])
name, data_type = os.path.splitext(tail)
else:
name = data['data-id']
                    # prefix with '.' so the value matches the dotted
                    # extensions ('.png', '.jpg', ...) expected by
                    # get_dst_path() and by the path join below
                    data_type = '.' + self.get_format_snippet(app, data['data-id'])
src_path = os.path.join(app['DB_SNIPPET_PATH'], '{}{}'.format(data['data-id'], data_type))
dst_path = self.get_dst_path(data_type, sub_dir, name)
if not dst_path:
return None
self.copy_to_dst(src_path, dst_path)
# else:
# FIXME: error handling
return dst_path
def store_achievement(self, app, achievement_path, sub_dir):
with open(achievement_path, 'r') as achiev:
content = json.load(achiev)
if not os.path.isdir(sub_dir):
os.mkdir(sub_dir)
# check whether data is achievement like
if 'result' in content:
dst_path = os.path.join(sub_dir, 'achievement.db')
with open(dst_path, 'w') as dst:
content = json.dumps(content, sort_keys=True,indent=4,
separators=(',', ': '))
dst.write(content)
return dst_path
else:
return None
def store_attachment(self, app, attachment_path, sub_dir):
with open(attachment_path, 'r') as attach:
content = json.load(attach)
if not os.path.isdir(sub_dir):
os.mkdir(sub_dir)
dst_path = os.path.join(sub_dir, 'attachment.db')
with open(dst_path, 'w') as dst:
content = json.dumps(content, sort_keys=True,indent=4,
separators=(',', ': '))
dst.write(content)
return dst_path
def get_achievement_content(self, achievement_path):
with open(achievement_path) as achievement:
content = json.load(achievement)
return content
def get_attachment_content(self, attachment_path):
default_attach = dict()
default_attach['responsible'] = 'anonymous'
if not attachment_path:
return default_attach
with open(attachment_path) as attach:
content = json.load(attach)
return content
def add_data(self, description_path, file_path):
with open(description_path, 'r') as file:
description = file.read()
with open(description_path, 'w') as file:
                # append a Markdown image reference pointing at the copied file
                description = str(description) + '\n' + '![]({})'.format(file_path) + '\n'
file.write(description)
def design_description(self, achievement_content, categories, attach_content, title):
result = achievement_content['result']
submitter = achievement_content['submitter']
test_date = achievement_content['test-date']
categories = categories
responsible = attach_content['responsible']
description = '# {} #\n\n'.format(title)
description += '----------------------- ----------\n'
description += '**Test Result** {}\n'.format(result)
description += '**Categories** {}\n'.format(categories)
description += '**Submitter** {}\n'.format(submitter)
description += '**Responsible** {}\n'.format(responsible)
description += '**Test-Date** {}\n'.format(test_date)
description += '----------------------- ----------\n\n'
for data in achievement_content['data-references']:
            # embed each referenced data file as a Markdown image
            description += '![]({})\n\n'.format(data)
return description
def add_achievement(self, description_path, achievement_path, title, \
achievement_data, attachment_path, categories):
attach_content = self.get_attachment_content(attachment_path)
achievement_content = self.get_achievement_content(achievement_path)
achievement_content['data-references'] = achievement_data
if description_path == None:
# remove '/achievement.db' of the path and create a 'description.md' file in this directory
tmp_item_path = os.path.dirname(achievement_path)
description_path = os.path.join(tmp_item_path, 'description.md')
with open(description_path, 'w') as file:
descr = self.design_description(achievement_content, categories, attach_content, title)
file.write(descr)
return description_path
else:
with open(description_path, 'r') as file:
description_only = file.read()
with open(description_path, 'w') as file:
descr = self.design_description(achievement_content, categories, attach_content, title)
descr += str(description_only)
file.write(descr)
return description_path
def sanitize_description(self, description_path):
with open(description_path, 'r') as input_file:
descr_lines = input_file.readlines()
with open(description_path, 'w') as output_file:
for line in descr_lines:
match = re.search(r'^#[#]*', line)
p = re.compile(r'(#[#]*)')
if match != None:
newline = p.sub('{}#'.format(match.group(0)), line)
output_file.write(newline)
else:
output_file.write(line)
def adjust_image_reference(self, description_path, attach_path, data_type):
# looks for available references and arrange the path in the refeferences to the images
# stored in the tmp file, then returns bool whether image is referenced or not
data_type = data_type.replace(".", "")
reference_available = False
head, tail = os.path.split(attach_path)
with open(description_path, 'r') as input_file:
in_descr = input_file.readlines()
with open(description_path, 'w') as output_file:
# search for 'xxx(xxx.data_type)'
regex = r'(\()(.*[.]' + '{})'.format(data_type)
# regex_compile is a pattern which only looks for the part after the caption
# in the reference
regex_compile = r'\(.*[.]' + '{}'.format(data_type)
p = re.compile(regex_compile)
for line in in_descr:
match = re.search(regex, line)
if match:
# check whether match 'xxx.xxx' is the wanted image data like 'image.png'
if match.group(2) == tail:
reference_available = True
# exchange only the file path in the refernce(after the caption) with the
# new tmp file path
newline = p.sub('({}'.format(attach_path), line)
output_file.write(newline)
else:
output_file.write(line)
else:
output_file.write(line)
return reference_available
def fetch_data_list_subcontainer(self, subcontainer_path):
with open(subcontainer_path, 'r') as subc:
content = json.load(subc)
if 'data' not in content['object-item'] or len(content['object-item']['data']) == 0:
return None
data_list = content['object-item']['data']
return data_list
def fetch_data_list_achievement(self, achievement_path):
with open(achievement_path, 'r') as achievement:
content = json.load(achievement)
if 'data' not in content or len(content['data']) == 0:
return None
data_list = content['data']
return data_list
def store_data_subcontainer(self, app, data_list, sub_dir):
stored_paths = list()
for i, data in enumerate(data_list):
stored_data_path = self.store_data(app, data, sub_dir)
if stored_data_path == None:
continue
stored_paths.append(stored_data_path)
return stored_paths
def store_data_achievement(self, app, data_list, sub_dir):
stored_paths = list()
for i, data in enumerate(data_list):
stored_data_path = self.store_data(app, data, sub_dir)
if stored_data_path != None:
stored_paths.append(stored_data_path)
return stored_paths
def store_data_in_tmp(self, app):
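            # Builds a catalog mapping one tmp sub-directory per report item to
            # the copied attachment, achievement and data files; generate_pdf()
            # consumes this structure afterwards.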
db_path = app['DB_OBJECT_PATH']
files_catalog = dict()
for j, item in enumerate(self.list_of_lists):
sub_dir = os.path.join(self.tmp_path, 'item{}'.format(j))
files_catalog[sub_dir] = dict()
files_catalog[sub_dir]['data'] = dict()
files_catalog[sub_dir]['data']['achievements'] = list()
files_catalog[sub_dir]['data']['subcontainer'] = list()
sha_major = item[0]
sha_minor = item[1]
achievement_id = item[2]
title = item[3]
last_attachment = item[4]
categories = item[5]
files_catalog[sub_dir]['title'] = title
files_catalog[sub_dir]['categories'] = categories
subcontainer = os.path.join(db_path, sha_major[0:2], sha_major, sha_minor, 'subcontainer.db')
achievement = os.path.join(db_path, sha_major[0:2], sha_major, sha_minor, 'achievements', '{}.db'.format(achievement_id))
if not last_attachment:
files_catalog[sub_dir]['attachment'] = None
else:
attachment = os.path.join(db_path, sha_major[0:2], sha_major, 'attachments', last_attachment)
stored_data_path = self.store_attachment(app, attachment, sub_dir)
files_catalog[sub_dir]['attachment'] = stored_data_path
stored_data_path = self.store_achievement(app, achievement, sub_dir)
files_catalog[sub_dir]['achievement'] = stored_data_path
data_list_achievement = self.fetch_data_list_achievement(achievement)
if data_list_achievement != None:
stored_paths = self.store_data_achievement(app, data_list_achievement, sub_dir)
for path in stored_paths: files_catalog[sub_dir]['data']['achievements'].append(path)
data_list_subcontainer = self.fetch_data_list_subcontainer(subcontainer)
if data_list_subcontainer == None:
continue
stored_paths = self.store_data_subcontainer(app, data_list_subcontainer, sub_dir)
for path in stored_paths: files_catalog[sub_dir]['data']['subcontainer'].append(path)
return files_catalog
def _pandoc_generate(self, app, markdown_in_path, pdf_out_path):
assert(os.path.isfile(markdown_in_path))
cmd = "pandoc "
cmd += "--latex-engine xelatex "
if "REPORT-PDF-TEMPLATE" in app:
cmd += "--template {} ".format(app["REPORT-PDF-TEMPLATE"])
cmd += "--listings "
cmd += "--toc "
cmd += "{} ".format(markdown_in_path)
cmd += " -o \"{}\" ".format(pdf_out_path)
log.debug("executing: \"{}\"".format(cmd))
os.system(cmd)
def generate_pdf(self, app, pdf_out_path, tmp_data):
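            # For every stored item: reuse its description.md when present
            # (prepending the achievement summary), otherwise synthesise a
            # description from the achievement alone; then fix up image
            # references, concatenate all sub-reports and hand the result to
            # pandoc.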
sub_reports = list()
for key, item in tmp_data.items():
title = item['title']
categories = item['categories']
achievement_data = item['data']['achievements']
attachment_path = item['attachment']
counter = 0
for d in item['data']['subcontainer']:
counter += 1
name, data_type = os.path.splitext(d)
if data_type == '.md':
self.sanitize_description(d)
description_path = d
if 'achievement' in item:
achievement_path = item['achievement']
self.add_achievement(description_path, achievement_path, title, \
achievement_data, attachment_path, categories)
counter = 0
# if no '.md' found --> use at least title and test result for the report
elif counter == len(item['data']['subcontainer']):
if 'achievement' in item:
achievement_path = item['achievement']
description_path = self.add_achievement(None, achievement_path, title, \
achievement_data, attachment_path, categories)
else:
continue
for d in item['data']['subcontainer']:
name, data_type = os.path.splitext(d)
if data_type == '.png':
attach_path = d
elif data_type == '.jpg':
attach_path = d
elif data_type == '.jpeg':
attach_path = d
elif data_type == '.gif':
attach_path = d
else:
continue
ok = self.adjust_image_reference(description_path, attach_path, data_type)
if not ok:
self.add_data(description_path, attach_path)
if len(item['data']['subcontainer']) == 0:
achievement_path = item['achievement']
description_path = self.add_achievement(None, achievement_path, title, \
achievement_data, attachment_path, categories)
sub_reports.append(description_path)
for i in range(len(sub_reports) - 1):
with open(sub_reports[i+1], 'r') as file2:
description2 = file2.read()
with open(sub_reports[0], 'r') as file1:
description1 = file1.read()
description1 = str(description1) + '\n \n \n' + str(description2)
with open(sub_reports[0], 'w') as file1:
file1.write(description1)
# FIXME, need arguments
self._pandoc_generate(app, sub_reports[0], pdf_out_path)
# shutil.rmtree(self.tmp_path)
class ReportGeneratorCollector(object):
@staticmethod
def null_func(data):
pass
@staticmethod
def search(app, filter_type, filter_meta):
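            # Walks the object index and, per selected achievement, collects a
            # list of [sha_major, sha_minor, achievement_id, title,
            # last_attachment, categories] for ReportGeneratorDocument to
            # consume.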
object_index_data = hippod.api_shared.object_index_read(app)
if not object_index_data:
return None
# maybe specify limit in filter?
search_list = list()
# list_sort_func = (null_func(), reversed)[bool(True)] # here variable reversed instead of hardcoded True
# for cont in list_sort_func(object_index_data):
for cont in object_index_data:
ok, cont_obj = hippod.api_shared.read_cont_obj_by_id(app, cont['object-item-id'])
if not ok:
log.error("cannot read container {} by sha although it's in object-index.db".format(cont['object-item-id']))
continue
title = cont_obj['title']
categories = cont_obj['categories']
if filter_type == ReportGenerator.LAST_ACHIEVEMENTS:
last_achiev_list = ReportGenerator.ReportGeneratorCollector.search_last_achievements(app, cont['object-item-id'], cont_obj)
last_attach = ReportGenerator.ReportGeneratorCollector.search_last_attachment(app, cont['object-item-id'])
last_achiev_list.append(title)
last_achiev_list.append(last_attach)
last_achiev_list.append(categories)
search_list.append(last_achiev_list)
elif filter_type == ReportGenerator.FILTER_BY_ANCHOR:
ReportGenerator.ReportGeneratorCollector.search_anchored_achievements(app, cont['object-item-id'], cont_obj)
elif filter_type == ReportGenerator.FILTER_BY_CHOICE:
choiced_achiev_list = ReportGenerator.ReportGeneratorCollector.search_choiced_achievements(app, cont['object-item-id'], cont_obj, filter_meta)
if not choiced_achiev_list:
continue
for sub_list in choiced_achiev_list:
last_attach = ReportGenerator.ReportGeneratorCollector.search_last_attachment(app, cont['object-item-id'])
# last attachment?
sub_list.append(title)
sub_list.append(last_attach)
sub_list.append(categories)
search_list.append(sub_list)
return search_list
@staticmethod
def search_last_achievements(app, sha_major, cont_obj):
ret_list = list()
buff_dict = dict()
# fetch latest subcontainer (subcontainer with latest achievement) and related meta
for sub_cont in cont_obj['subcontainer-list']:
sc = sub_cont['sha-minor']
ok, full_sub_cont = hippod.api_shared.read_subcont_obj_by_id(app, sha_major, sc)
if not ok:
                    log.error('cannot read subcontainer {}/{} by sha although sha_minor in subcontainer-list'.format(sha_major, sc))
continue
data = hippod.api_object_get_full.get_all_achievement_data(app, sha_major, sc, full_sub_cont)
if data:
buff_dict[sc] = data[0]['date-added']
if data:
latest_sha_minor = max(buff_dict, key=lambda key: buff_dict[key])
latest_index = next(index for (index,d) in enumerate(cont_obj['subcontainer-list']) if d['sha-minor'] == latest_sha_minor)
ret_list.append(sha_major)
if not data:
sub_cont_last = cont_obj['subcontainer-list'][0]
latest_sha_minor = sub_cont_last['sha-minor']
else:
sub_cont_last = cont_obj['subcontainer-list'][latest_index]
ret_list.append(sub_cont_last['sha-minor'])
db_root_path = app['DB_OBJECT_PATH']
subcntr_path = os.path.join(db_root_path, sha_major[0:2], sha_major,\
latest_sha_minor, 'subcontainer.db')
with open(subcntr_path) as file:
full_sub_cont_last = json.load(file)
data = hippod.api_object_get_detail.get_last_achievement_data(app, sha_major, latest_sha_minor, full_sub_cont_last)
ret_list.append(data['id'])
return ret_list
@staticmethod
def search_choiced_achievements(app, sha_major, cont_obj, filter_meta):
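            # filter_meta may contain an 'anchors' collection and/or a
            # 'submitter' collection; an achievement is kept only if it matches
            # every filter that is present, e.g. (illustrative values only)
            # {'anchors': ['release-1'], 'submitter': ['alice']}.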
ret_list = list()
if 'anchors' in filter_meta:
anchors_filter = filter_meta['anchors']
if 'submitter' in filter_meta:
submitter_filter = filter_meta['submitter']
# FIXME: multiple entries shouldn't be in the ret_list!
for sub_cont in cont_obj['subcontainer-list']:
sub_ret_list = list()
sha_minor = sub_cont['sha-minor']
ok, full_sub_cont = hippod.api_shared.read_subcont_obj_by_id(app, sha_major, sha_minor)
achievements = full_sub_cont['achievements']
achievement_data_list = hippod.api_object_get_full.get_all_achievement_data(app, sha_major, sha_minor, full_sub_cont)
for achievement in achievement_data_list:
sub_ret_list1 = list()
submitter = achievement['submitter']
if 'anchors' in filter_meta and 'submitter' in filter_meta:
if 'anchor' in achievement:
anchor = achievement['anchor']
if submitter in submitter_filter and anchor in anchors_filter:
sub_ret_list1.append(sha_major)
sub_ret_list1.append(sha_minor)
sub_ret_list1.append(str(achievement['id']))
else:
continue
else:
continue
elif 'anchors' in filter_meta:
if 'anchor' in achievement:
anchor = achievement['anchor']
if anchor in anchors_filter:
sub_ret_list1.append(sha_major)
sub_ret_list1.append(sha_minor)
sub_ret_list1.append(str(achievement['id']))
else:
continue
else:
continue
elif 'submitter' in filter_meta and submitter in submitter_filter:
sub_ret_list1.append(sha_major)
sub_ret_list1.append(sha_minor)
sub_ret_list1.append(str(achievement['id']))
else:
continue
sub_ret_list.append(sub_ret_list1)
for sub in sub_ret_list:
ret_list.append(sub)
return ret_list
@staticmethod
def search_anchored_achievements(app, sha_major, cont_obj):
pass
@staticmethod
def search_last_attachment(app, sha_major):
obj_path = os.path.join(app['DB_OBJECT_PATH'])
attach_path = os.path.join(obj_path, sha_major[0:2], sha_major, 'attachments')
attach_files = os.path.join(attach_path, '*')
attach_list = glob.glob(attach_files)
if len(attach_list) == 0:
return None
last_attach = max(attach_list, key=os.path.getctime)
return last_attach | mit | 7,860,579,975,598,945,000 | 45.932409 | 162 | 0.518778 | false |
anovak10/plots | DDTmethod/DDT.py | 1 | 3479 | #
import os
import ROOT
from ROOT import *
from array import array
import math
from math import *
import sys
import pdb
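# ComputeDDT: for each (mass, pT) cell of the 3D histogram H, project the
# tau32 axis and store the `point` percentile (point is given in percent,
# hence point*0.01), i.e. the tau32 value below which roughly `point`% of the
# events in that cell fall.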
def ComputeDDT(name, point, nPtBins, nRhoBins, H):
DDT = TH2F(name, "", nRhoBins, 50, 250 , nPtBins, 380, 1000)
DDT.SetStats(0)
nXb = H.GetXaxis().GetNbins()
nYb = H.GetYaxis().GetNbins()
for x in range(nXb):
for y in range(nYb):
proj = H.ProjectionZ("H3"+str(x)+str(y),x+1,x+1,y+1,y+1)
print str(x+1) + "," + str(y+1) + ": "+ str(proj.Integral())
p = array('d', [point*0.01])
q = array('d', [0.0]*len(p))
proj.GetQuantiles(len(p), q, p)
DDT.SetBinContent( x+1, y+1, q[0] )
return DDT
def DisplayDDT(DDT, toretain, SaveName, cut=""):
CMSLABL = TLatex()
CMSLABL.SetNDC()
CMSLABL.SetTextSize(0.045)
PRELABL = TLatex()
PRELABL.SetNDC()
PRELABL.SetTextSize(0.04)
THILABL = TLatex()
THILABL.SetNDC()
THILABL.SetTextSize(0.035)
CUTLABL = TLatex()
CUTLABL.SetNDC()
CUTLABL.SetTextSize(0.02)
C = TCanvas("TempCanvas", "Title", 800, 600)
plot = TPad("pad1", "The pad 80% of the height",0.02,0,0.95,1)
plot.Draw()
plot.cd()
DDT.SetStats(0)
DDT.GetXaxis().SetTitle("TAGM")
DDT.GetXaxis().SetTitleSize(0.045)
DDT.GetZaxis().SetTitle("TAGTau_{32}")
DDT.GetZaxis().SetTitleSize(0.045)
DDT.GetZaxis().SetRangeUser(0.5,0.75)
DDT.SetTitle("DDT at "+str(toretain)+"% efficinecy")
if SaveName.startswith("DDTdiff") == True:
DDT.GetZaxis().SetRangeUser(-0.1,0.1)
DDT.GetZaxis().SetTitle("#Delta TAGTau_{32}")
DDT.SetTitle("#Delta DDT at "+str(toretain)+"% efficinecy")
DDT.GetYaxis().SetTitle("TAGp_{T}")
DDT.GetYaxis().SetTitleSize(0.045)
DDT.GetYaxis().SetTitleOffset(1.145)
DDT.Draw("COLZ")
CMSLABL.DrawLatex(0.1465,0.85,"CMS")
THILABL.DrawLatex(0.81,0.91,"#bf{13 TeV}")
PRELABL.DrawLatex(0.1465,0.812,"#bf{#it{Simulation Preliminary}}")
CUTLABL.DrawLatex(0.1465,0.780,cut)
C.Print("MAP_"+SaveName+".png")
def histo(Bkgs, cut="T.lepJetCSV<100"):
H3 = TH3F("H3", "", 9, 50, 250, 12, 380, 1000, 500, 0, 1)
H3.SetStats(0)
for B in Bkgs:
F = TFile(B)
T = F.Get("tree_T1")
n = T.GetEntries()
for j in range(0, n): # Here is where we loop over all events.
T.GetEntry(j)
if T.TAGTau32 > 0.001:
if eval(cut):
weight = T.weight
PT = T.TAGPt
M = T.TAGM
H3.Fill(M, PT, T.TAGTau32, weight)
return H3
# Fetch samples
pwd = "/home/storage/andrzejnovak/March/"
Bkgs =[]
Bkgs.append(pwd+"WJetsToQQ.root")
for w in ["100To200", "200To400", "400To600", "600To800", "800To1200", "1200To2500", "2500ToInf"]:
Bkgs.append(pwd+"WJetsToLNu_HT-"+w+".root")
def study(cut,savename, toretain=20):
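	# Build the DDT map separately for the signal region (lepJetCSV > 0.46)
	# and the sideband (lepJetCSV < 0.46) under the optional preselection
	# `cut`, plot both maps and their difference, and write them to
	# <savename>.root.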
#Set-up
#toretain = toretain # Percentage to keep
if cut != "": Tcut = "T."+cut+" and " #Preselection cuts on data in pythonic form e.g. T.LepPt < 50 and ..
else: Tcut = ""
nbins1, nbins2 = 12, 9
# Signal Region
H3 = histo(Bkgs, cut=Tcut +"T.lepJetCSV >0.46")
DDT_sr = ComputeDDT("DDT_sr", toretain, nbins1, nbins2, H3)
DisplayDDT(DDT_sr, toretain, "DDT_SR"+cut, cut=cut)
# Sidebands
H3 = histo(Bkgs, cut=Tcut +"T.lepJetCSV <0.46")
	DDT_sb = ComputeDDT("DDT_sb", toretain, nbins1, nbins2, H3)
DisplayDDT(DDT_sb, toretain, "DDT_SB"+cut, cut=cut)
# Difference
DisplayDDT(DDT_sr-DDT_sb, toretain, "DDTdiff"+cut, cut=cut)
# Saving a file
Fout = TFile(savename+".root", "recreate")
Fout.cd()
DDT_sr.Write()
DDT_sb.Write()
Fout.Close()
study("", "DDT")
clist = ["LepPt<400", "LepTightness>2.9", "WPt>500"]
for i, c in enumerate(clist):
study(c, "DDT"+str(i))
| mit | 8,447,145,614,358,003,000 | 27.516393 | 109 | 0.652774 | false |
google-research/google-research | strategic_exploration/hrl/explorer.py | 1 | 6934 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import numpy as np
import random
from strategic_exploration.hrl.action import DefaultAction
from strategic_exploration.hrl.justification import Justification
from collections import Counter
class Explorer(object):
"""Defines an exploration scheme to find new abstract states."""
__metaclass__ = abc.ABCMeta
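  # Typical driver loop (sketch only; the real caller lives elsewhere in the
  # package):
  #   explorer.activate(node)            # begin an exploration episode at node
  #   while explorer.active():
  #       action, justification = explorer.act(state)
  #       ...                            # apply the action to the environment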
@classmethod
def from_config(cls, config, num_actions):
"""Creates an Explorer from a Config."""
if config.type == "uniform":
factory = UniformRandomExplorer
elif config.type == "repeat":
factory = RepeatedActionsExplorer
elif config.type == "mixture":
factory = MixtureExplorer
else:
raise ValueError("{} is not a valid Explorer type".format(config.type))
return factory.from_config(config, num_actions)
@abc.abstractmethod
def act(self, state):
"""Returns an Action and a string justification."""
raise NotImplementedError()
@abc.abstractmethod
def activate(self, node):
"""Starts the exploration episode.
Can be called again after
exploration episode terminates to begin a new exploration episode.
Args:
node (AbstractNode): node at which exploration episode is invoked
"""
raise NotImplementedError()
@abc.abstractmethod
def active(self):
"""Returns True while the exploration episode is ongoing.
Returns False before being activated or after exploration episode is
done.
"""
raise NotImplementedError()
class UniformRandomExplorer(Explorer):
"""Uniformly at random samples an action at each timestep.
Starts
exploration episode off by taking a random number of no-ops between 0 and
value specified in config (inclusive).
"""
@classmethod
def from_config(cls, config, num_actions):
# Make top inclusive
no_ops = np.random.randint(0, config.no_ops + 1)
return cls(config.exploration_horizon, num_actions, no_ops)
def __init__(self, exploration_horizon, num_actions, no_ops):
"""Constructs.
Args:
exploration_horizon (int): number of steps in exploration episode
num_actions (int): size of the action space
no_ops (int): number of no-ops to start exploration episode with
"""
self._exploration_horizon = exploration_horizon
self._steps_left = 0 # Not yet active
self._num_actions = num_actions
self._no_ops = no_ops
def act(self, state):
if not self.active():
raise ValueError("Exploration not active")
self._steps_left -= 1
if self._no_ops_left > 0:
action = DefaultAction(0)
s = "{} no-ops at {}: {} / {} visits, steps left {}".format(
self._no_ops, self._node.uid, self._node.visit_count,
self._node.min_visit_count, self._steps_left)
self._no_ops_left -= 1
return action, s
action = DefaultAction(random.randint(0, self._num_actions - 1))
s = "uniform random from {}: {} / {} visits, steps left {}".format(
self._node.uid, self._node.visit_count, self._node.min_visit_count,
self._steps_left)
return action, s
def activate(self, node):
if self.active():
raise ValueError("Exploration already active")
self._node = node
self._steps_left = self._exploration_horizon
self._no_ops_left = self._no_ops
def active(self):
return self._steps_left > 0
class RepeatedActionsExplorer(Explorer):
"""Samples an action and number of timesteps to repeat the action for.
Sampling is specified either as uniform or log uniform in config.
"""
@classmethod
def from_config(cls, config, num_actions):
if config.log_uniform:
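      # config.low / config.high are bounds in log-space here, so repeat
      # lengths end up distributed log-uniformly between exp(low) and exp(high).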
sampler = lambda: np.exp(np.random.uniform(config.low, config.high))
else:
sampler = lambda: np.random.randint(config.low, config.high)
discrete_sampler = lambda: int(sampler())
return cls(config.exploration_horizon, num_actions, discrete_sampler)
def __init__(self, exploration_horizon, num_actions, repeat_sampler):
"""
Args:
exploration_horizon (int): number of steps in exploration episode
num_actions (int): size of the action space
repeat_sampler (Callable): returns an int for the number of actions
to repeat for
"""
self._exploration_horizon = exploration_horizon
self._steps_left = 0 # Not yet active
self._num_actions = num_actions
self._repeat_sampler = repeat_sampler
def act(self, state):
if not self.active():
raise ValueError("RepeatedActionsExplorer not active")
self._steps_left -= 1
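    # Once the current repeat budget is used up, draw a fresh random action
    # and a new repeat count from the sampler.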
if self._repeat == 0:
self._repeat = self._repeat_sampler()
self._repeated_action = DefaultAction(
random.randint(0, self._num_actions - 1))
self._repeat -= 1
s = "repeat {} random from {}: {} / {} visits, steps left {}".format(
self._repeat, self._node.uid, self._node.visit_count,
self._node.min_visit_count, self._steps_left)
return self._repeated_action, s
def activate(self, node):
if self.active():
raise ValueError("Exploration already active")
self._node = node
self._steps_left = self._exploration_horizon
self._repeat = 0
def active(self):
return self._steps_left > 0
class MixtureExplorer(Explorer):
"""On each activation, uniformly at random selects one of its explorers and
follows it for the entire exploration episode.
"""
@classmethod
def from_config(cls, config, num_actions):
explorers = [
super(MixtureExplorer, cls).from_config(subconfig, num_actions)
for subconfig in config.mixture
]
return cls(explorers)
def __init__(self, explorers):
"""Constructs.
Args:
explorers (Explorer): different explorers to select from
"""
self._explorers = explorers
self._active_explorer = None
def act(self, state):
if not self.active():
raise ValueError("MixtureExplorer not active")
return self._active_explorer.act(state)
def activate(self, node):
if self.active():
raise ValueError("MixtureExplorer already active")
self._active_explorer = np.random.choice(self._explorers)
self._active_explorer.activate(node)
def active(self):
return self._active_explorer is not None and \
self._active_explorer.active()
| apache-2.0 | 648,319,589,055,274,200 | 30.375566 | 79 | 0.671474 | false |
koalalorenzo/python-digitalocean | digitalocean/tests/test_droplet.py | 1 | 43208 | import json
import unittest
import responses
import digitalocean
from .BaseTest import BaseTest
class TestDroplet(BaseTest):
@responses.activate
def setUp(self):
super(TestDroplet, self).setUp()
self.actions_url = self.base_url + "droplets/12345/actions/"
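        # All HTTP traffic is mocked with `responses`: every test registers a
        # canned JSON fixture for the endpoint it exercises, so no real
        # DigitalOcean API calls are made. setUp() itself pre-loads droplet
        # 12345 from the droplets/single.json fixture.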
data = self.load_from_file('droplets/single.json')
url = self.base_url + "droplets/12345"
responses.add(responses.GET,
url,
body=data,
status=200,
content_type='application/json')
self.droplet = digitalocean.Droplet(id='12345', token=self.token).load()
@responses.activate
def test_load(self):
data = self.load_from_file('droplets/single.json')
url = self.base_url + "droplets/12345"
responses.add(responses.GET,
url,
body=data,
status=200,
content_type='application/json')
droplet = digitalocean.Droplet(id='12345', token=self.token)
d = droplet.load()
self.assert_get_url_equal(responses.calls[0].request.url, url)
self.assertEqual(d.id, 12345)
self.assertEqual(d.name, "example.com")
self.assertEqual(d.memory, 512)
self.assertEqual(d.vcpus, 1)
self.assertEqual(d.disk, 20)
self.assertEqual(d.backups, False)
self.assertEqual(d.ipv6, True)
self.assertEqual(d.private_networking, False)
self.assertEqual(d.region['slug'], "nyc3")
self.assertEqual(d.status, "active")
self.assertEqual(d.image['slug'], "ubuntu-14-04-x64")
self.assertEqual(d.size_slug, '512mb')
self.assertEqual(d.created_at, "2014-11-14T16:36:31Z")
self.assertEqual(d.ip_address, "104.131.186.241")
self.assertEqual(d.ip_v6_address,
"2604:A880:0800:0010:0000:0000:031D:2001")
self.assertEqual(d.kernel['id'], 2233)
self.assertEqual(d.features, ["ipv6", "virtio"])
self.assertEqual(d.tags, [])
self.assertEqual(d.vpc_uuid, "08187eaa-90eb-40d6-a8f0-0222b28ded72")
@responses.activate
def test_power_off(self):
data = self.load_from_file('droplet_actions/power_off.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.power_off()
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "power_off"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "power_off")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_power_off_action(self):
data = self.load_from_file('droplet_actions/power_off.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.power_off(False)
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "power_off"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "power_off")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_power_on(self):
data = self.load_from_file('droplet_actions/power_on.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.power_on()
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "power_on"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "power_on")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_power_on_action(self):
data = self.load_from_file('droplet_actions/power_on.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.power_on(return_dict=False)
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "power_on"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "power_on")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_shutdown(self):
data = self.load_from_file('droplet_actions/shutdown.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.shutdown()
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "shutdown"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "shutdown")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_shutdown_action(self):
data = self.load_from_file('droplet_actions/shutdown.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.shutdown(return_dict=False)
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "shutdown"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "shutdown")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_reboot(self):
data = self.load_from_file('droplet_actions/reboot.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.reboot()
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "reboot"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "reboot")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_reboot_action(self):
data = self.load_from_file('droplet_actions/reboot.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.reboot(return_dict=False)
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "reboot"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "reboot")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_power_cycle(self):
data = self.load_from_file('droplet_actions/power_cycle.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.power_cycle()
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "power_cycle"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "power_cycle")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_power_cycle_action(self):
data = self.load_from_file('droplet_actions/power_cycle.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.power_cycle(return_dict=False)
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "power_cycle"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "power_cycle")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_reset_root_password(self):
data = self.load_from_file('droplet_actions/password_reset.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.reset_root_password()
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "password_reset"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "password_reset")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_reset_root_password_action(self):
data = self.load_from_file('droplet_actions/password_reset.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.reset_root_password(return_dict=False)
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "password_reset"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "password_reset")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_take_snapshot(self):
data = self.load_from_file('droplet_actions/snapshot.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.take_snapshot("New Snapshot")
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "snapshot", "name": "New Snapshot"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "snapshot")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_take_snapshot_action(self):
data = self.load_from_file('droplet_actions/snapshot.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.take_snapshot("New Snapshot", return_dict=False)
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "snapshot", "name": "New Snapshot"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "snapshot")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_resize(self):
data = self.load_from_file('droplet_actions/resize.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.resize("64gb")
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "resize", "size": "64gb", "disk": "true"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "resize")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_resize_action(self):
data = self.load_from_file('droplet_actions/resize.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.resize("64gb", False)
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "resize", "size": "64gb", "disk": "true"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "resize")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_restore(self):
data = self.load_from_file('droplet_actions/restore.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.restore(image_id=78945)
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"image": 78945, "type": "restore"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "restore")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_restore_action(self):
data = self.load_from_file('droplet_actions/restore.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.restore(image_id=78945, return_dict=False)
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"image": 78945, "type": "restore"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "restore")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_rebuild_passing_image(self):
"""
Test rebuilding an droplet from a provided image id.
"""
data = self.load_from_file('droplet_actions/rebuild.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.rebuild(image_id=78945)
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"image": 78945, "type": "rebuild"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "rebuild")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_rebuild_passing_image_action(self):
"""
Test rebuilding an droplet from a provided image id.
"""
data = self.load_from_file('droplet_actions/rebuild.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.rebuild(image_id=78945, return_dict=False)
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"image": 78945, "type": "rebuild"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "rebuild")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_rebuild_not_passing_image(self):
"""
Test rebuilding an droplet from its original parent image id.
"""
data = self.load_from_file('droplet_actions/rebuild.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.rebuild()
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"image": 6918990, "type": "rebuild"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "rebuild")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_rebuild_not_passing_image_action(self):
"""
Test rebuilding an droplet from its original parent image id.
"""
data = self.load_from_file('droplet_actions/rebuild.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.rebuild(return_dict=False)
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"image": 6918990, "type": "rebuild"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "rebuild")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_enable_backups(self):
data = self.load_from_file('droplet_actions/enable_backups.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.enable_backups()
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "enable_backups"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "enable_backups")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_disable_backups(self):
data = self.load_from_file('droplet_actions/disable_backups.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.disable_backups()
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "disable_backups"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "disable_backups")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_disable_backups_action(self):
data = self.load_from_file('droplet_actions/disable_backups.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.disable_backups(return_dict=False)
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "disable_backups"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "disable_backups")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_destroy(self):
url = self.base_url + "droplets/12345"
responses.add(responses.DELETE,
url,
status=204,
content_type='application/json')
self.droplet.destroy()
self.assertEqual(responses.calls[0].request.url,
self.base_url + "droplets/12345")
@responses.activate
def test_rename(self):
data = self.load_from_file('droplet_actions/rename.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.rename(name="New Name")
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "rename", "name": "New Name"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "rename")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_rename_action(self):
data = self.load_from_file('droplet_actions/rename.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.rename(name="New Name", return_dict=False)
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "rename", "name": "New Name"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "rename")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_enable_private_networking(self):
data = self.load_from_file('droplet_actions/enable_private_networking.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.enable_private_networking()
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "enable_private_networking"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "enable_private_networking")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_enable_private_networking_action(self):
data = self.load_from_file('droplet_actions/enable_private_networking.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.enable_private_networking(return_dict=False)
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "enable_private_networking"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "enable_private_networking")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_enable_ipv6(self):
data = self.load_from_file('droplet_actions/enable_ipv6.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.enable_ipv6()
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "enable_ipv6"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "enable_ipv6")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_enable_ipv6_action(self):
data = self.load_from_file('droplet_actions/enable_ipv6.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.enable_ipv6(return_dict=False)
self.assertEqual(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{"type": "enable_ipv6"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "enable_ipv6")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
def test_change_kernel_exception(self):
with self.assertRaises(Exception) as error:
self.droplet.change_kernel(kernel=123)
exception = error.exception
self.assertEqual(str(exception), 'Use Kernel object')
@responses.activate
def test_change_kernel(self):
data = self.load_from_file('droplet_actions/change_kernel.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.change_kernel(digitalocean.Kernel(id=123))
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{u"kernel": 123, u"type": u"change_kernel"})
self.assertEqual(response['action']['id'], 54321)
self.assertEqual(response['action']['status'], "in-progress")
self.assertEqual(response['action']['type'], "change_kernel")
self.assertEqual(response['action']['resource_id'], 12345)
self.assertEqual(response['action']['resource_type'], "droplet")
@responses.activate
def test_change_kernel_action(self):
data = self.load_from_file('droplet_actions/change_kernel.json')
responses.add(responses.POST, self.actions_url,
body=data,
status=201,
content_type='application/json')
response = self.droplet.change_kernel(digitalocean.Kernel(id=123),
return_dict=False)
self.assert_url_query_equal(responses.calls[0].request.url,
self.actions_url)
self.assertEqual(json.loads(responses.calls[0].request.body),
{u"kernel": 123, u"type": u"change_kernel"})
self.assertEqual(response.id, 54321)
self.assertEqual(response.status, "in-progress")
self.assertEqual(response.type, "change_kernel")
self.assertEqual(response.resource_id, 12345)
self.assertEqual(response.resource_type, "droplet")
@responses.activate
def test_create_no_keys(self):
data = self.load_from_file('droplet_actions/create.json')
url = self.base_url + "droplets/"
responses.add(responses.POST,
url,
body=data,
status=202,
content_type='application/json')
droplet = digitalocean.Droplet(name="example.com",
size_slug="512mb",
image="ubuntu-14-04-x64",
region="nyc3",
backups=True,
ipv6=True,
private_networking=False,
monitoring=True,
user_data="Some user data.",
token=self.token,
tags=["web"],
vpc_uuid="08187eaa-90eb-40d6-a8f0-0222b28ded72")
droplet.create()
self.assert_url_query_equal(responses.calls[0].request.url, url)
self.maxDiff = None
self.assertEqual(
json.loads(responses.calls[0].request.body),
{u"name": u"example.com", u"region": u"nyc3",
u"user_data": u"Some user data.", u"ipv6": True,
u"private_networking": False, u"monitoring": True,
u"backups": True, u"image": u"ubuntu-14-04-x64",
u"size": u"512mb", u"ssh_keys": [],
u"volumes": [], u"tags": ["web"],
u"vpc_uuid": "08187eaa-90eb-40d6-a8f0-0222b28ded72"})
self.assertEqual(droplet.id, 3164494)
self.assertEqual(droplet.action_ids, [36805096])
@responses.activate
def test_create_multiple_no_keys(self):
data = self.load_from_file('droplet_actions/create_multiple.json')
url = self.base_url + "droplets/"
responses.add(responses.POST,
url,
body=data,
status=202,
content_type='application/json')
droplets = digitalocean.Droplet.create_multiple(names=["example.com",
"example2.com"],
size_slug="512mb",
image="ubuntu-14-04-x64",
region="nyc3",
backups=True,
ipv6=True,
private_networking=False,
monitoring=True,
user_data="Some user data.",
token=self.token,
tags=["web"],
vpc_uuid="08187eaa-90eb-40d6-a8f0-0222b28ded72")
self.assert_url_query_equal(responses.calls[0].request.url, url)
self.assertEqual(len(droplets), 2)
self.assertEqual(droplets[0].id, 3164494)
self.assertEqual(droplets[1].id, 3164495)
self.assertEqual(droplets[0].action_ids, [36805096])
self.assertEqual(droplets[1].action_ids, [36805096])
self.maxDiff = None
self.assertEqual(
json.loads(responses.calls[0].request.body),
{u"names": [u"example.com", u"example2.com"], u"region": u"nyc3",
u"user_data": u"Some user data.", u"ipv6": True,
u"private_networking": False, u"monitoring": True,
u"backups": True, u"image": u"ubuntu-14-04-x64",
u"size": u"512mb", u"tags": ["web"],
u"vpc_uuid": "08187eaa-90eb-40d6-a8f0-0222b28ded72"})
@responses.activate
def test_get_actions(self):
data = self.load_from_file('actions/multi.json')
create = self.load_from_file('actions/create_completed.json')
ipv6 = self.load_from_file('actions/ipv6_completed.json')
responses.add(responses.GET, self.actions_url,
body=data,
status=200,
content_type='application/json')
responses.add(responses.GET, self.actions_url + "39388122",
body=create,
status=200,
content_type='application/json')
responses.add(responses.GET, self.actions_url + "39290099",
body=ipv6,
status=200,
content_type='application/json')
actions = self.droplet.get_actions()
self.assertEqual(len(actions), 2)
self.assertEqual(len(responses.calls), 3)
self.assert_get_url_equal(responses.calls[0].request.url,
self.actions_url)
self.assert_get_url_equal(responses.calls[1].request.url,
self.actions_url + "39388122")
self.assert_get_url_equal(responses.calls[2].request.url,
self.actions_url + "39290099")
self.assertEqual(actions[0].id, 39290099)
self.assertEqual(actions[0].type, "create")
self.assertEqual(actions[0].status, "completed")
self.assertEqual(actions[1].id, 39388122)
self.assertEqual(actions[1].type, "enable_ipv6")
self.assertEqual(actions[1].status, "completed")
@responses.activate
def test_get_action(self):
data = self.load_from_file('actions/create_completed.json')
url = self.base_url + "actions/39388122"
responses.add(responses.GET,
url,
body=data,
status=200,
content_type='application/json')
action = self.droplet.get_action(39388122)
self.assert_get_url_equal(responses.calls[0].request.url, url)
self.assertEqual(action.id, 39290099)
self.assertEqual(action.type, "create")
self.assertEqual(action.status, "completed")
def test_get_snapshots(self):
snapshots = self.droplet.get_snapshots()
self.assertEqual(len(snapshots), 1)
self.assertEqual(snapshots[0].id, 7938206)
@responses.activate
def test_get_kernel_available_no_pages(self):
data = self.load_from_file('kernels/list.json')
url = self.base_url + "droplets/12345/kernels/"
responses.add(responses.GET,
url,
body=data,
status=200,
content_type='application/json')
kernels = self.droplet.get_kernel_available()
self.assert_get_url_equal(responses.calls[0].request.url, url)
self.assertEqual(len(kernels), 2)
self.assertEqual(kernels[0].id, 61833229)
self.assertEqual(kernels[0].name,
"Ubuntu 14.04 x32 vmlinuz-3.13.0-24-generic")
@responses.activate
def test_get_kernel_available_with_pages(self):
one = self.load_from_file('kernels/page_one.json')
two = self.load_from_file('kernels/page_two.json')
url_0 = self.base_url + "droplets/12345/kernels/"
responses.add(responses.GET,
url_0,
body=one,
status=200,
content_type='application/json')
url_1 = self.base_url + "droplets/12345/kernels?page=2&per_page=200"
responses.add(responses.GET,
url_1,
body=two,
status=200,
content_type='application/json',
match_querystring=True)
kernels = self.droplet.get_kernel_available()
self.assert_get_url_equal(responses.calls[0].request.url, url_0)
self.assert_url_query_equal(responses.calls[1].request.url, url_1)
self.assertEqual(len(kernels), 3)
self.assertEqual(kernels[0].id, 61833229)
self.assertEqual(kernels[0].name,
"Ubuntu 14.04 x32 vmlinuz-3.13.0-24-generic")
self.assertEqual(kernels[2].id, 231)
self.assertEqual(kernels[2].name,
"Ubuntu 14.04 x64 vmlinuz-3.13.0-32-generic")
@responses.activate
def test_update_volumes_data(self):
droplet_response = self.load_from_file('droplets/single.json')
volume_response = self.load_from_file('volumes/single.json')
        url_droplet = self.base_url + "droplets/12345"
url_volume = self.base_url + "volumes/506f78a4-e098-11e5-ad9f-000f53306ae1"
responses.add(responses.GET,
url_droplet,
body=droplet_response,
status=200,
content_type='application/json')
responses.add(responses.GET,
url_volume,
body=volume_response,
status=200,
content_type='application/json')
droplet = digitalocean.Droplet(id='12345', token=self.token)
d = droplet.load()
d.update_volumes_data()
self.assert_get_url_equal(responses.calls[0].request.url, url_droplet)
self.assert_get_url_equal(responses.calls[1].request.url, url_volume)
self.assertEqual(len(d.volumes), 1)
self.assertEqual(d.volumes[0].id, '506f78a4-e098-11e5-ad9f-000f53306ae1')
self.assertEqual(d.volumes[0].name, 'example')
self.assertEqual(d.volumes[0].region['slug'], 'nyc1')
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | -3,303,552,210,026,232,000 | 41.61144 | 104 | 0.570982 | false |
yuxiang-zhou/deepmachine | deepmachine/layers/mesh.py | 1 | 6549 | import scipy
import numpy as np
import tensorflow as tf
import menpo.io as mio
import keras
import keras.backend as K
import io
from scipy import sparse
from ..utils import get_custom_objects, mesh as graph
from ..layers import Layer, InputSpec
class MeshReLU1B(Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def build(self, input_shape):
"""Bias and ReLU. One bias per filter."""
_, _, n_channels = input_shape
# Create a trainable weight variable for this layer.
self.bias = self.add_weight(
name='kernel',
shape=[1, n_channels],
initializer='uniform',
trainable=True)
super().build(input_shape) # Be sure to call this at the end
def call(self, x):
return K.relu(x + self.bias)
def compute_output_shape(self, input_shape):
return input_shape
class MeshReLU2B(Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def build(self, input_shape):
"""Bias and ReLU. One bias per filter."""
_, n_vertexes, n_channels = input_shape
# Create a trainable weight variable for this layer.
self.bias = self.add_weight(
name='kernel',
shape=[n_vertexes, n_channels],
initializer='uniform',
trainable=True)
super().build(input_shape) # Be sure to call this at the end
def call(self, x):
return K.relu(x + self.bias)
def compute_output_shape(self, input_shape):
return input_shape
class MeshPoolTrans(Layer):
def poolwT(self, x):
L = self._gl
Mp = L.shape[0]
_, M, Fin = x.get_shape().as_list()
# Rescale transform Matrix L and store as a TF sparse tensor. Copy to not modify the shared L.
L = scipy.sparse.csr_matrix(L)
L = L.tocoo()
indices = np.column_stack((L.row, L.col))
L = tf.SparseTensor(indices, L.data, L.shape)
L = tf.sparse_reorder(L)
x = tf.transpose(x, perm=[1, 2, 0]) # M x Fin x N
x = tf.reshape(x, [M, -1]) # M x Fin*N
x = tf.sparse_tensor_dense_matmul(L, x) # Mp x Fin*N
x = tf.reshape(x, [Mp, Fin, -1]) # Mp x Fin x N
x = tf.transpose(x, perm=[2, 0, 1]) # N x Mp x Fin
return x
def __init__(self, graph_laplacians, **kwargs):
self._gl = graph_laplacians.astype(np.float32)
super().__init__(**kwargs)
def build(self, input_shape):
"""Bias and ReLU. One bias per filter."""
super().build(input_shape) # Be sure to call this at the end
def call(self, x):
return self.poolwT(x)
def compute_output_shape(self, input_shape):
Mp = self._gl.shape[0]
N, _, Fin = input_shape
return (N, Mp, Fin)
def get_config(self):
# serialize sparse matrix
byte_stream = io.BytesIO()
sparse.save_npz(byte_stream, self._gl)
base_config = super().get_config()
base_config['graph_laplacians'] = byte_stream.getvalue().decode(
'latin1')
return base_config
@classmethod
def from_config(cls, config, custom_objects=None):
config['graph_laplacians'] = sparse.load_npz(
io.BytesIO(config['graph_laplacians'].encode('latin1')))
return cls(**config)
MeshPool = MeshPoolTrans
class MeshConv(Layer):
def chebyshev5(self, x, L, Fout, nK):
L = L.astype(np.float32)
_, M, Fin = x.get_shape().as_list()
# Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
L = scipy.sparse.csr_matrix(L)
L = graph.rescale_L(L, lmax=2)
L = L.tocoo()
indices = np.column_stack((L.row, L.col))
L = tf.SparseTensor(indices, L.data, L.shape)
L = tf.sparse_reorder(L)
# Transform to Chebyshev basis
x0 = tf.transpose(x, perm=[1, 2, 0]) # M x Fin x N
x0 = tf.reshape(x0, [M, -1]) # M x Fin*N
x = tf.expand_dims(x0, 0) # 1 x M x Fin*N
def concat(x, x_):
x_ = tf.expand_dims(x_, 0) # 1 x M x Fin*N
return tf.concat([x, x_], axis=0) # K x M x Fin*N
if nK > 1:
x1 = tf.sparse_tensor_dense_matmul(L, x0)
x = concat(x, x1)
for k in range(2, nK):
x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0 # M x Fin*N
x = concat(x, x2)
x0, x1 = x1, x2
x = tf.reshape(x, [nK, M, Fin, -1]) # K x M x Fin x N
x = tf.transpose(x, perm=[3, 1, 2, 0]) # N x M x Fin x K
x = tf.reshape(x, [-1, Fin*nK]) # N*M x Fin*K
# Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
W = self._weight_variable
x = tf.matmul(x, W) # N*M x Fout
out = tf.reshape(x, [-1, M, Fout]) # N x M x Fout
return out
def __init__(self, graph_laplacians, polynomial_order=6, nf=16, **kwargs):
self._gl = graph_laplacians
self._nf = nf
self._po = polynomial_order
super().__init__(**kwargs)
def build(self, input_shape):
"""Bias and ReLU. One bias per filter."""
_, _, n_channels = input_shape
# Create a trainable weight variable for this layer.
self._weight_variable = self.add_weight(
name='kernel',
shape=[n_channels * self._po, self._nf],
initializer='uniform',
trainable=True)
super().build(input_shape) # Be sure to call this at the end
def call(self, x):
return self.chebyshev5(x, self._gl, self._nf, self._po)
def compute_output_shape(self, input_shape):
N, M, _ = input_shape
return (N, M, self._nf)
def get_config(self):
# serialize sparse matrix
byte_stream = io.BytesIO()
sparse.save_npz(byte_stream, self._gl)
base_config = super().get_config()
base_config['polynomial_order'] = self._po
base_config['nf'] = self._nf
base_config['graph_laplacians'] = byte_stream.getvalue().decode(
'latin1')
return base_config
@classmethod
def from_config(cls, config, custom_objects=None):
config['graph_laplacians'] = sparse.load_npz(
io.BytesIO(config['graph_laplacians'].encode('latin1')))
return cls(**config)
get_custom_objects().update({
'MeshReLU1B': MeshReLU1B,
'MeshReLU2B': MeshReLU2B,
'MeshPool': MeshPool,
'MeshPoolTrans': MeshPoolTrans,
'MeshConv': MeshConv,
})
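# Illustrative sketch (not part of the original module): one way these layers
# could be wired into a Keras model. The laplacian L and the pooling transform
# D below are placeholders; in practice they come from the project's mesh/graph
# utilities, with D of shape (n_pooled_vertices, n_vertices).
#
#   import keras
#   from scipy import sparse
#
#   L = sparse.identity(100, format='csr')                  # placeholder laplacian
#   D = sparse.random(25, 100, density=0.1, format='csr')   # placeholder pooling
#
#   inputs = keras.layers.Input(shape=(100, 3))
#   x = MeshConv(L, polynomial_order=6, nf=16)(inputs)
#   x = MeshReLU1B()(x)
#   x = MeshPool(D)(x)
#   model = keras.models.Model(inputs, x)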
| mit | 2,244,360,863,735,461,000 | 31.261084 | 102 | 0.56375 | false |
fredokun/TikZ-Editor | tikz_editor/tools/documentIO/template.py | 1 | 1780 | # Copyright 2012 (C) Mickael Menu <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from string import Template
from .tags import *
class FileTemplate(object):
"""
The file template tool generates a full LaTeX/TikZ source from a template, preamble
and source.
"""
def __init__(self, template, preamble, source):
assert preamble is not None and source is not None
super(FileTemplate, self).__init__()
self.content = ""
self.preamble = preamble
self.source = source
self.latex_template = Template(template)
def buildFileContent(self):
"""
Builds the TikZ document with given preamble and source and the document template.
"""
self._buildPreambleChunk()
self._buildSourceChunk()
self._buildContentFromTemplate()
return self.content
def _buildPreambleChunk(self):
self.preamble = "%s\n%s\n%s\n" % (PREAMBLE_BEGIN_TAG, self.preamble, PREAMBLE_END_TAG)
def _buildSourceChunk(self):
self.source = "%s\n%s\n%s\n" % (SOURCE_BEGIN_TAG, self.source, SOURCE_END_TAG)
def _buildContentFromTemplate(self):
self.content = TIKZ_TAG + "\n"
self.content += self.latex_template.safe_substitute(PREAMBLE=self.preamble, SOURCE=self.source)
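# Illustrative sketch (not part of the original module): building a document
# from a template string. The template text below is a placeholder; it only
# needs the $PREAMBLE and $SOURCE fields substituted above.
#
#   template = ("\\documentclass{article}\n"
#               "$PREAMBLE\n"
#               "\\begin{document}\n$SOURCE\n\\end{document}\n")
#   content = FileTemplate(template, preamble, source).buildFileContent()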
| gpl-2.0 | 7,310,290,482,443,180,000 | 33.901961 | 97 | 0.738764 | false |
MISP/misp-modules | misp_modules/modules/expansion/joesandbox_query.py | 1 | 2330 | # -*- coding: utf-8 -*-
import jbxapi
import json
from joe_parser import JoeParser
misperrors = {'error': 'Error'}
inputSource = ['link']
moduleinfo = {'version': '0.2', 'author': 'Christian Studer',
'description': 'Query Joe Sandbox API with a report URL to get the parsed data.',
'module-type': ['expansion']}
moduleconfig = ['apiurl', 'apikey', 'import_pe', 'import_mitre_attack']
def handler(q=False):
if q is False:
return False
request = json.loads(q)
apiurl = request['config'].get('apiurl') or 'https://jbxcloud.joesecurity.org/api'
apikey = request['config'].get('apikey')
parser_config = {
"import_pe": request["config"].get('import_pe', "false") == "true",
"mitre_attack": request["config"].get('import_mitre_attack', "false") == "true",
}
if not apikey:
return {'error': 'No API key provided'}
url = request['attribute']['value']
if "/submissions/" not in url:
return {'error': "The URL does not point to a Joe Sandbox analysis."}
submission_id = url.split('/')[-1] # The URL has the format https://example.net/submissions/12345
joe = jbxapi.JoeSandbox(apiurl=apiurl, apikey=apikey, user_agent='MISP joesandbox_query')
try:
joe_info = joe.submission_info(submission_id)
except jbxapi.ApiError as e:
return {'error': str(e)}
if joe_info["status"] != "finished":
return {'error': "The analysis has not finished yet."}
if joe_info['most_relevant_analysis'] is None:
return {'error': "No analysis belongs to this submission."}
analysis_webid = joe_info['most_relevant_analysis']['webid']
joe_parser = JoeParser(parser_config)
joe_data = json.loads(joe.analysis_download(analysis_webid, 'jsonfixed')[1])
joe_parser.parse_data(joe_data['analysis'])
joe_parser.finalize_results()
return {'results': joe_parser.results}
def introspection():
modulesetup = {}
try:
userConfig
modulesetup['userConfig'] = userConfig
except NameError:
pass
try:
inputSource
modulesetup['input'] = inputSource
except NameError:
pass
modulesetup['format'] = 'misp_standard'
return modulesetup
def version():
moduleinfo['config'] = moduleconfig
return moduleinfo
| agpl-3.0 | 5,023,869,576,487,750,000 | 29.657895 | 102 | 0.632189 | false |
Twangist/log_calls | log_calls/proxy_descriptors.py | 1 | 4911 | __author__ = 'brianoneill'
__doc__ = """
Module version = '0.1.14'
See docstrings for install_proxy_descriptor and ClassInstanceAttrProxy.
"""
from itertools import product, chain
__all__ = ['install_proxy_descriptor', 'ClassInstanceAttrProxy' ]
def install_proxy_descriptor(proxy_obj, attr_name_proxied_instance, descr_name, data=True, readonly=False):
"""
Create and install (setattr) on proxy_obj a descriptor named descr_name,
assuming proxy_obj has an attribute named attr_name_proxied_instance
which 'points' to an object that already has an attr/descr named descr_name;
the created descriptor will then just defer to that anterior attr/descr.
Suppose a, b are instances of classes A, B resp.,
and suppose b has an attr 'my_a' that points to a:
assert b.my_a is a
Thus proxy_obj == b,
attr_name_proxied_instance == 'my_a'.
Suppose a has an attribute 'x' which b wants to reflect
aka proxy, so that the value of b.x will be (will invoke) a.x.
b can set this up as follows:
install_proxy_descriptor(b, 'my_a', 'x') # b: b itself would say, self
data: True iff we should create & install a data descriptor,
else create & install a non-data-descriptor.
readonly: True iff created data descriptor should be readonly
(i.e. raise AttributeError on attempted 'set' operations).
"""
class ProxyDataDescr():
def __get__(this_descr, proxy, owner):
"todo"
### print("**** descriptor %s __get__ called" % descr_name)
return getattr(
getattr(proxy, attr_name_proxied_instance),
descr_name)
def __set__(this_descr, proxy, value):
"todo"
if not readonly:
setattr(
getattr(proxy, attr_name_proxied_instance),
descr_name,
value)
else:
# no can do:
raise AttributeError("%s is read-only on %r" % (descr_name, proxy))
class ProxyMethodDescr():
def __get__(this_descr, proxy, owner):
"todo"
### print("**** descriptor %s __get__ called" % descr_name)
return getattr(
getattr(proxy, attr_name_proxied_instance),
descr_name)
proxy_descr = (ProxyDataDescr if data else ProxyMethodDescr)()
setattr(proxy_obj.__class__, descr_name, proxy_descr)
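# Illustrative sketch (not part of the original module) of the a/b example from
# the docstring above; the classes A and B are placeholders.
#
#   class A:
#       def __init__(self): self.x = 1
#
#   class B:
#       def __init__(self, a): self.my_a = a
#
#   a = A(); b = B(a)
#   install_proxy_descriptor(b, 'my_a', 'x', readonly=True)
#   assert b.x == a.x     # reads are forwarded to a.x
#   # b.x = 2             # would raise AttributeError (readonly data descriptor)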
class ClassInstanceAttrProxy():
"""Attributes on (instances of) some other class ==>
descriptors on (instances of) this class
(data descriptors are readonly).
The transform '==>' is accomplished by install_proxy_descriptor.
This class keeps a record of which other classes it has already created
descriptors for (_classes_and_attrs_proxied, initially empty set)
-- a little naively,
classname + marker + tuple(data_descriptor_names) + marker + tuple(method_descriptor_names).
Note that the attributes of instances of other class that are exposed
in this way can themselves be descriptors (e.g. properties).
"""
# Only create descriptors on the class once,
# for class of class_instance + these attributes/descr names:
# for a given descr_name (attr name) they'd be the same :)
_classes_and_attrs_proxied = set()
def __init__(self, *, class_instance, data_descriptor_names, method_descriptor_names):
"""What makes these work is the class_instance arg,
which a descriptor uses to access a class_instance
and from that its attr of the same name."""
self._proxied_instance_ = class_instance
class_and_descr_names = (
class_instance.__class__.__name__
+ '|'
+ ','.join(data_descriptor_names)
+ '|'
+ ','.join(method_descriptor_names)
)
if class_and_descr_names not in self._classes_and_attrs_proxied:
# Create descriptors *** on the class ***, once only per class.
# Same __get__/__set__ functions, called on different instances.
# It doesn't work to create them on instances:
# setattr(self, ... ) doesn't fly.
class_descr_names = chain(product(data_descriptor_names, {True}),
product(method_descriptor_names, {False})
)
for descr_name, is_data in list(class_descr_names):
# Create & add descriptor to this class. readonly only matters if is_data
install_proxy_descriptor(self, '_proxied_instance_', descr_name,
data=is_data, readonly=is_data)
# Record this class as 'already (successfully!) handled'
self._classes_and_attrs_proxied.add(class_and_descr_names)
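# Illustrative sketch (not part of the original module): exposing attributes of
# another instance through the proxy. 'Settings' and its attributes are
# placeholders.
#
#   class Settings:
#       enabled = True
#       def describe(self): return 'on' if self.enabled else 'off'
#
#   proxy = ClassInstanceAttrProxy(class_instance=Settings(),
#                                  data_descriptor_names=['enabled'],
#                                  method_descriptor_names=['describe'])
#   assert proxy.enabled is True      # readonly data descriptor
#   assert proxy.describe() == 'on'   # non-data descriptor -> bound method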
| mit | -3,673,459,081,506,548,700 | 43.243243 | 107 | 0.598656 | false |
Endika/connector-accountedge | hr_employee_accountedge/__openerp__.py | 1 | 1503 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Supplier id on expense line",
"version": "1.0",
"author": "Savoir-faire Linux,Odoo Community Association (OCA)",
"website": "http://www.savoirfairelinux.com",
"license": "GPL-3 or any later version",
"category": "Human Resources",
"description": """
This module adds the 'supplier_id_accountedge' field to the
hr.employee model.
""",
"depends": ['hr_expense'],
"data": [
'hr_employee_accountedge.xml',
],
'installable': False,
}
| agpl-3.0 | -7,775,416,562,202,140,000 | 38.421053 | 79 | 0.594793 | false |
neuroidss/nupic.vision | parameters.py | 1 | 5662 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import random
def linearRange(start, stop, step):
"""Make a list of allowed parameter values."""
pval = start
plist = [pval]
while pval < stop:
pval = pval + step
plist.append(pval)
return plist
class Parameters(object):
"""
This class provides methods for searching ranges of parameters to see how
they affect performance.
"""
def __init__(self):
"""
Have to keep track of the names and valid values of each parameter
defined by the user.
"""
# list of parameter names
self._names = []
# list of allowed parameter values
self._allowedValues = []
# list of past and present parameter value indexes
self._valueIndexes = []
# list of past and present results that correspond to each set of parameter
# values
self._results = []
# the number of possible combinations of parameter values for all parameters
self.numCombinations = 1
def define(self, name, allowedValues):
"""
This method allows users to define a parameter by providing its name and
a list of values for the parameter.
"""
if name not in self._names:
self._names.append(name)
self._allowedValues.append(allowedValues)
self.numCombinations = self.numCombinations * len(allowedValues)
else:
print "Parameter: ", name, " is already defined!"
def getNames(self):
"""
This method returns the names of all defined parameters.
"""
return self._names
def getValue(self, name):
"""
This method returns the current value of the parameter specified by name.
"""
assert name in self._names
i = self._names.index(name)
assert len(self._valueIndexes[-1]) > i
return self._allowedValues[i][self._valueIndexes[-1][i]]
def getAllValues(self):
"""
This method returns the current values of all defined parameters.
"""
return [self._allowedValues[i][j] for i,j in
enumerate(self._valueIndexes[-1])]
def appendResults(self,item):
"""
This method adds an item to the results list.
"""
print "Just completed parameter Combination: ", self.getAllValues()
self._results.append(item)
print
print "Parameter combinations completed: ",
print len(self._results), "/", self.numCombinations
print
def getNumResults(self):
"""
This method returns the number of items in the results list.
"""
return len(self._results)
def printResults(self, resultNames, formatStrings):
"""
This method prints a summary of all the results.
"""
print
print "Summary of Results"
print
    headerList = list(self.getNames())  # copy, so resultNames are not appended to self._names
headerList.extend(resultNames)
headerString = ", ".join(headerList)
print headerString
for i, result in enumerate(self._results):
valueString = str([self._allowedValues[j][k] for j,k in
enumerate(self._valueIndexes[i])])[1:-1]
for j,formatString in enumerate(formatStrings):
valueString += formatString % result[j]
print valueString
def nextRandomCombination(self):
"""
This method randomly selects a value for each parameter from its list of
allowed parameter values. If the resulting combination has already been
used then it tries again.
"""
    # choose a random index into each parameter's allowed values, so the
    # combination is stored in the same form used by nextCombination()
    randomIndexes = [random.randrange(len(self._allowedValues[i]))
                     for i in range(len(self._names))]
    if randomIndexes in self._valueIndexes:
      self.nextRandomCombination()
    else:
      self._valueIndexes.append(randomIndexes)
print "Parameter Combination: ", self.getAllValues()
print
def nextCombination(self):
"""
This method finds the next combination of parameter values using the
allowed value lists for each parameter.
"""
if len(self._valueIndexes) == 0:
# list of value indexes is empty so this is the first combination,
# each parameter gets the first value in its list of allowed values
self._valueIndexes.append([0 for i in range(len(self._names))])
else:
newValueIndexes = self._valueIndexes[-1][:]
i = 0
while i < len(self._names):
# if current value is not the last in the list
if self._valueIndexes[-1][i] != len(self._allowedValues[i]) - 1:
# change parameter to next value in allowed value list and return
newValueIndexes[i] += 1
break
else:
# change parameter to first value in allowed value list
newValueIndexes[i] = 0
# move next parameter to next value in its allowed value list
i = i + 1
self._valueIndexes.append(newValueIndexes)
print "Parameter Combination: ", self.getAllValues()
print
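# Illustrative sketch (not part of the original module): a typical grid search
# over the defined parameters. The parameter names, values and runExperiment()
# are placeholders.
#
#   params = Parameters()
#   params.define("synPermInc", linearRange(0.01, 0.05, 0.01))
#   params.define("boostStrength", [0.0, 1.0, 2.0])
#   while params.getNumResults() < params.numCombinations:
#     params.nextCombination()
#     accuracy, seconds = runExperiment(params.getAllValues())
#     params.appendResults((accuracy, seconds))
#   params.printResults(["accuracy", "seconds"], [", %.3f", ", %.1f"])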
| gpl-3.0 | -6,478,117,063,875,853,000 | 29.771739 | 80 | 0.656658 | false |
YudinYury/Python_Netology_homework | les16_hw_2.py | 1 | 3666 | '''lesson_1_6 homework «Breaking down algorithmic interview problems»
solving problems from HackerRank
'''
#TODO 2:
# CSS colors are defined using a hexadecimal (HEX) notation for the combination of Red, Green, and Blue color values (RGB).
# Specifications of HEX Color Code
#
# ■ It must start with a '#' symbol.
# ■ It can have 3 or 6 digits.
# ■ Each digit is in the range of 0 to F. (1,...,9,0,A,B,C,D,E and F).
# ■ A-F letters can be lower case. (a,b,c,d,e and f are also valid digits).
#
# Valid Hex Color Codes
# #FFF
# #025
# #F0A1FB
#
# Invalid Hex Color Codes
# #fffabg
# #abcf
# #12365erff
#
# You are given N lines of CSS code. Your task is to print all valid Hex Color Codes, in order of their occurrence from top to bottom.
#
# CSS Code Pattern
# Selector
# {
# Property: Value;
# }
#
# Input Format
# The first line contains N, the number of code lines.
# The next N lines contains CSS Codes.
#
# Constraints
# 0 < N < 50
#
# Output Format
# Output the color codes with '#' symbols on separate lines.
#
# Sample Input
# 11
# #BED
# {
# color: #FfFdF8; background-color:#aef;
# font-size: 123px;
# background: -webkit-linear-gradient(top, #f9f9f9, #fff);
# }
# #Cab
# {
# background-color: #ABC;
# border: 2px dashed #fff;
# }
# Sample Output
# #FfFdF8
# #aef
# #f9f9f9
# #fff
# #ABC
# #fff
signature_list = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
hex_list=[] # list for the result
ss_list=[
'#BED',
'{',
' color: #FfFdF8; background-color:#aef;',
' font-size: 123px;', '',
'}',
'#Cab',
'{',
' background-color: #ABC;',
' border: 2px dashed #fff;',
'}'
]
sstr='#BED{ color: #FfFdF8; background-color:#aef; font-size: 123px;}#Cab{ background-color: #ABC; border: 2px dashed #fff;}'
# n = int(input())
n=11
ss=''
it=0
body_of_tag=0
for ss in ss_list:
step = 0
if ss.find('{') != -1:
body_of_tag=1
        # print('entered tag body')
continue
if ss.find('}') != -1:
body_of_tag=0
        # print('left tag body')
continue
while body_of_tag:
it=ss.find('#',it,)
if it == -1:
it=0
break
it+=1
# print('begin find in ', ss[it::1])
new_num_str = '#'
for single in ss[it::1]:
if single.lower() in signature_list:
new_num_str+=single
# print('new_num_str=',new_num_str)
            else: # the run of hex digits (0...9, A...F) has ended
# print('end of find')
                it += len(new_num_str) # skip the substring already checked and continue searching from the new position
step += len(new_num_str)
# print('it=', it, ' ss()=',ss[it::1])
# print('body_of_tag =', body_of_tag)
if len(new_num_str)==4 or len(new_num_str)==7:
hex_list.append(new_num_str)
# print('hex_list', hex_list)
new_num_str = ''
break
else:
new_num_str=''
break
for out in hex_list:
print(out)
# for i in range(11):
# # sstr+=input()
# # # ss.append(input())
# ss=ss_list.pop([0])
# print(ss)
# print(sstr)
# si=sstr.find('#')
# if si==-1:
# print('Not found "#"')
# exit()
# hex_num_str=''
# for ch in sstr:
# print(ch)
# if not ch.isdigit():
# continue
# if
| gpl-3.0 | -8,185,796,143,026,287,000 | 22.369128 | 137 | 0.525847 | false |
xpostudio4/red-de-emprendimiento | app/institutions/forms.py | 1 | 7474 |
# -*- encoding: utf-8 -*-
#core Django Imports
from django import forms
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField, AuthenticationForm
from django.forms.extras import widgets
#Third party apps
#Project apps import
from .models import UserProfile, Category, Event, MailingList, Organization
class CustomUserCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password',
widget=forms.PasswordInput(attrs={'placeholder':'Contraseña'}))
password2 = forms.CharField(label='Password confirmation',
widget=forms.PasswordInput(attrs={'placeholder':'Confirmar Contraseña'}))
username = forms.CharField(required=False, max_length=30)
full_name = forms.CharField(max_length=30,
widget=forms.TextInput(attrs={"placeholder":'Nombre Completo'}))
email = forms.CharField(max_length=30,
widget=forms.TextInput(attrs={"placeholder":'Email'}))
class Meta:
model = UserProfile
fields = ('email', 'full_name')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value.
return self.initial["password"]
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def __init__(self, *args, **kwargs):
super(CustomUserCreationForm, self).__init__(*args, **kwargs)
#change the html class of all the elements
for field in self.fields:
#of the form to get bootstrap 3 styling
self.fields[field].widget.attrs.update({'class':'form-control'})
self.fields.keyOrder = ['email', 'full_name', 'password1', 'password2']
def save(self, commit=True):
# Save the provided password in hashed format
user = super(CustomUserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class DashboardUserCreationForm(forms.ModelForm):
"""
This form is used to create a user from the dashboard.
"""
class Meta:
model = UserProfile
fields = ('email', 'full_name')
def __init__(self, *args, **kwargs):
super(DashboardUserCreationForm, self).__init__(*args, **kwargs)
#change the html class of all the elements of the form to get
#bootstrap 3 styling
for field in self.fields:
self.fields[field].widget.attrs.update({'class':'form-control'})
class UserProfileChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = UserProfile
fields = ('full_name', 'email')
widgets = {'user_form': forms.HiddenInput()}
def __init__(self, *args, **kwargs):
super(UserProfileChangeForm, self).__init__(*args, **kwargs)
#change the html class of all the elements of
#the form to get bootstrap 3 styling
for field in self.fields:
self.fields[field].widget.attrs.update({'class':'form-control'})
class UserProfileLoginForm(AuthenticationForm):
"""A Form for user login."""
form_fields = ["username", "password"]
username = forms.CharField(max_length=254, label="Correo Electronico",
widget=forms.TextInput(attrs={"placeholder":'Usuario'}))
password = forms.CharField(label='Password',
widget=forms.PasswordInput(attrs={'placeholder': 'Contraseña'}))
def __init__(self, *args, **kwargs):
super(UserProfileLoginForm, self).__init__(*args, **kwargs)
#change the html class of all the elements of the form to get bootstrap 3 styling
for field in self.fields:
self.fields[field].widget.attrs.update({'class':'form-control'})
class OrganizationForm(forms.ModelForm):
"""Form used when creating a new organization"""
categories = forms.ModelMultipleChoiceField(queryset=Category.objects.all())
description = forms.CharField(label="Descripción", required=False,
widget=forms.Textarea(attrs={'rows':'2'}))
url = forms.URLField(max_length=255,
help_text='Por favor introduzca la URL de la pagina',
widget=forms.TextInput(attrs={'placeholder': 'http://www.website.com'}))
class Meta:
"""declaration of the inherited class"""
model = Organization
fields = ('name',
'description',
'phone',
'url',
'address',
'province',
'categories',
)
def __init__(self, *args, **kwargs):
super(OrganizationForm, self).__init__(*args, **kwargs)
#change the html class of all the elements
#of the form to get bootstrap 3 styling
for field in self.fields:
if field != 'categories':
self.fields[field].widget.attrs.update({'class':'form-control'})
else:
self.fields[field].widget.attrs.update({'class':'organization-category'})
class OrganizationPictureForm(forms.ModelForm):
picture = forms.ImageField()
class Meta:
model = Organization
fields = (
'logo',
)
class EventForm(forms.ModelForm):
"""Form to handle event forms"""
description = forms.CharField(label="Descripción", widget=forms.Textarea(attrs={'rows':'5'}))
from_date = forms.CharField(widget=forms.TextInput(attrs={
'class':'date',
})
)
to_date = forms.CharField(widget=forms.TextInput(attrs={
'class':'date',
})
)
class Meta:
"""Model inheritance settings"""
model = Event
fields = ('name',
'categories',
'cost',
'description',
'from_date',
'to_date',
'url',
)
def __init__(self, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
#change the html class of all the elements of the form to get bootstrap 3 styling
for field in self.fields:
if field != 'categories':
self.fields[field].widget.attrs.update({'class':'form-control'})
else:
self.fields[field].widget.attrs.update({'class':'event-category'})
class MailingListForm(forms.ModelForm):
class Meta:
        model = MailingList
fields = (
'full_name',
'email',
'province',
)
| mit | 2,371,676,960,138,874,400 | 35.975248 | 105 | 0.597269 | false |
ajaech/username_analytics | langOneVAll.py | 1 | 4934 | import code
import collections
import csv
import Classifier
import gzip
import numpy
import os
import pandas
import langid
import segmenter
from sklearn import metrics
from sklearn.metrics import classification_report
from Classifier import BayesClassifier
numpy.random.seed(666)
def capital_encode(username):
prev_is_letter = False
prev_is_capital = False
out = []
for char in username:
if char.isalpha() and prev_is_letter and not prev_is_capital and char.isupper():
out.append('$')
out.append(char.lower())
prev_is_letter = char.isalpha()
prev_is_capital = char.isupper()
return ''.join(out)
def load_twitter(filename):
with gzip.open(filename, 'rU') as f:
d = pandas.DataFrame([line for line in csv.DictReader(f)])
d.lang = d.lang.apply(str.strip)
d.name = d.name.apply(str.strip)
d.drop_duplicates(cols=['name'], inplace=True)
d['name_lower'] = d.name.apply(capital_encode)
#d['name_lower'] = d.name
return d
if os.path.exists('lang_train_cache.csv'):
train = pandas.DataFrame.from_csv('lang_train_cache.csv')
test = pandas.DataFrame.from_csv('lang_test_cache.csv')
else:
print 'can not load from cache'
d = load_twitter('data/lang/new_lang_data.txt.gz')
lang_counts = d.groupby('lang')['lang'].agg('count')
langs = set(lang_counts[lang_counts.values > 10000].index)
d = d[d.lang.apply(lambda x: x in langs)] # use only big languages
langid.set_languages(langs)
langid_labels = []
langid_scores = []
for i, idx in enumerate(d.index):
if i % 10000 == 0:
print i
langid_label, langid_score = langid.classify(d.text[idx])
langid_labels.append(langid_label)
langid_scores.append(langid_score)
d['lid_label'] = langid_labels
d['lid_score'] = langid_scores
d = d[(d.lid_score > 0.995) & (d.lang == d.lid_label)]
# random partioning
mask = numpy.random.rand(len(d)) < 0.8
train = d[mask]
test = d[~mask]
train.to_csv('lang_train_cache.csv', encoding='utf8')
test.to_csv('lang_test_cache.csv', encoding='utf8')
def getProbabilities(classifier):
results = []
for i in test.index:
name = test.name_lower[i]
lang = test.lang[i]
result = classifier.Classify(name)
result['lang'] = lang
result['name'] = name
results.append(result)
return pandas.DataFrame(results)
def get_preds(baseline, morph, weight):
columns = numpy.array(['True', 'False'])
z = weight * baseline[columns] + (1.0 - weight) * morph[columns]
idx = z.values.argmax(axis=1)
return columns[idx]
base_segmenter = segmenter.baseline_segmenter
morph_segmenter = segmenter.morph_segmenter(Classifier.model)
def getMetrics(truelabels, predlabels):
prec = metrics.precision_score(truelabels, predlabels, pos_label='True')
recall = metrics.recall_score(truelabels, predlabels, pos_label='True')
return prec, recall
all_langs = train.lang.unique()
for lang in all_langs:
labels = [str(x) for x in train.lang == lang]
testlabels = [str(x) for x in test.lang == lang]
baseline_classifier = BayesClassifier.Train(base_segmenter,
train.name_lower,
labels)
morph_classifier = BayesClassifier.Train(morph_segmenter,
train.name_lower,
labels)
baseline_results = getProbabilities(baseline_classifier)
morph_results = getProbabilities(morph_classifier)
preds_morph = get_preds(baseline_results, morph_results, 0.0)
preds_baseline = get_preds(baseline_results, morph_results, 1.0)
preds_combo = get_preds(baseline_results, morph_results, 0.5)
print 'language {0}'.format(lang)
prec, recall = getMetrics(testlabels, preds_morph)
print 'morph prec {0} recall {1}'.format(prec, recall)
prec, recall = getMetrics(testlabels, preds_combo)
print 'combo prec {0} recall {1}'.format(prec, recall)
prec, recall = getMetrics(testlabels, preds_baseline)
print 'baseline prec {0} recall {1}'.format(prec, recall)
def writeConfusion(outfile, counts):
total = sum(counts.values())
correct = [counts[p] for p in counts if p[0] == p[1]]
outfile.write('pred,lang,count\n')
for pred,lang in counts:
outfile.write('{0},{1},{2}\n'.format(pred, lang, counts[(pred,lang)]))
acc = (preds_morph == morph_results.lang).sum() / float(len(preds_morph))
print 'accuracy morph {0}'.format(acc)
acc = (preds_combo == morph_results.lang).sum() / float(len(preds_combo))
print 'accuracy {0}'.format(acc)
counts = collections.Counter(zip(preds_combo, morph_results.lang))
with open('morph_confusion.csv', 'w') as f:
writeConfusion(f, counts)
acc = (preds_baseline == morph_results.lang).sum() / float(len(preds_baseline))
print 'baseline accuracy {0}'.format(acc)
counts = collections.Counter(zip(preds_baseline, morph_results.lang))
with open('baseline_confusion.csv', 'w') as f:
writeConfusion(f, counts)
| gpl-2.0 | -8,076,812,040,308,764,000 | 30.832258 | 84 | 0.674503 | false |
cmusatyalab/isr-next | vmnetx/define.py | 1 | 2225 | #
# vmnetx.define - Creation of a new VMNetX-compatible VM
#
# Copyright (C) 2013 Carnegie Mellon University
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of version 2 of the GNU General Public License as published
# by the Free Software Foundation. A copy of the GNU General Public License
# should have been distributed along with this program in the file
# COPYING.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
from contextlib import closing
import libvirt
import os
import subprocess
from .domain import DomainXML, DomainXMLError
from .util import DetailException
class MachineDefinitionError(DetailException):
pass
def define_machine(name, memory_mb, disk_gb):
with closing(libvirt.open('qemu:///session')) as conn:
# Ensure machine doesn't exist
try:
conn.lookupByName(name)
raise MachineDefinitionError('Machine already exists')
except libvirt.libvirtError:
pass
# Ensure disk doesn't exist
disk_dir = os.path.join(os.path.expanduser('~'), 'VirtualMachines')
disk_path = os.path.join(disk_dir, name + '.qcow')
if os.path.exists(disk_path):
raise MachineDefinitionError('%s already exists' % disk_path)
# Create disk
if not os.path.exists(disk_dir):
os.makedirs(disk_dir)
with open('/dev/null', 'r+') as null:
ret = subprocess.call(['qemu-img', 'create', '-f', 'qcow2',
disk_path, str(disk_gb) + 'G'], stdout=null)
if ret != 0:
raise MachineDefinitionError("Couldn't create disk image")
# Create machine
try:
domain_xml = DomainXML.get_template(conn, name, disk_path,
'qcow2', memory_mb)
conn.defineXML(domain_xml.xml)
except DomainXMLError, e:
raise MachineDefinitionError(str(e), e.detail)
except libvirt.libvirtError, e:
raise MachineDefinitionError(str(e))
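# Illustrative sketch (not part of the original module): defining a new VM.
# The machine name and sizes are placeholders.
#
#   try:
#       define_machine('demo-vm', memory_mb=1024, disk_gb=8)
#   except MachineDefinitionError, e:
#       print 'Could not define machine: %s' % e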
| gpl-2.0 | 898,212,150,496,650,500 | 34.887097 | 77 | 0.659775 | false |
pyro-ppl/numpyro | numpyro/contrib/funsor/infer_util.py | 1 | 9920 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from collections import defaultdict
from contextlib import contextmanager
import functools
import re
import funsor
import numpyro
from numpyro.contrib.funsor.enum_messenger import (
infer_config,
plate as enum_plate,
trace as packed_trace,
)
from numpyro.distributions.util import is_identically_one
from numpyro.handlers import substitute
funsor.set_backend("jax")
@contextmanager
def plate_to_enum_plate():
"""
A context manager to replace `numpyro.plate` statement by a funsor-based
:class:`~numpyro.contrib.funsor.enum_messenger.plate`.
This is useful when doing inference for the usual NumPyro programs with
`numpyro.plate` statements. For example, to get trace of a `model` whose discrete
latent sites are enumerated, we can use::
enum_model = numpyro.contrib.funsor.enum(model)
with plate_to_enum_plate():
model_trace = numpyro.contrib.funsor.trace(enum_model).get_trace(
*model_args, **model_kwargs)
"""
try:
numpyro.plate.__new__ = lambda cls, *args, **kwargs: enum_plate(*args, **kwargs)
yield
finally:
numpyro.plate.__new__ = lambda *args, **kwargs: object.__new__(numpyro.plate)
def config_enumerate(fn=None, default="parallel"):
"""
Configures enumeration for all relevant sites in a NumPyro model.
When configuring for exhaustive enumeration of discrete variables, this
configures all sample sites whose distribution satisfies
``.has_enumerate_support == True``.
This can be used as either a function::
model = config_enumerate(model)
or as a decorator::
@config_enumerate
def model(*args, **kwargs):
...
.. note:: Currently, only ``default='parallel'`` is supported.
:param callable fn: Python callable with NumPyro primitives.
:param str default: Which enumerate strategy to use, one of
"sequential", "parallel", or None. Defaults to "parallel".
"""
if fn is None: # support use as a decorator
return functools.partial(config_enumerate, default=default)
def config_fn(site):
if (
site["type"] == "sample"
and (not site["is_observed"])
and site["fn"].has_enumerate_support
):
return {"enumerate": site["infer"].get("enumerate", default)}
return {}
return infer_config(fn, config_fn)
def _get_shift(name):
"""helper function used internally in sarkka_bilmes_product"""
return len(re.search(r"^(_PREV_)*", name).group(0)) // 6
def _shift_name(name, t):
"""helper function used internally in sarkka_bilmes_product"""
if t >= 0:
return t * "_PREV_" + name
return name.replace("_PREV_" * -t, "", 1)
def compute_markov_factors(
time_to_factors,
time_to_init_vars,
time_to_markov_dims,
sum_vars,
prod_vars,
history,
sum_op,
prod_op,
):
"""
:param dict time_to_factors: a map from time variable to the log prob factors.
:param dict time_to_init_vars: a map from time variable to init discrete sites.
:param dict time_to_markov_dims: a map from time variable to dimensions at markov sites
(discrete sites that depend on previous steps).
:param frozenset sum_vars: all plate and enum dimensions in the trace.
:param frozenset prod_vars: all plate dimensions in the trace.
:param int history: The number of previous contexts visible from the current context.
:returns: a list of factors after eliminate time dimensions
"""
markov_factors = []
for time_var, log_factors in time_to_factors.items():
prev_vars = time_to_init_vars[time_var]
# we eliminate all plate and enum dimensions not available at markov sites.
eliminate_vars = (sum_vars | prod_vars) - time_to_markov_dims[time_var]
with funsor.interpretations.lazy:
lazy_result = funsor.sum_product.sum_product(
sum_op,
prod_op,
log_factors,
eliminate=eliminate_vars,
plates=prod_vars,
)
trans = funsor.optimizer.apply_optimizer(lazy_result)
if history > 1:
global_vars = frozenset(
set(trans.inputs)
- {time_var.name}
- prev_vars
- {_shift_name(k, -_get_shift(k)) for k in prev_vars}
)
markov_factors.append(
funsor.sum_product.sarkka_bilmes_product(
sum_op, prod_op, trans, time_var, global_vars
)
)
else:
# remove `_PREV_` prefix to convert prev to curr
prev_to_curr = {k: _shift_name(k, -_get_shift(k)) for k in prev_vars}
markov_factors.append(
funsor.sum_product.sequential_sum_product(
sum_op, prod_op, trans, time_var, prev_to_curr
)
)
return markov_factors
def _enum_log_density(model, model_args, model_kwargs, params, sum_op, prod_op):
"""Helper function to compute elbo and extract its components from execution traces."""
model = substitute(model, data=params)
with plate_to_enum_plate():
model_trace = packed_trace(model).get_trace(*model_args, **model_kwargs)
log_factors = []
time_to_factors = defaultdict(list) # log prob factors
time_to_init_vars = defaultdict(frozenset) # PP... variables
time_to_markov_dims = defaultdict(frozenset) # dimensions at markov sites
sum_vars, prod_vars = frozenset(), frozenset()
history = 1
log_measures = {}
for site in model_trace.values():
if site["type"] == "sample":
value = site["value"]
intermediates = site["intermediates"]
scale = site["scale"]
if intermediates:
log_prob = site["fn"].log_prob(value, intermediates)
else:
log_prob = site["fn"].log_prob(value)
if (scale is not None) and (not is_identically_one(scale)):
log_prob = scale * log_prob
dim_to_name = site["infer"]["dim_to_name"]
log_prob_factor = funsor.to_funsor(
log_prob, output=funsor.Real, dim_to_name=dim_to_name
)
time_dim = None
for dim, name in dim_to_name.items():
if name.startswith("_time"):
time_dim = funsor.Variable(name, funsor.Bint[log_prob.shape[dim]])
time_to_factors[time_dim].append(log_prob_factor)
history = max(
history, max(_get_shift(s) for s in dim_to_name.values())
)
time_to_init_vars[time_dim] |= frozenset(
s for s in dim_to_name.values() if s.startswith("_PREV_")
)
break
if time_dim is None:
log_factors.append(log_prob_factor)
if not site["is_observed"]:
log_measures[site["name"]] = log_prob_factor
sum_vars |= frozenset({site["name"]})
prod_vars |= frozenset(
f.name for f in site["cond_indep_stack"] if f.dim is not None
)
for time_dim, init_vars in time_to_init_vars.items():
for var in init_vars:
curr_var = _shift_name(var, -_get_shift(var))
dim_to_name = model_trace[curr_var]["infer"]["dim_to_name"]
if var in dim_to_name.values(): # i.e. _PREV_* (i.e. prev) in dim_to_name
time_to_markov_dims[time_dim] |= frozenset(
name for name in dim_to_name.values()
)
if len(time_to_factors) > 0:
markov_factors = compute_markov_factors(
time_to_factors,
time_to_init_vars,
time_to_markov_dims,
sum_vars,
prod_vars,
history,
sum_op,
prod_op,
)
log_factors = log_factors + markov_factors
with funsor.interpretations.lazy:
lazy_result = funsor.sum_product.sum_product(
sum_op,
prod_op,
log_factors,
eliminate=sum_vars | prod_vars,
plates=prod_vars,
)
result = funsor.optimizer.apply_optimizer(lazy_result)
if len(result.inputs) > 0:
raise ValueError(
"Expected the joint log density is a scalar, but got {}. "
"There seems to be something wrong at the following sites: {}.".format(
result.data.shape, {k.split("__BOUND")[0] for k in result.inputs}
)
)
return result, model_trace, log_measures
def log_density(model, model_args, model_kwargs, params):
"""
Similar to :func:`numpyro.infer.util.log_density` but works for models
with discrete latent variables. Internally, this uses :mod:`funsor`
to marginalize discrete latent sites and evaluate the joint log probability.
:param model: Python callable containing NumPyro primitives. Typically,
the model has been enumerated by using
:class:`~numpyro.contrib.funsor.enum_messenger.enum` handler::
def model(*args, **kwargs):
...
log_joint = log_density(enum(config_enumerate(model)), args, kwargs, params)
:param tuple model_args: args provided to the model.
:param dict model_kwargs: kwargs provided to the model.
:param dict params: dictionary of current parameter values keyed by site
name.
:return: log of joint density and a corresponding model trace
"""
result, model_trace, _ = _enum_log_density(
model, model_args, model_kwargs, params, funsor.ops.logaddexp, funsor.ops.add
)
return result.data, model_trace
| apache-2.0 | -2,505,284,325,910,366,000 | 35.336996 | 91 | 0.594758 | false |
xirdneh1968/bungie_destiny_api | bungie_destiny_api/api.py | 1 | 15525 | """ BungieAPI module """
import urllib2
import json
import ConfigParser
import os.path
import sys
CONFIG_FILE_NAME = '.bungie_destiny_api.rc'
# API_KEY = <secret API key>
# put a BungieNet API-KEY obtained from https://www.bungie.net/en-US/User/API
# into an ini-style file at $HOME/CONFIG_FILE_NAME.
CONFIG_FILE = (os.path.join(os.path.expanduser("~"), CONFIG_FILE_NAME))
config = ConfigParser.ConfigParser()
config.read(CONFIG_FILE)
API_KEY = config.get('api', 'API-KEY')
DEBUG = int(config.get('default', 'debug'))
# get_account https://www.bungie.net/platform/destiny/help/
# DEPRECATED use get_account_summary
def get_account(destiny_membership_id=None, membership_type=None
, definitions=None):
"""get_account()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/" + membership_type + "/Account/"
+ destiny_membership_id
+ "/Summary/" + optional_arg +")")
acct = call_bungie_api('/' + membership_type + '/Account/'
+ destiny_membership_id + '/' + optional_arg)
return acct
# get_account_summary https://www.bungie.net/platform/destiny/help/
def get_account_summary(destiny_membership_id=None, membership_type=None
, definitions=None):
"""get_account_summary()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/" + membership_type + "/Account/"
+ destiny_membership_id + "/Summary/" + optional_arg +")")
acct_summary = call_bungie_api('/' + membership_type + '/Account/'
+ destiny_membership_id + '/Summary/'
+ optional_arg)
return acct_summary
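# Illustrative sketch (not part of this module's original code): typical use of
# these wrappers. The ids below are placeholders; membership_type is the
# platform passed as a string ('1' for Xbox, '2' for PlayStation in Destiny 1).
#
#   summary = get_account_summary('4611686018428388830', '2')
#   advisors = get_account_advisors_v2('4611686018428388830', '2',
#                                      '2305843009265044687', definitions=True)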
# get_activity_history_stats https://www.bungie.net/platform/destiny/help/
def get_activity_history_stats(destiny_membership_id=None
, membership_type=None, character_id=None
, definitions=None, page=None
, mode=None, count=None):
"""get_activity_history_stats()"""
    # build the query string; '?' before the first parameter, '&' before the rest
    query_params = []
    if definitions is not None:
        query_params.append('definitions=true')
    query_params.append('mode=None' if mode is None else 'mode=' + mode)
    if page is not None:
        query_params.append('page=' + str(page))
    if count is not None:
        query_params.append('count=' + str(count))
    optional_arg = '?' + '&'.join(query_params)
if DEBUG:
print ("DEBUG: call_bungie_api(/Stats/ActivityHistory/"
+ membership_type + "/" + destiny_membership_id + "/"
+ character_id + "/" + optional_arg +")")
activity_history_stats = call_bungie_api('/Stats/ActivityHistory/'
+ membership_type + '/'
+ destiny_membership_id + '/'
+ character_id +'/'
+ optional_arg)
return activity_history_stats
# get_account_advisors https://www.bungie.net/platform/destiny/help/
def get_account_advisors(destiny_membership_id=None, membership_type=None
, definitions=None):
"""get_account_advisors()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/" + membership_type + "/Account/"
+ destiny_membership_id + "/Advisors/" + optional_arg +")")
acct_advisors = call_bungie_api('/' + membership_type + '/Account/'
+ destiny_membership_id + '/Advisors/'
+ optional_arg)
return acct_advisors
# get_account_advisors_v2 https://www.bungie.net/platform/destiny/help/
def get_account_advisors_v2(destiny_membership_id=None, membership_type=None
, character_id=None, definitions=None):
"""get_account_adviors()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/" + membership_type + "/Account/"
+ destiny_membership_id + "/Character/" + character_id
+ "/Advisors/V2/" + optional_arg +")")
acct_advisors_v2 = call_bungie_api('/' + membership_type + '/Account/'
+ destiny_membership_id + '/Character/'
+ character_id + '/Advisors/V2/'
+ optional_arg)
return acct_advisors_v2
# get_account_items https://www.bungie.net/platform/destiny/help/
def get_account_items(destiny_membership_id=None, membership_type=None
, definitions=None):
"""get_account_items()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/" + membership_type + "/Account/"
+ destiny_membership_id + "/Items/" + optional_arg +")")
acct_items = call_bungie_api('/' + membership_type + '/Account/'
                                 + destiny_membership_id + '/Items/'
+ optional_arg)
return acct_items
# get_character_activities https://www.bungie.net/platform/destiny/help/
def get_character_activities(destiny_membership_id=None, membership_type=None
, character_id=None, definitions=None):
"""get_character_activities()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/" + membership_type + "/Account/"
+ destiny_membership_id + "/Character/"
+ character_id + "/Activities/" + optional_arg +")")
char_activities = call_bungie_api('/' + membership_type + '/Account/'
+ destiny_membership_id + '/Character/'
+ character_id + '/Activities/'
+ optional_arg)
return char_activities
# get_character_inventory https://www.bungie.net/platform/destiny/help/
# DEPRECATED use get_character_inventory_summary instead!
def get_character_inventory(destiny_membership_id=None, membership_type=None
, character_id=None, definitions=None):
"""get_character_inventory()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/" + membership_type + "/Account/"
+ destiny_membership_id + "/Character/"
+ character_id + "/Inventory/" + optional_arg +")")
char_inventory = call_bungie_api('/' + membership_type + '/Account/'
+ destiny_membership_id + '/Character/'
+ character_id + '/Inventory/'
+ optional_arg)
return char_inventory
# get_character_inventory_summary https://www.bungie.net/platform/destiny/help/
def get_character_inventory_summary(destiny_membership_id=None
, membership_type=None
, character_id=None, definitions=None):
"""get_character_inventory_summary()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/" + membership_type + "/Account/"
+ destiny_membership_id + "/Character/"
+ character_id + "/Inventory/Summary/"
+ optional_arg +")")
char_inventory_summary = call_bungie_api('/' + membership_type
+ '/Account/'
+ destiny_membership_id
+ '/Character/'
+ character_id
+ '/Inventory/Summary/'
+ optional_arg)
return char_inventory_summary
# get_character_progression https://www.bungie.net/platform/destiny/help/
def get_character_progression(destiny_membership_id=None, membership_type=None
, character_id=None, definitions=None):
"""get_character_progression()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/" + membership_type + "/Account/"
+ destiny_membership_id + "/Character/"
+ character_id + "/Progression/"
+ optional_arg +")")
char_progression = call_bungie_api('/' + membership_type + '/Account/'
+ destiny_membership_id + '/Character/'
+ character_id + '/Progression/'
+ optional_arg)
return char_progression
# get_character_summary https://www.bungie.net/platform/destiny/help/
def get_character_summary(destiny_membership_id=None, membership_type=None
, character_id=None, definitions=None):
"""get_character_summary()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/" + membership_type + "/Account/"
+ destiny_membership_id + "/Character/"
+ character_id + "/" + optional_arg +")")
char_summary = call_bungie_api('/' + membership_type + '/Account/'
+ destiny_membership_id + '/Character/'
+ character_id + '/' + optional_arg)
return char_summary
# get_character_aggregate_stats at
# https://www.bungie.net/platform/destiny/help/
def get_character_aggregate_stats(destiny_membership_id=None
, membership_type=None
, character_id=None
, definitions=None):
"""get_character_aggregate_stats()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/Stats/AggregateActivityStats/"
+ membership_type + "/" + destiny_membership_id + "/"
+ character_id + "/" + optional_arg +")")
char_agg_activity_stats = call_bungie_api('/Stats/AggregateActivityStats/'
+ membership_type + '/'
+ destiny_membership_id + '/'
+ character_id + '/'
+ optional_arg)
return char_agg_activity_stats
# get_character_stats https://www.bungie.net/platform/destiny/help/
def get_character_stats(destiny_membership_id=None, membership_type=None
, character_id=None, dayend=None, modes=None
, period_type=None, groups=None, monthstart=None
, monthend=None, daystart=None):
"""get_character_stats()"""
if modes is None:
optional_arg = '?modes=None'
else:
optional_arg = '?modes=' + modes
if DEBUG:
print ("DEBUG: call_bungie_api(/Stats/" + membership_type + "/"
+ destiny_membership_id + "/" + character_id + "/"
+ optional_arg + ")")
char_stats = call_bungie_api('/Stats/' + membership_type + '/'
+ destiny_membership_id + '/'
+ character_id + '/' + optional_arg)
return char_stats
# get_account_stats https://www.bungie.net/platform/destiny/help/
def get_account_stats(destiny_membership_id=None, membership_type=None
, groups=None):
"""get_account_stats()"""
if groups is None:
optional_arg = ''
else:
optional_arg = '?groups=' + groups
if DEBUG:
print ("DEBUG: call_bungie_api(/Stats/Account/" + membership_type + "/"
+ destiny_membership_id + "/" + optional_arg + ")")
acct_stats = call_bungie_api('/Stats/Account/' + membership_type + '/'
+ destiny_membership_id + '/'
+ optional_arg)
return acct_stats
# get_activity_stats https://www.bungie.net/platform/destiny/help/
def get_activity_stats(activity_id=None, definitions=None):
"""get_activity_stats()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/Stats/PostGameCarnageReport/"
+ activity_id + "/" + optional_arg + ")")
activity_pgc_report_stats = call_bungie_api('/Stats/PostGameCarnageReport/'
+ activity_id + '/'
+ optional_arg)
return activity_pgc_report_stats
# get_char_uniq_weapon_stats at
# https://www.bungie.net/platform/destiny/help/
def get_char_uniq_weapon_stats(membership_type=None
, destiny_membership_id=None
, character_id=None
, definitions=None):
"""get_char_uniq_weapon_stats()"""
if definitions is None:
optional_arg = ''
else:
optional_arg = '?definitions=true'
if DEBUG:
print ("DEBUG: call_bungie_api(/Stats/UniqueWeapons/"
+ membership_type + "/" + destiny_membership_id + "/"
+ character_id + "/" + optional_arg + ")")
char_uniq_weapon_stats = call_bungie_api('/Stats/UniqueWeapons/'
+ membership_type + '/'
+ destiny_membership_id + '/'
+ character_id + '/'
+ optional_arg)
return char_uniq_weapon_stats
def call_bungie_api(method):
"""call_bungie_api()"""
# takes a BungieNet API method as documented at
# https://www.bungie.net/platform/destiny/help/
api_base = 'https://www.bungie.net/Platform/Destiny'
call_url = api_base + method
request = urllib2.Request(call_url, headers={'X-API-Key':API_KEY})
result = urllib2.urlopen(request).read().decode('utf-8')
parsed_result = json.loads(result)
return parsed_result
def get_characters(destiny_membership_id=None, membership_type=None):
"""get_characters()"""
_summary = call_bungie_api('/' + membership_type + '/Account/'
+ destiny_membership_id + '/Summary/')
print _summary
_character_1 = _summary['Response']['data']['characters'][0]\
['characterBase']['characterId']
_character_2 = _summary['Response']['data']['characters'][1]\
['characterBase']['characterId']
_character_3 = _summary['Response']['data']['characters'][2]\
['characterBase']['characterId']
_character = [_character_1, _character_2, _character_3]
return _character
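# Illustrative usage sketch (not part of the original module).  The membership
# type and account id below are placeholders; API_KEY must already be defined
# for call_bungie_api() to authenticate, and the account is assumed to have the
# three characters that get_characters() expects.
if __name__ == '__main__':
    example_membership_type = '2'                  # placeholder platform code
    example_membership_id = '4611686018428388888'  # placeholder account id
    for example_character_id in get_characters(example_membership_id,
                                                example_membership_type):
        print get_character_summary(destiny_membership_id=example_membership_id,
                                     membership_type=example_membership_type,
                                     character_id=example_character_id)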
| mit | -6,405,018,067,533,590,000 | 34.204082 | 79 | 0.533011 | false |
davidastephens/zipline | zipline/utils/tradingcalendar.py | 1 | 11044 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytz
from datetime import datetime, timedelta
from dateutil import rrule
start = pd.Timestamp('1990-01-01', tz='UTC')
end_base = pd.Timestamp('today', tz='UTC')
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
end = end_base + timedelta(days=365)
def canonicalize_datetime(dt):
# Strip out any HHMMSS or timezone info in the user's datetime, so that
# all the datetimes we return will be 00:00:00 UTC.
return datetime(dt.year, dt.month, dt.day, tzinfo=pytz.utc)
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
new_years = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years)
new_years_sunday = rrule.rrule(
rrule.MONTHLY,
byyearday=2,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years_sunday)
mlk_day = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
byweekday=(rrule.MO(+3)),
cache=True,
dtstart=datetime(1998, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(mlk_day)
presidents_day = rrule.rrule(
rrule.MONTHLY,
bymonth=2,
byweekday=(rrule.MO(3)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(presidents_day)
good_friday = rrule.rrule(
rrule.DAILY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(good_friday)
memorial_day = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
byweekday=(rrule.MO(-1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(memorial_day)
july_4th = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=4,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th)
july_4th_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_sunday)
july_4th_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_saturday)
labor_day = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
byweekday=(rrule.MO(1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(labor_day)
thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
byweekday=(rrule.TH(4)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(thanksgiving)
christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas)
christmas_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_sunday)
# If Christmas is a Saturday then 24th, a Friday is observed.
christmas_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_saturday)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
# Add September 11th closings
# http://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks
# Due to the terrorist attacks, the stock market did not open on 9/11/2001
# It did not open again until 9/17/2001.
#
# September 2001
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
for day_num in range(11, 17):
non_trading_days.append(
datetime(2001, 9, day_num, tzinfo=pytz.utc))
# Add closings due to Hurricane Sandy in 2012
# http://en.wikipedia.org/wiki/Hurricane_sandy
#
# The stock exchange was closed due to Hurricane Sandy's
# impact on New York.
# It closed on 10/29 and 10/30, reopening on 10/31
# October 2012
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
for day_num in range(29, 31):
non_trading_days.append(
datetime(2012, 10, day_num, tzinfo=pytz.utc))
# Misc closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# National Days of Mourning
# - President Richard Nixon
non_trading_days.append(datetime(1994, 4, 27, tzinfo=pytz.utc))
# - President Ronald W. Reagan - June 11, 2004
non_trading_days.append(datetime(2004, 6, 11, tzinfo=pytz.utc))
# - President Gerald R. Ford - Jan 2, 2007
non_trading_days.append(datetime(2007, 1, 2, tzinfo=pytz.utc))
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
def get_early_closes(start, end):
# 1:00 PM close rules based on
# http://quant.stackexchange.com/questions/4083/nyse-early-close-rules-july-4th-and-dec-25th # noqa
# and verified against http://www.nyse.com/pdfs/closings.pdf
# These rules are valid starting in 1993
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc))
end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc))
# Not included here are early closes prior to 1993
# or unplanned early closes
early_close_rules = []
day_after_thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
# 4th Friday isn't correct if month starts on Friday, so restrict to
# day range:
byweekday=(rrule.FR),
bymonthday=range(23, 30),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_after_thanksgiving)
christmas_eve = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(christmas_eve)
friday_after_christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# valid 1993-2007
until=min(end, datetime(2007, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(friday_after_christmas)
day_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=(rrule.MO, rrule.TU, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_before_independence_day)
day_after_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# starting in 2013: wednesday before independence day
until=min(end, datetime(2012, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(day_after_independence_day)
wednesday_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.WE,
cache=True,
# starting in 2013
dtstart=max(start, datetime(2013, 1, 1, tzinfo=pytz.utc)),
until=max(end, datetime(2013, 1, 1, tzinfo=pytz.utc))
)
early_close_rules.append(wednesday_before_independence_day)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
# Misc early closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# New Year's Eve
nye_1999 = datetime(1999, 12, 31, tzinfo=pytz.utc)
if start <= nye_1999 and nye_1999 <= end:
early_closes.append(nye_1999)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_closes(trading_days, early_closes, tz='US/Eastern'):
open_and_closes = pd.DataFrame(index=trading_days,
columns=('market_open', 'market_close'))
for day in trading_days:
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=9,
minute=31),
tz='US/Eastern').tz_convert('UTC')
# 1 PM if early close, 4 PM otherwise
close_hour = 13 if day in early_closes else 16
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=close_hour),
tz='US/Eastern').tz_convert('UTC')
open_and_closes.ix[day]['market_open'] = market_open
open_and_closes.ix[day]['market_close'] = market_close
return open_and_closes
open_and_closes = get_open_and_closes(trading_days, early_closes)
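# Illustrative usage sketch (not part of the upstream module).  The date below
# is an arbitrary session chosen because it is both a trading day and a
# 1:00 PM early close (the day before Independence Day in 2012).
if __name__ == '__main__':
    example_day = pd.Timestamp('2012-07-03', tz='UTC')
    print("is trading day: %s" % (example_day in trading_days))
    print("is early close: %s" % (example_day in early_closes))
    print("market close:   %s" % open_and_closes.ix[example_day]['market_close'])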
| apache-2.0 | -441,858,406,999,512,400 | 26.679198 | 103 | 0.605397 | false |
monk-ee/puppetdb-python | puppetdb/v4/reports.py | 1 | 3924 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Arcus, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
reports.py: A bunch of API methods for interacting with v4 reports in the PuppetDB API.
Operators
The only available OPERATOR is =.
Fields
FIELD may be any of the following. All fields support only the equality operator.
certname
the name of the node that the report was received from.
hash
the id of the report; these ids can be acquired via event queries (see the /events query endpoint).
The response is a JSON array of report summaries for all reports that matched the input parameters.
The summaries are sorted by the completion time of the report, in descending order:
[
{
"end-time": "2012-10-29T18:38:01.000Z",
"puppet-version": "3.0.1",
"receive-time": "2012-10-29T18:38:04.238Z",
"configuration-version": "1351535883",
"start-time": "2012-10-29T18:38:00.000Z",
"hash": "bd899b1ee825ec1d2c671fe5541b5c8f4a783472",
"certname": "foo.local",
"report-format": 4,
"transaction-uuid": "030c1717-f175-4644-b048-ac9ea328f221"
},
{
"end-time": "2012-10-26T22:39:32.000Z",
"puppet-version": "3.0.1",
"receive-time": "2012-10-26T22:39:35.305Z",
"configuration-version": "1351291174",
"start-time": "2012-10-26T22:39:31.000Z",
"hash": "cd4e5fd8846bac26d15d151664a40e0f2fa600b0",
"certname": "foo.local",
"report-format": 4,
"transaction-uuid": null
}
]
"""
__author__ = "monkee"
__version__ = "1.0.1"
__maintainer__ = "monk-ee"
__email__ = "[email protected]"
__status__ = "Development"
from puppetdb import utils
API_VERSION = 'v4'
def get_reports(api_url=None, query='', verify=False, cert=list()):
"""
Returns reports
:param api_url: Base PuppetDB API url
:param query: Required. A JSON array of query predicates, in prefix form. (The standard ["<OPERATOR>", "<FIELD>", "<VALUE>"] format.)
Response
[
{
"end-time": "2012-10-29T18:38:01.000Z",
"puppet-version": "3.0.1",
"receive-time": "2012-10-29T18:38:04.238Z",
"configuration-version": "1351535883",
"start-time": "2012-10-29T18:38:00.000Z",
"hash": "bd899b1ee825ec1d2c671fe5541b5c8f4a783472",
"certname": "foo.local",
"report-format": 4,
"transaction-uuid": "030c1717-f175-4644-b048-ac9ea328f221"
},
{
"end-time": "2012-10-26T22:39:32.000Z",
"puppet-version": "3.0.1",
"receive-time": "2012-10-26T22:39:35.305Z",
"configuration-version": "1351291174",
"start-time": "2012-10-26T22:39:31.000Z",
"hash": "cd4e5fd8846bac26d15d151664a40e0f2fa600b0",
"certname": "foo.local",
"report-format": 4,
"transaction-uuid": null
}
]
"""
return utils._make_api_request(api_url, '/reports', verify, cert, params={'query': query})
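# Illustrative usage sketch (not part of the original module).  The base URL and
# certname are placeholders, and it assumes utils._make_api_request() returns
# the parsed JSON array documented above.
if __name__ == '__main__':
    example_api_url = 'http://puppetdb.example.com:8080/v4'
    example_query = '["=", "certname", "foo.local"]'
    for report in get_reports(api_url=example_api_url, query=example_query):
        print report['hash'], report['end-time']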
| mit | 27,615,388,178,460,460 | 35.333333 | 137 | 0.675331 | false |
laurentb/weboob | modules/serebii/module.py | 1 | 2111 | # -*- coding: utf-8 -*-
# Copyright(C) 2019-2020 Célande Adrien
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.tools.backend import Module
from weboob.capabilities.rpg import CapRPG
from .browser import SerebiiBrowser
__all__ = ['SerebiiModule']
class SerebiiModule(Module, CapRPG):
NAME = 'serebii'
DESCRIPTION = 'This website collects any data about Pokémon games.'
MAINTAINER = 'Célande Adrien'
EMAIL = '[email protected]'
LICENSE = 'LGPLv3+'
VERSION = '2.1'
BROWSER = SerebiiBrowser
def iter_characters(self):
return self.browser.iter_characters()
def get_character(self, character_id):
return self.browser.get_character(character_id)
def iter_skills(self, skill_type=None):
return self.browser.iter_skills(skill_type)
def get_skill(self, skill_id):
return self.browser.get_skill(skill_id)
def iter_skill_set(self, character_id, skill_type=None):
return self.browser.iter_skill_set(character_id, skill_type)
def iter_character_classes(self):
return self.browser.iter_character_classes()
def get_character_class(self, class_id):
"""
        List the weaknesses and strengths of a Pokémon type.
"""
return self.browser.get_character_class(class_id)
def iter_collectable_items(self):
return self.browser.iter_collectable_items()
| lgpl-3.0 | -6,983,604,243,544,496,000 | 30.447761 | 77 | 0.710014 | false |
MOLSSI-BSE/basis_set_exchange | basis_set_exchange/tests/test_api_slow.py | 1 | 1690 | """
Tests for the BSE main API
"""
import pytest
import basis_set_exchange as bse
from .common_testvars import *
@pytest.mark.slow
@pytest.mark.parametrize('basis_name', bs_names)
@pytest.mark.parametrize('fmt', bs_write_formats)
@pytest.mark.parametrize('unc_gen', true_false)
@pytest.mark.parametrize('unc_seg', true_false)
@pytest.mark.parametrize('unc_spdf', true_false)
@pytest.mark.parametrize('make_gen', true_false)
@pytest.mark.parametrize('opt_gen', true_false)
def test_slow_get_basis_1(basis_name, fmt, unc_gen, unc_seg, unc_spdf, make_gen, opt_gen):
"""Tests getting all basis sets in all formats
    and with every combination of options
Also tests memoization
"""
this_metadata = bs_metadata[basis_name]
for ver in this_metadata['versions'].keys():
bs1 = bse.get_basis(basis_name,
fmt=fmt,
version=ver,
uncontract_general=unc_gen,
uncontract_segmented=unc_seg,
uncontract_spdf=unc_spdf,
make_general=make_gen,
optimize_general=opt_gen,
header=False)
bs2 = bse.get_basis(basis_name,
fmt=fmt,
version=ver,
uncontract_general=unc_gen,
uncontract_segmented=unc_seg,
uncontract_spdf=unc_spdf,
make_general=make_gen,
optimize_general=opt_gen,
header=False)
assert bs1 == bs2
| bsd-3-clause | 4,514,062,005,073,885,700 | 33.489796 | 90 | 0.530769 | false |
jamiebull1/eppy | eppy/tests/test_IDF.py | 1 | 2313 | # Copyright (c) 2012 Santosh Philip
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""py.test for class IDF"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six import StringIO
from eppy.iddcurrent import iddcurrent
from eppy.modeleditor import IDF
def test_IDF():
"""py.test for class IDF"""
stored_idd = IDF.iddname
IDF.iddname = None
    assert IDF.iddname is None
IDF.setiddname("gumby", testing=True)
assert IDF.iddname == "gumby"
IDF.setiddname("karamba", testing=True)
assert IDF.iddname != "karamba"
assert IDF.iddname == "gumby"
IDF.iddname = stored_idd
iddsnippet = iddcurrent.iddtxt
iddfhandle = StringIO(iddsnippet)
if IDF.getiddname() is None:
IDF.setiddname(iddfhandle)
class TestIDF(object):
"""py.test for IDF function"""
def test_removeidfobject(self):
"""py.test for IDF.removeidfobject """
idftxt = ""
idfhandle = StringIO(idftxt)
idf = IDF(idfhandle)
key = "BUILDING"
idf.newidfobject(key, Name="Building_remove")
idf.newidfobject(key, Name="Building1")
idf.newidfobject(key, Name="Building_remove")
idf.newidfobject(key, Name="Building2")
buildings = idf.idfobjects["building"]
removethis = buildings[-2]
idf.removeidfobject(removethis)
assert buildings[2].Name == "Building2"
assert idf.model.dt[key][2][1] == "Building2"
def test_popidfobject(self):
idftxt = ""
idfhandle = StringIO(idftxt)
idf = IDF(idfhandle)
key = "BUILDING"
idf.newidfobject(key, Name="Building_remove")
idf.newidfobject(key, Name="Building1")
idf.newidfobject(key, Name="Building_remove")
idf.newidfobject(key, Name="Building2")
buildings = idf.idfobjects["building"]
removethis = buildings[-2]
idf.popidfobject(key, 2)
assert buildings[2].Name == "Building2"
assert idf.model.dt[key][2][1] == "Building2"
| mit | -7,503,793,358,429,532,000 | 30.684932 | 73 | 0.609166 | false |
anbangr/trusted-juju | juju/providers/dummy.py | 1 | 7310 | import logging
import os
import tempfile
from twisted.internet.defer import inlineCallbacks, returnValue, succeed, fail
#from txzookeeper import ZookeeperClient
from txzookeeper.managed import ManagedClient
from juju.errors import (
EnvironmentNotFound, MachinesNotFound, ProviderError)
from juju.machine import ProviderMachine
from juju.machine.constraints import ConstraintSet
from juju.state.placement import UNASSIGNED_POLICY
from juju.providers.common.files import FileStorage
log = logging.getLogger("juju.providers")
class DummyMachine(ProviderMachine):
"""Provider machine implementation specific to the dummy provider."""
def __init__(self, *args, **kw):
super(DummyMachine, self).__init__(*args, **kw)
self._opened_ports = set()
class MachineProvider(object):
def __init__(self, environment_name, config):
self.environment_name = environment_name
self.config = config
self._machines = []
self._state = None
self._storage = None
def get_legacy_config_keys(self):
return set(("some-legacy-key",)) & set(self.config)
def get_placement_policy(self):
"""Get the unit placement policy for the provider.
:param preference: A user specified plcaement policy preference
"""
return self.config.get("placement", UNASSIGNED_POLICY)
def get_constraint_set(self):
cs = ConstraintSet(self.provider_type)
cs.register_generics([])
return succeed(cs)
@property
def provider_type(self):
return "dummy"
def connect(self, share=False):
"""Connect to the zookeeper juju running in the machine provider.
@param share: Requests sharing of the connection with other clients
attempting to connect to the same provider, if that's feasible.
Unused for the dummy provider.
"""
return ManagedClient(
os.environ.get("ZOOKEEPER_ADDRESS", "127.0.0.1:2181"),
session_timeout=1000).connect()
def get_machines(self, instance_ids=()):
"""List all the machine running in the provider."""
if not instance_ids:
return succeed(self._machines[:])
machines_by_id = dict(((m.instance_id, m) for m in self._machines))
machines = []
missing_instance_ids = []
for instance_id in instance_ids:
if instance_id in machines_by_id:
machines.append(machines_by_id[instance_id])
else:
missing_instance_ids.append(instance_id)
if missing_instance_ids:
return fail(MachinesNotFound(missing_instance_ids))
return succeed(machines)
def start_machine(self, machine_data, master=False):
"""Start a machine in the provider."""
if not "machine-id" in machine_data:
return fail(ProviderError(
"Machine state `machine-id` required in machine_data"))
dns_name = machine_data.get("dns-name")
machine = DummyMachine(len(self._machines), dns_name)
self._machines.append(machine)
return succeed([machine])
def get_machine(self, instance_id):
"""Retrieve a machine by provider machine id.
"""
for machine in self._machines:
if instance_id == machine.instance_id:
return succeed(machine)
return fail(MachinesNotFound([instance_id]))
def bootstrap(self, constraints):
"""
Bootstrap juju on the machine provider.
"""
if self._machines:
return succeed(self._machines[:1])
return self.start_machine({"machine-id": 0})
@inlineCallbacks
def shutdown_machines(self, machines):
"""
Terminate any machine resources associated to the provider.
"""
instance_ids = [m.instance_id for m in machines]
machines = yield self.get_machines(instance_ids)
for machine in machines:
self._machines.remove(machine)
returnValue(machines)
def shutdown_machine(self, machine):
"""Terminate the given machine"""
if not isinstance(machine, DummyMachine):
return fail(ProviderError("Invalid machine for provider"))
for m in self._machines:
if m.instance_id == machine.instance_id:
self._machines.remove(m)
return m
return fail(ProviderError("Machine not found %r" % machine))
@inlineCallbacks
def destroy_environment(self):
yield self.save_state({})
machines = yield self.get_machines()
machines = yield self.shutdown_machines(machines)
returnValue(machines)
def save_state(self, state):
"""Save the state to the provider."""
self._state = state
return succeed(None)
def load_state(self):
"""Load the state from the provider."""
if self._state:
state = self._state
else:
state = {}
return succeed(state)
def get_file_storage(self):
"""Retrieve the C{FileStorage} provider abstracion."""
if self._storage:
return self._storage
storage_path = self.config.get("storage-directory")
if storage_path is None:
storage_path = tempfile.mkdtemp()
self._storage = FileStorage(storage_path)
return self._storage
def get_serialization_data(self):
config = self.config.copy()
# Introduce an additional variable to simulate actual
# providers which may serialize additional values
# from the environment or other external sources.
config["dynamicduck"] = "magic"
return config
def open_port(self, machine, machine_id, port, protocol="tcp"):
"""Dummy equivalent of ec2-authorize-group"""
if not isinstance(machine, DummyMachine):
return fail(ProviderError("Invalid machine for provider"))
machine._opened_ports.add((port, protocol))
log.debug("Opened %s/%s on provider machine %r",
port, protocol, machine.instance_id)
return succeed(None)
def close_port(self, machine, machine_id, port, protocol="tcp"):
"""Dummy equivalent of ec2-revoke-group"""
if not isinstance(machine, DummyMachine):
return fail(ProviderError("Invalid machine for provider"))
try:
machine._opened_ports.remove((port, protocol))
log.debug("Closed %s/%s on provider machine %r",
port, protocol, machine.instance_id)
except KeyError:
pass
return succeed(None)
def get_opened_ports(self, machine, machine_id):
"""Dummy equivalent of ec2-describe-group
This returns the current exposed ports in the environment for
this machine. This directly goes against the provider. For
EC2, this would be eventually consistent.
"""
if not isinstance(machine, DummyMachine):
return fail(ProviderError("Invalid machine for provider"))
return succeed(machine._opened_ports)
def get_zookeeper_machines(self):
if self._machines:
return succeed(self._machines[:1])
return fail(EnvironmentNotFound("not bootstrapped"))
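# Illustrative usage sketch (not part of the original module).  The dummy
# provider returns already-fired deferreds, so the callbacks below run
# synchronously and no reactor is needed for this toy walk-through.
if __name__ == '__main__':
    def _show(result, label):
        print label, result
        return result
    provider = MachineProvider("example-env", {})
    provider.bootstrap(None).addCallback(_show, "bootstrapped:")
    provider.start_machine({"machine-id": 1}).addCallback(_show, "started:")
    provider.get_machines().addCallback(_show, "machines:")
    provider.destroy_environment().addCallback(_show, "destroyed:")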
| agpl-3.0 | 1,932,143,490,729,965,300 | 34.485437 | 78 | 0.629001 | false |
oblalex/django-workflow | src/workflow/migrations/0004_auto__add_field_version_object_type.py | 1 | 6324 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Version.object_type'
db.add_column('workflow_version', 'object_type',
self.gf('django.db.models.fields.CharField')(default=u'ADD', max_length=3),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Version.object_type'
db.delete_column('workflow_version', 'object_type')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'workflow.revision': {
'Meta': {'ordering': "[u'-date_created']", 'object_name': 'Revision'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'created_by_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_moderated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'moderated_by_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': "orm['workflow.Revision']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'DR'", 'max_length': '2'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'workflow.version': {
'Meta': {'object_name': 'Version'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'format': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.TextField', [], {}),
'object_id_int': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'object_repr': ('django.db.models.fields.TextField', [], {}),
'object_type': ('django.db.models.fields.CharField', [], {'default': "u'ADD'", 'max_length': '3'}),
'revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['workflow.Revision']"}),
'serialized_data': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['workflow'] | mit | -7,054,477,247,670,684 | 70.067416 | 182 | 0.555503 | false |
masahir0y/barebox-yamada | scripts/remote/ratp.py | 6 | 21432 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import crcmod
import logging
import struct
from enum import Enum
from time import sleep
try:
from time import monotonic
except:
from .missing import monotonic
csum_func = crcmod.predefined.mkCrcFun('xmodem')
class RatpState(Enum):
listen = "listen" # 1
syn_sent = "syn-sent" # 2
syn_received = "syn-received" # 3
established = "established" # 4
fin_wait = "fin-wait" # 5
last_ack = "last-ack" # 6
closing = "closing" # 7
time_wait = "time-wait" # 8
closed = "closed" # 9
class RatpInvalidHeader(ValueError):
pass
class RatpInvalidPayload(ValueError):
pass
class RatpError(ValueError):
pass
class RatpPacket(object):
def __init__(self, data=None, flags=''):
self.payload = None
self.synch = 0x01
self._control = 0
self.length = 0
self.csum = 0
self.c_syn = False
self.c_ack = False
self.c_fin = False
self.c_rst = False
self.c_sn = 0
self.c_an = 0
self.c_eor = False
self.c_so = False
if data:
(self.synch, self._control, self.length, self.csum) = \
struct.unpack('!BBBB', data)
if self.synch != 0x01:
raise RatpInvalidHeader("invalid synch octet (%x != %x)" %
(self.synch, 0x01))
csum = (self._control + self.length + self.csum) & 0xff
if csum != 0xff:
raise RatpInvalidHeader("invalid csum octet (%x != %x)" %
(csum, 0xff))
self._unpack_control()
elif flags:
if 'S' in flags:
self.c_syn = True
if 'A' in flags:
self.c_ack = True
if 'F' in flags:
self.c_fin = True
if 'R' in flags:
self.c_rst = True
if 'E' in flags:
self.c_eor = True
def __repr__(self):
s = "RatpPacket("
if self.c_syn:
s += "SYN,"
if self.c_ack:
s += "ACK,"
if self.c_fin:
s += "FIN,"
if self.c_rst:
s += "RST,"
s += "SN=%i,AN=%i," % (self.c_sn, self.c_an)
if self.c_eor:
s += "EOR,"
if self.c_so:
s += "SO,DATA=%i)" % self.length
else:
s += "DATA=%i)" % self.length
return s
def _pack_control(self):
self._control = 0 | \
self.c_syn << 7 | \
self.c_ack << 6 | \
self.c_fin << 5 | \
self.c_rst << 4 | \
self.c_sn << 3 | \
self.c_an << 2 | \
self.c_eor << 1 | \
self.c_so << 0
def _unpack_control(self):
self.c_syn = bool(self._control & 1 << 7)
self.c_ack = bool(self._control & 1 << 6)
self.c_fin = bool(self._control & 1 << 5)
self.c_rst = bool(self._control & 1 << 4)
self.c_sn = bool(self._control & 1 << 3)
self.c_an = bool(self._control & 1 << 2)
self.c_eor = bool(self._control & 1 << 1)
self.c_so = bool(self._control & 1 << 0)
def pack(self):
self._pack_control()
self.csum = 0
self.csum = (self._control + self.length + self.csum)
self.csum = (self.csum & 0xff) ^ 0xff
return struct.pack('!BBBB', self.synch, self._control, self.length,
self.csum)
def unpack_payload(self, payload):
(c_recv,) = struct.unpack('!H', payload[-2:])
c_calc = csum_func(payload[:-2])
if c_recv != c_calc:
raise RatpInvalidPayload("bad checksum (%04x != %04x)" %
(c_recv, c_calc))
self.payload = payload[:-2]
def pack_payload(self):
c_calc = csum_func(self.payload)
return self.payload+struct.pack('!H', c_calc)
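# Illustrative round trip (not part of the original module), showing how the
# four header octets relate to the control flags:
#   >>> p = RatpPacket(flags='SA')
#   >>> p.c_sn, p.c_an = 0, 1
#   >>> p.length = 0xff            # for SYN packets, length carries the MDL
#   >>> raw = p.pack()             # synch, control, length, checksum octets
#   >>> RatpPacket(data=raw).c_syn
#   True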
class RatpConnection(object):
def __init__(self):
self._state = RatpState.closed
self._passive = True
self._input = b''
self._s_sn = 0
self._r_sn = 0
self._retrans = None
self._retrans_counter = None
self._retrans_deadline = None
self._r_mdl = None
self._s_mdl = 0xff
self._rx_buf = [] # reassembly buffer
self._rx_queue = []
self._tx_queue = []
self._rtt_alpha = 0.8
self._rtt_beta = 2.0
self._srtt = 0.2
self._rto_min, self._rto_max = 0.2, 1
self._tx_timestamp = None
self.total_retransmits = 0
self.total_crc_errors = 0
def _update_srtt(self, rtt):
self._srtt = (self._rtt_alpha * self._srtt) + \
((1.0 - self._rtt_alpha) * rtt)
logging.info("SRTT: %r", self._srtt)
def _get_rto(self):
return min(self._rto_max,
max(self._rto_min, self._rtt_beta * self._srtt))
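    # The two methods above implement the usual exponentially smoothed RTT
    # estimate (numbers below are illustrative, not from the original source):
    #   srtt' = alpha * srtt + (1 - alpha) * rtt
    #   rto   = clamp(beta * srtt', rto_min, rto_max)
    # e.g. with srtt = 0.2 s, rtt = 0.3 s, alpha = 0.8, beta = 2.0:
    #   srtt' = 0.8 * 0.2 + 0.2 * 0.3 = 0.22 s
    #   rto   = min(1, max(0.2, 2.0 * 0.22)) = 0.44 s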
def _write(self, pkt):
if pkt.payload or pkt.c_so or pkt.c_syn or pkt.c_rst or pkt.c_fin:
self._s_sn = pkt.c_sn
if not self._retrans:
self._retrans = pkt
self._retrans_counter = 0
else:
self.total_retransmits += 1
self._retrans_counter += 1
if self._retrans_counter > 10:
raise RatpError("Maximum retransmit count exceeded")
self._retrans_deadline = monotonic()+self._get_rto()
logging.info("Write: %r", pkt)
self._write_raw(pkt.pack())
if pkt.payload:
self._write_raw(pkt.pack_payload())
self._tx_timestamp = monotonic()
def _check_rto(self):
if self._retrans is None:
return
if self._retrans_deadline < monotonic():
logging.debug("Retransmit...")
self._write(self._retrans)
def _check_time_wait(self):
if not self._state == RatpState.time_wait:
return
remaining = self._time_wait_deadline - monotonic()
if remaining < 0:
self._state = RatpState.closed
else:
logging.debug("Time-Wait: %.2f remaining" % remaining)
sleep(min(remaining, 0.1))
def _read(self):
if len(self._input) < 4:
self._input += self._read_raw(4-len(self._input))
if len(self._input) < 4:
return
try:
pkt = RatpPacket(data=self._input[:4])
except RatpInvalidHeader as e:
logging.info("%r", e)
self._input = self._input[1:]
return
self._input = self._input[4:]
logging.info("Read: %r", pkt)
if pkt.c_syn or pkt.c_rst or pkt.c_so or pkt.c_fin:
return pkt
if pkt.length == 0:
return pkt
while len(self._input) < pkt.length+2:
self._input += self._read_raw()
try:
pkt.unpack_payload(self._input[:pkt.length+2])
except RatpInvalidPayload as e:
self.total_crc_errors += 1
return
finally:
self._input = self._input[pkt.length+2:]
return pkt
def _close(self):
pass
def _a(self, r):
logging.info("A")
if r.c_rst:
return True
if r.c_ack:
s = RatpPacket(flags='R')
s.c_sn = r.c_an
self._write(s)
return False
if r.c_syn:
self._r_mdl = r.length
s = RatpPacket(flags='SA')
s.c_sn = 0
s.c_an = (r.c_sn + 1) % 2
s.length = self._s_mdl
self._write(s)
self._state = RatpState.syn_received
return False
return False
def _b(self, r):
logging.info("B")
if r.c_ack and r.c_an != (self._s_sn + 1) % 2:
if r.c_rst:
return False
else:
s = RatpPacket(flags='R')
s.c_sn = r.c_an
self._write(s)
return False
if r.c_rst:
if r.c_ack:
self._retrans = None
# FIXME: delete the TCB
self._state = RatpState.closed
return False
else:
return False
if r.c_syn:
if r.c_ack:
self._r_mdl = r.length
self._retrans = None
self._r_sn = r.c_sn
s = RatpPacket(flags='A')
s.c_sn = r.c_an
s.c_an = (r.c_sn + 1) % 2
self._write(s)
self._state = RatpState.established
return False
else:
self._retrans = None
s = RatpPacket(flags='SA')
s.c_sn = 0
s.c_an = (r.c_sn + 1) % 2
s.length = self._s_mdl
self._write(s)
self._state = RatpState.syn_received
return False
return False
def _c1(self, r):
logging.info("C1")
if r.c_sn != self._r_sn:
return True
if r.c_rst or r.c_fin:
return False
s = RatpPacket(flags='A')
s.c_sn = r.c_an
s.c_an = (r.c_sn + 1) % 2
self._write(s)
return False
def _c2(self, r):
logging.info("C2")
if r.c_sn != self._r_sn:
return True
if r.c_rst or r.c_fin:
return False
if r.c_syn:
s = RatpPacket(flags='RA')
s.c_sn = r.c_an
s.c_an = (r.c_sn + 1) % 2
self._write(s)
self._retrans = None
# FIXME: inform the user "Error: Connection reset"
self._state = RatpState.closed
return False
logging.info("C2: duplicate packet")
s = RatpPacket(flags='A')
s.c_sn = r.c_an
s.c_an = (r.c_sn + 1) % 2
self._write(s)
return False
def _d1(self, r):
logging.info("D1")
if not r.c_rst:
return True
if self._passive:
self._retrans = None
self._state = RatpState.listen
return False
else:
self._retrans = None
self._state = RatpState.closed
raise RatpError("Connection refused")
def _d2(self, r):
logging.info("D2")
if not r.c_rst:
return True
self._retrans = None
self._state = RatpState.closed
raise RatpError("Connection reset")
def _d3(self, r):
logging.info("C3")
if not r.c_rst:
return True
self._state = RatpState.closed
return False
def _e(self, r):
logging.info("E")
if not r.c_syn:
return True
self._retrans = None
s = RatpPacket(flags='R')
if r.c_ack:
s.c_sn = r.c_an
else:
s.c_sn = 0
self._write(s)
self._state = RatpState.closed
raise RatpError("Connection reset")
def _f1(self, r):
logging.info("F1")
if not r.c_ack:
return False
if r.c_an == (self._s_sn + 1) % 2:
return True
if self._passive:
self._retrans = None
s = RatpPacket(flags='R')
s.c_sn = r.c_an
self._write(s)
self._state = RatpState.listen
return False
else:
self._retrans = None
s = RatpPacket(flags='R')
s.c_sn = r.c_an
self._write(s)
self._state = RatpState.closed
raise RatpError("Connection refused")
def _f2(self, r):
logging.info("F2")
if not r.c_ack:
return False
if r.c_an == (self._s_sn + 1) % 2:
if self._retrans:
self._retrans = None
self._update_srtt(monotonic()-self._tx_timestamp)
# FIXME: inform the user with an "Ok" if a buffer has been
# entirely acknowledged. Another packet containing data may
# now be sent.
return True
return True
def _f3(self, r):
logging.info("F3")
if not r.c_ack:
return False
if r.c_an == (self._s_sn + 1) % 2:
return True
return True
def _g(self, r):
logging.info("G")
if not r.c_rst:
return False
self._retrans = None
if r.c_ack:
s = RatpPacket(flags='R')
s.c_sn = r.c_an
self._write(s)
else:
s = RatpPacket(flags='RA')
s.c_sn = r.c_an
s.c_an = (r.c_sn + 1) % 2
self._write(s)
return False
def _h1(self, r):
logging.info("H1")
self._state = RatpState.established
return self._common_i1(r)
def _h2(self, r):
logging.info("H2")
if not r.c_fin:
return True
if self._retrans is not None:
# FIXME: inform the user "Warning: Data left unsent.", "Connection closing."
self._retrans = None
s = RatpPacket(flags='FA')
s.c_sn = r.c_an
s.c_an = (r.c_sn + 1) % 2
self._write(s)
self._state = RatpState.last_ack
raise RatpError("Connection closed by remote")
def _h3(self, r):
logging.info("H3")
if not r.c_fin:
# Our fin was lost, rely on retransmission
return False
if (r.length and not r.c_syn and not r.c_rst and not r.c_fin) or r.c_so:
self._retrans = None
s = RatpPacket(flags='RA')
s.c_sn = r.c_an
s.c_an = (r.c_sn + 1) % 2
self._write(s)
self._state = RatpState.closed
raise RatpError("Connection reset")
if r.c_an == (self._s_sn + 1) % 2:
self._retrans = None
s = RatpPacket(flags='A')
s.c_sn = r.c_an
s.c_an = (r.c_sn + 1) % 2
self._write(s)
self._time_wait_deadline = monotonic() + self._get_rto()
self._state = RatpState.time_wait
return False
else:
self._retrans = None
s = RatpPacket(flags='A')
s.c_sn = r.c_an
s.c_an = (r.c_sn + 1) % 2
self._write(s)
self._state = RatpState.closing
return False
def _h4(self, r):
logging.info("H4")
if r.c_an == (self._s_sn + 1) % 2:
self._retrans = None
self._time_wait_deadline = monotonic() + self._get_rto()
self._state = RatpState.time_wait
return False
return False
def _h5(self, r):
logging.info("H5")
if r.c_an == (self._s_sn + 1) % 2:
self._time_wait_deadline = monotonic() + self._get_rto()
self._state = RatpState.time_wait
return False
return False
def _h6(self, r):
logging.info("H6")
if not r.c_ack:
return False
if not r.c_fin:
return False
self._retrans = None
s = RatpPacket(flags='A')
s.c_sn = r.c_an
s.c_an = (r.c_sn + 1) % 2
self._write(s)
self._time_wait_deadline = monotonic() + self._get_rto()
return False
def _common_i1(self, r):
if r.c_so:
self._r_sn = r.c_sn
self._rx_buf.append(chr(r.length))
elif r.length and not r.c_syn and not r.c_rst and not r.c_fin:
self._r_sn = r.c_sn
self._rx_buf.append(r.payload)
else:
return False
# reassemble
if r.c_eor:
logging.info("Reassembling %i frames", len(self._rx_buf))
self._rx_queue.append(''.join(self._rx_buf))
self._rx_buf = []
s = RatpPacket(flags='A')
s.c_sn = r.c_an
s.c_an = (r.c_sn + 1) % 2
self._write(s)
return False
def _i1(self, r):
logging.info("I1")
return self._common_i1(r)
def _machine(self, pkt):
logging.info("State: %r", self._state)
if self._state == RatpState.listen:
self._a(pkt)
elif self._state == RatpState.syn_sent:
self._b(pkt)
elif self._state == RatpState.syn_received:
self._c1(pkt) and \
self._d1(pkt) and \
self._e(pkt) and \
self._f1(pkt) and \
self._h1(pkt)
elif self._state == RatpState.established:
self._c2(pkt) and \
self._d2(pkt) and \
self._e(pkt) and \
self._f2(pkt) and \
self._h2(pkt) and \
self._i1(pkt)
elif self._state == RatpState.fin_wait:
self._c2(pkt) and \
self._d2(pkt) and \
self._e(pkt) and \
self._f3(pkt) and \
self._h3(pkt)
elif self._state == RatpState.last_ack:
self._c2(pkt) and \
self._d3(pkt) and \
self._e(pkt) and \
self._f3(pkt) and \
self._h4(pkt)
elif self._state == RatpState.closing:
self._c2(pkt) and \
self._d3(pkt) and \
self._e(pkt) and \
self._f3(pkt) and \
self._h5(pkt)
elif self._state == RatpState.time_wait:
self._d3(pkt) and \
self._e(pkt) and \
self._f3(pkt) and \
self._h6(pkt)
elif self._state == RatpState.closed:
self._g(pkt)
def wait(self, deadline):
while deadline is None or deadline > monotonic():
pkt = self._read()
if pkt:
self._machine(pkt)
else:
self._check_rto()
self._check_time_wait()
if not self._retrans or self._rx_queue:
return
def wait1(self, deadline):
while deadline is None or deadline > monotonic():
pkt = self._read()
if pkt:
self._machine(pkt)
else:
self._check_rto()
self._check_time_wait()
if not self._retrans:
return
def listen(self):
logging.info("LISTEN")
self._state = RatpState.listen
def connect(self, timeout=5.0):
deadline = monotonic() + timeout
logging.info("CONNECT")
self._retrans = None
syn = RatpPacket(flags='S')
syn.length = self._s_mdl
self._write(syn)
self._state = RatpState.syn_sent
self.wait(deadline)
def send_one(self, data, eor=True, timeout=1.0):
deadline = monotonic() + timeout
logging.info("SEND_ONE (len=%i, eor=%r)", len(data), eor)
assert self._state == RatpState.established
assert self._retrans is None
snd = RatpPacket(flags='A')
snd.c_eor = eor
snd.c_sn = (self._s_sn + 1) % 2
snd.c_an = (self._r_sn + 1) % 2
snd.length = len(data)
snd.payload = data
self._write(snd)
self.wait1(deadline=None)
def send(self, data, timeout=1.0):
logging.info("SEND (len=%i)", len(data))
while len(data) > 255:
self.send_one(data[:255], eor=False, timeout=timeout)
data = data[255:]
self.send_one(data, eor=True, timeout=timeout)
def recv(self, timeout=1.0):
deadline = monotonic() + timeout
assert self._state == RatpState.established
if self._rx_queue:
return self._rx_queue.pop(0)
self.wait(deadline)
if self._rx_queue:
return self._rx_queue.pop(0)
def close(self, timeout=1.0):
deadline = monotonic() + timeout
logging.info("CLOSE")
if self._state == RatpState.established or self._state == RatpState.syn_received:
fin = RatpPacket(flags='FA')
fin.c_sn = (self._s_sn + 1) % 2
fin.c_an = (self._r_sn + 1) % 2
self._write(fin)
self._state = RatpState.fin_wait
while deadline > monotonic() and not self._state == RatpState.time_wait:
self.wait(deadline)
while self._state == RatpState.time_wait:
self.wait(None)
if self._state == RatpState.closed:
logging.info("CLOSE: success")
else:
logging.info("CLOSE: failure")
def abort(self):
logging.info("ABORT")
def status(self):
logging.info("STATUS")
return self._state
class SerialRatpConnection(RatpConnection):
def __init__(self, port):
super(SerialRatpConnection, self).__init__()
self.__port = port
self.__port.timeout = 0.01
self.__port.writeTimeout = None
self.__port.flushInput()
def _write_raw(self, data):
if data:
logging.debug("-> %r", bytearray(data))
return self.__port.write(data)
def _read_raw(self, size=1):
data = self.__port.read(size)
if data:
logging.debug("<- %r", bytearray(data))
return data
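# Illustrative usage sketch (not part of the original script).  The device path
# and baud rate are placeholders, the target must be running its RATP console
# on that port, and the payload here is arbitrary (the real barebox remote
# protocol layers its own message format on top of RATP).
if __name__ == '__main__':
    import serial
    logging.basicConfig(level=logging.INFO)
    example_port = serial.Serial('/dev/ttyUSB0', baudrate=115200)
    conn = SerialRatpConnection(example_port)
    conn.connect(timeout=5.0)
    conn.send(b'ping')
    print(repr(conn.recv(timeout=1.0)))
    conn.close()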
| gpl-2.0 | -2,351,006,769,777,201,700 | 27.015686 | 89 | 0.474524 | false |
mstritt/orbit-image-analysis | src/main/python/deeplearn/export2tensorboard.py | 1 | 1049 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary
model_dir = 'D:/data/glomeruli/20180202_glomeruli_detection_noquant.pb'
log_dir = 'd:/temp/tf'
with session.Session(graph=ops.Graph()) as sess:
with gfile.FastGFile(model_dir, "rb") as f:
graph_def = graph_pb2.GraphDef()
graph_def.ParseFromString(f.read())
importer.import_graph_def(graph_def)
# pb_visual_writer = summary.FileWriter(log_dir)
# pb_visual_writer.add_graph(sess.graph)
file_writer = summary.FileWriter(log_dir, sess.graph)
print("Model Imported. Visualize by running: tensorboard --logdir={}".format(log_dir))
| gpl-3.0 | 359,202,195,924,344,060 | 32.83871 | 94 | 0.741659 | false |
DStauffman/dstauffman | dstauffman/tests/test_aerospace_quat_opt.py | 1 | 14291 | r"""
Test file for the `quat_opt` module of the "dstauffman.aerospace" library.
Notes
-----
#. Written by David C. Stauffer in February 2021.
"""
#%% Imports
import unittest
from dstauffman import HAVE_NUMPY
import dstauffman.aerospace as space
from dstauffman.numba import HAVE_NUMBA
if HAVE_NUMPY:
import numpy as np
#%% aerospace.qrot_single
@unittest.skipIf(not HAVE_NUMPY, 'Skipping due to missing numpy dependency.')
class Test_aerospace_qrot_single(unittest.TestCase):
r"""
Tests the aerospace.qrot_single function with the following cases:
Single input case
"""
def setUp(self) -> None:
self.axis = np.array([1, 2, 3])
self.angle = np.pi/2
self.angle2 = np.pi/3
r2o2 = np.sqrt(2)/2
r3o2 = np.sqrt(3)/2
self.quat = np.array([[r2o2, 0, 0, r2o2], [0, r2o2, 0, r2o2], [0, 0, r2o2, r2o2]])
self.quat2 = np.array([[ 0.5, 0, 0, r3o2], [0, 0.5, 0, r3o2], [0, 0, 0.5, r3o2]])
def test_single_inputs(self) -> None:
for i in range(len(self.axis)):
quat = space.qrot_single(self.axis[i], self.angle)
self.assertEqual(quat.ndim, 1)
np.testing.assert_array_almost_equal(quat, self.quat[i, :])
quat = space.qrot_single(self.axis[i], self.angle2)
self.assertEqual(quat.ndim, 1)
np.testing.assert_array_almost_equal(quat, self.quat2[i, :])
def test_larger_angle(self) -> None:
quat = space.qrot_single(1, 5.1*np.pi)
self.assertGreater(quat[3], 0)
#%% aerospace.quat_from_axis_angle_single
@unittest.skipIf(not HAVE_NUMPY, 'Skipping due to missing numpy dependency.')
class Test_aerospace_quat_from_axis_angle_single(unittest.TestCase):
r"""
Tests the aerospace.quat_from_axis_angle_single function with the following cases:
Single axis (x3)
        Multiple axes
"""
def test_axis1(self) -> None:
angle = 5./180.*np.pi
quat = space.quat_from_axis_angle_single(np.array([1., 0., 0.]), angle)
exp = space.qrot_single(1, angle)
np.testing.assert_array_almost_equal(quat, exp, 14)
def test_axis2(self) -> None:
angle = 110./180.*np.pi
quat = space.quat_from_axis_angle_single(np.array([0., 1., 0.]), angle)
exp = space.qrot_single(2, angle)
np.testing.assert_array_almost_equal(quat, exp, 14)
def test_axis3(self) -> None:
angle = -45./180.*np.pi
quat = space.quat_from_axis_angle_single(np.array([0., 0., 1.]), angle)
exp = space.qrot_single(3, angle)
np.testing.assert_array_almost_equal(quat, exp, 14)
def test_multiple(self) -> None:
axis = np.sqrt([9/50, 16/50, 0.5]) # unit([3, 4, 5])
angle = 1e-6*np.sqrt(50)
quat = space.quat_from_axis_angle_single(axis, angle)
exp = space.quat_mult_single(space.quat_mult_single(space.qrot_single(1, 3e-6), \
space.qrot_single(2, 4e-6)), space.qrot_single(3, 5e-6))
np.testing.assert_array_almost_equal(quat, exp, 10)
def test_null_axis(self) -> None:
quat = space.quat_from_axis_angle_single(np.zeros(3), 0.1)
np.testing.assert_array_equal(quat, np.array([0., 0., 0., 1.]))
#%% aerospace.quat_interp_single
@unittest.skipIf(not HAVE_NUMPY, 'Skipping due to missing numpy dependency.')
class Test_aerospace_quat_interp_single(unittest.TestCase):
r"""
Tests the aerospace.quat_interp_single function with the following cases:
TBD
"""
def setUp(self) -> None:
self.time = np.array([1., 3., 5.])
self.quat = np.vstack((space.qrot_single(1, 0), space.qrot_single(1, np.pi/2), \
space.qrot_single(1, np.pi))).T
self.ti = np.array([1., 2., 4.5, 5.])
self.qout = np.column_stack((space.qrot_single(1, 0), space.qrot_single(1, np.pi/4), \
space.qrot_single(1, 3.5/4*np.pi), space.qrot_single(1, np.pi)))
def test_nominal(self) -> None:
ix = np.array([0, 1])
qout = space.quat_interp_single(self.time[ix], self.quat[:, ix], self.ti[0])
np.testing.assert_array_almost_equal(qout, self.qout[:, 0])
ix = np.array([0, 1])
qout = space.quat_interp_single(self.time[ix], self.quat[:, ix], self.ti[1])
np.testing.assert_array_almost_equal(qout, self.qout[:, 1])
ix = np.array([1, 2])
qout = space.quat_interp_single(self.time[ix], self.quat[:, ix], self.ti[2])
np.testing.assert_array_almost_equal(qout, self.qout[:, 2])
ix = np.array([1, 2])
qout = space.quat_interp_single(self.time[ix], self.quat[:, ix], self.ti[3])
if qout[0] > 0:
np.testing.assert_array_almost_equal(qout, self.qout[:, 3])
else:
np.testing.assert_array_almost_equal(qout, -self.qout[:, 3])
def test_extra(self) -> None:
with self.assertRaises(ValueError):
space.quat_interp_single(self.time, self.quat, 10.)
#%% aerospace.quat_inv_single
@unittest.skipIf(not HAVE_NUMPY, 'Skipping due to missing numpy dependency.')
class Test_aerospace_quat_inv_single(unittest.TestCase):
r"""
Tests the aerospace.quat_inv_single function with the following cases:
Single quat (x2 different quats)
"""
def setUp(self) -> None:
self.q1_inp = space.qrot_single(1, np.pi/2)
self.q1_out = np.array([-np.sqrt(2)/2, 0, 0, np.sqrt(2)/2])
self.q2_inp = space.qrot_single(2, np.pi/3)
self.q2_out = np.array([0, -0.5, 0, np.sqrt(3)/2])
def test_single_quat1(self) -> None:
q1_inv = space.quat_inv_single(self.q1_inp)
np.testing.assert_array_almost_equal(q1_inv, self.q1_out)
self.assertEqual(q1_inv.ndim, 1)
np.testing.assert_array_equal(q1_inv.shape, self.q1_out.shape)
def test_single_quat2(self) -> None:
q2_inv = space.quat_inv_single(self.q2_inp)
np.testing.assert_array_almost_equal(q2_inv, self.q2_out)
self.assertEqual(q2_inv.ndim, 1)
np.testing.assert_array_equal(q2_inv.shape, self.q2_out.shape)
def test_inplace(self) -> None:
q1_inv = space.quat_inv_single(self.q1_inp)
self.assertGreater(np.max(np.abs(q1_inv - self.q1_inp)), 0.1)
q1_inv = space.quat_inv_single(self.q1_inp, inplace=True)
self.assertLess(np.max(np.abs(q1_inv - self.q1_inp)), 1e-8)
#%% aerospace.quat_mult_single
@unittest.skipIf(not HAVE_NUMPY, 'Skipping due to missing numpy dependency.')
class Test_aerospace_quat_mult_single(unittest.TestCase):
r"""
Tests the aerospace.quat_mult_single function with the following cases:
Single quat (x2 different quats)
Reverse order
Quat array times scalar (x2 orders)
"""
def setUp(self) -> None:
self.q1 = space.qrot_single(1, np.pi/2)
self.q2 = space.qrot_single(2, -np.pi)
self.q3 = space.qrot_single(3, np.pi/3)
self.q4 = np.array([ 0, -np.sqrt(2)/2, np.sqrt(2)/2, 0]) # q1*q2
self.q5 = np.array([0.5, -np.sqrt(3)/2, 0, 0]) # q2*q3
self.q6 = np.array([0.5, 0.5, 0.5, 0.5]) # q6 * q6 = q6**-1, and triggers negative scalar component
def test_nominal1(self) -> None:
quat = space.quat_mult_single(self.q1, self.q2)
self.assertEqual(quat.ndim, 1)
np.testing.assert_array_almost_equal(quat, self.q4)
np.testing.assert_array_equal(quat.shape, self.q4.shape)
def test_nominal2(self) -> None:
quat = space.quat_mult_single(self.q2, self.q3)
self.assertEqual(quat.ndim, 1)
np.testing.assert_array_almost_equal(quat, self.q5)
np.testing.assert_array_equal(quat.shape, self.q5.shape)
def test_nominal3(self) -> None:
quat = space.quat_mult_single(self.q6, self.q6)
self.assertEqual(quat.ndim, 1)
np.testing.assert_array_almost_equal(quat, space.quat_inv_single(self.q6))
np.testing.assert_array_equal(quat.shape, self.q6.shape)
def test_reverse(self) -> None:
quat1 = space.quat_mult_single(self.q2, self.q1)
quat2 = space.quat_inv_single(space.quat_mult_single(space.quat_inv_single(self.q1), space.quat_inv_single(self.q2)))
np.testing.assert_array_almost_equal(quat1, quat2)
def test_inplace(self) -> None:
quat = space.quat_mult_single(self.q1, self.q2)
self.assertGreater(np.max(np.abs(quat - self.q1)), 0.1)
quat = space.quat_mult_single(self.q1, self.q2, inplace=True)
self.assertIs(quat, self.q1)
self.assertLess(np.max(np.abs(quat - self.q4)), 1e-8)
#%% aerospace.quat_norm_single
@unittest.skipIf(not HAVE_NUMPY, 'Skipping due to missing numpy dependency.')
class Test_aerospace_quat_norm_single(unittest.TestCase):
r"""
Tests the aerospace.quat_norm_single function with the following cases:
Single quat (x3 different quats)
"""
def setUp(self) -> None:
self.q1_inp = space.qrot_single(1, np.pi/2)
self.q1_out = np.array([np.sqrt(2)/2, 0, 0, np.sqrt(2)/2])
self.q2_inp = space.qrot_single(2, np.pi/3)
self.q2_out = np.array([0, 0.5, 0, np.sqrt(3)/2])
self.q3_inp = np.array([0.1, 0, 0, 1])
self.q3_out = np.array([0.09950372, 0, 0, 0.99503719])
def test_nominal1(self) -> None:
quat_norm = space.quat_norm_single(self.q1_inp)
np.testing.assert_array_almost_equal(quat_norm, self.q1_out)
self.assertEqual(quat_norm.ndim, 1)
np.testing.assert_array_equal(quat_norm.shape, self.q1_out.shape)
def test_nominal2(self) -> None:
quat_norm = space.quat_norm_single(self.q2_inp)
np.testing.assert_array_almost_equal(quat_norm, self.q2_out)
self.assertEqual(quat_norm.ndim, 1)
np.testing.assert_array_equal(quat_norm.shape, self.q2_out.shape)
def test_nominal3(self) -> None:
quat_norm = space.quat_norm_single(self.q3_inp)
np.testing.assert_array_almost_equal(quat_norm, self.q3_out)
self.assertEqual(quat_norm.ndim, 1)
np.testing.assert_array_equal(quat_norm.shape, self.q3_out.shape)
def test_inplace(self) -> None:
quat_norm = space.quat_norm_single(self.q3_inp)
self.assertGreater(np.max(np.abs(quat_norm - self.q3_inp)), 0.004)
quat_norm = space.quat_norm_single(self.q3_inp, inplace=True)
self.assertIs(quat_norm, self.q3_inp)
self.assertLess(np.max(np.abs(quat_norm - self.q3_inp)), 1e-8)
#%% aerospace.quat_prop_single
@unittest.skipIf(not HAVE_NUMPY, 'Skipping due to missing numpy dependency.')
class Test_aerospace_quat_prop_single(unittest.TestCase):
r"""
Tests the aerospace.quat_prop_single function with the following cases:
Nominal case
Negative scalar
"""
def setUp(self) -> None:
self.quat = np.array([0., 0., 0., 1.])
self.delta_ang = np.array([0.01, 0.02, 0.03])
self.quat_new = np.array([0.005, 0.01, 0.015, 1.0])
self.quat_new_norm = np.array([0.00499912522962, 0.00999825045924, 0.01499737568886, 0.99982504592411])
def test_nominal(self) -> None:
quat = space.quat_prop_single(self.quat, self.delta_ang)
np.testing.assert_array_almost_equal(quat, self.quat_new, 12)
quat_norm = space.quat_norm_single(quat)
np.testing.assert_array_almost_equal(quat_norm, self.quat_new_norm, 12)
def test_negative_scalar(self) -> None:
quat = space.quat_prop_single(np.array([1., 0., 0., 0.]), self.delta_ang)
self.assertGreater(quat[3], 0)
quat = space.quat_prop_single(np.array([1., 0., 0., 0.]), -self.delta_ang)
self.assertGreater(quat[3], 0)
def test_inplace(self) -> None:
quat = space.quat_prop_single(self.quat, self.delta_ang)
self.assertGreater(np.max(np.abs(quat - self.quat)), 0.004)
quat = space.quat_prop_single(self.quat, self.delta_ang, inplace=True)
self.assertIs(quat, self.quat)
self.assertLess(np.max(np.abs(quat - self.quat_new)), 1e-8)
#%% aerospace.quat_times_vector_single
@unittest.skipIf(not HAVE_NUMPY, 'Skipping due to missing numpy dependency.')
class Test_aerospace_quat_times_vector_single(unittest.TestCase):
r"""
Tests the aerospace.quat_times_vector_single function with the following cases:
Nominal
"""
def setUp(self) -> None:
# TODO: confirm that this is enough to test the correctness of the function
self.quat = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]).T
self.vec = np.array([[1, 0, 0], [2, 0, 0]]).T
self.out = np.array([[-1, 2], [0, 0], [0, 0]])
def test_integers(self) -> None:
# Expected to fail until numba supports @ for matrix multiplication for integers.
for i in range(2):
vec = space.quat_times_vector_single(self.quat[:, i], self.vec[:, i])
np.testing.assert_array_almost_equal(vec, self.out[:, i])
if HAVE_NUMBA:
test_integers = unittest.expectedFailure(test_integers)
def test_nominal(self) -> None:
for i in range(2):
vec = space.quat_times_vector_single(self.quat[:, i].astype(float), self.vec[:, i].astype(float))
np.testing.assert_array_almost_equal(vec, self.out[:, i].astype(float))
def test_inplace(self) -> None:
q = self.quat[:, 0].astype(float)
v = self.vec[:, 0].astype(float)
vec = space.quat_times_vector_single(q, v)
self.assertGreater(np.max(np.abs(vec - v)), 0.004)
vec = space.quat_times_vector_single(q, v, inplace=True)
self.assertIs(vec, v)
self.assertLess(np.max(np.abs(vec - self.out[:, 0])), 1e-8)
#%% aerospace.quat_to_dcm
@unittest.skipIf(not HAVE_NUMPY, 'Skipping due to missing numpy dependency.')
class Test_aerospace_quat_to_dcm(unittest.TestCase):
r"""
Tests the aerospace.quat_to_dcm function with the following cases:
Nominal case
"""
def setUp(self) -> None:
self.quat = np.array([0.5, -0.5, 0.5, 0.5])
self.dcm = np.array([\
[ 0., 0., 1.],
[-1., 0., 0.],
[ 0., -1., 0.]])
def test_nominal(self) -> None:
dcm = space.quat_to_dcm(self.quat)
np.testing.assert_array_almost_equal(dcm, self.dcm)
#%% Unit test execution
if __name__ == '__main__':
unittest.main(exit=False)
| lgpl-3.0 | 8,126,852,090,581,298,000 | 42.43769 | 125 | 0.619201 | false |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/hyperlink/_url.py | 3 | 49264 | # -*- coding: utf-8 -*-
u"""Hyperlink provides Pythonic URL parsing, construction, and rendering.
Usage is straightforward::
>>> from hyperlink import URL
>>> url = URL.from_text(u'http://github.com/mahmoud/hyperlink?utm_source=docs')
>>> url.host
u'github.com'
>>> secure_url = url.replace(scheme=u'https')
>>> secure_url.get('utm_source')[0]
u'docs'
As seen here, the API revolves around the lightweight and immutable
:class:`URL` type, documented below.
"""
import re
import string
import socket
from unicodedata import normalize
try:
from socket import inet_pton
except ImportError:
# based on https://gist.github.com/nnemkin/4966028
# this code only applies on Windows Python 2.7
import ctypes
class _sockaddr(ctypes.Structure):
_fields_ = [("sa_family", ctypes.c_short),
("__pad1", ctypes.c_ushort),
("ipv4_addr", ctypes.c_byte * 4),
("ipv6_addr", ctypes.c_byte * 16),
("__pad2", ctypes.c_ulong)]
WSAStringToAddressA = ctypes.windll.ws2_32.WSAStringToAddressA
WSAAddressToStringA = ctypes.windll.ws2_32.WSAAddressToStringA
def inet_pton(address_family, ip_string):
addr = _sockaddr()
ip_string = ip_string.encode('ascii')
addr.sa_family = address_family
addr_size = ctypes.c_int(ctypes.sizeof(addr))
if WSAStringToAddressA(ip_string, address_family, None, ctypes.byref(addr), ctypes.byref(addr_size)) != 0:
raise socket.error(ctypes.FormatError())
if address_family == socket.AF_INET:
return ctypes.string_at(addr.ipv4_addr, 4)
if address_family == socket.AF_INET6:
return ctypes.string_at(addr.ipv6_addr, 16)
raise socket.error('unknown address family')
unicode = type(u'')
try:
unichr
except NameError:
unichr = chr # py3
NoneType = type(None)
# from boltons.typeutils
def make_sentinel(name='_MISSING', var_name=None):
"""Creates and returns a new **instance** of a new class, suitable for
usage as a "sentinel", a kind of singleton often used to indicate
a value is missing when ``None`` is a valid input.
Args:
name (str): Name of the Sentinel
var_name (str): Set this name to the name of the variable in
            its respective module to enable pickleability.
>>> make_sentinel(var_name='_MISSING')
_MISSING
The most common use cases here in boltons are as default values
for optional function arguments, partly because of its
less-confusing appearance in automatically generated
documentation. Sentinels also function well as placeholders in queues
and linked lists.
.. note::
By design, additional calls to ``make_sentinel`` with the same
values will not produce equivalent objects.
>>> make_sentinel('TEST') == make_sentinel('TEST')
False
>>> type(make_sentinel('TEST')) == type(make_sentinel('TEST'))
False
"""
class Sentinel(object):
def __init__(self):
self.name = name
self.var_name = var_name
def __repr__(self):
if self.var_name:
return self.var_name
return '%s(%r)' % (self.__class__.__name__, self.name)
if var_name:
def __reduce__(self):
return self.var_name
def __nonzero__(self):
return False
__bool__ = __nonzero__
return Sentinel()
_unspecified = _UNSET = make_sentinel('_UNSET')
# RFC 3986 Section 2.3, Unreserved URI Characters
# https://tools.ietf.org/html/rfc3986#section-2.3
_UNRESERVED_CHARS = frozenset('~-._0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz')
# URL parsing regex (based on RFC 3986 Appendix B, with modifications)
_URL_RE = re.compile(r'^((?P<scheme>[^:/?#]+):)?'
r'((?P<_netloc_sep>//)'
r'(?P<authority>[^/?#]*))?'
r'(?P<path>[^?#]*)'
r'(\?(?P<query>[^#]*))?'
r'(#(?P<fragment>.*))?$')
_SCHEME_RE = re.compile(r'^[a-zA-Z0-9+-.]*$')
_AUTHORITY_RE = re.compile(r'^(?:(?P<userinfo>[^@/?#]*)@)?'
r'(?P<host>'
r'(?:\[(?P<ipv6_host>[^[\]/?#]*)\])'
r'|(?P<plain_host>[^:/?#[\]]*)'
r'|(?P<bad_host>.*?))?'
r'(?::(?P<port>.*))?$')
_HEX_CHAR_MAP = dict([((a + b).encode('ascii'),
unichr(int(a + b, 16)).encode('charmap'))
for a in string.hexdigits for b in string.hexdigits])
_ASCII_RE = re.compile('([\x00-\x7f]+)')
# RFC 3986 section 2.2, Reserved Characters
# https://tools.ietf.org/html/rfc3986#section-2.2
_GEN_DELIMS = frozenset(u':/?#[]@')
_SUB_DELIMS = frozenset(u"!$&'()*+,;=")
_ALL_DELIMS = _GEN_DELIMS | _SUB_DELIMS
_USERINFO_SAFE = _UNRESERVED_CHARS | _SUB_DELIMS
_USERINFO_DELIMS = _ALL_DELIMS - _USERINFO_SAFE
_PATH_SAFE = _UNRESERVED_CHARS | _SUB_DELIMS | set(u':@%')
_PATH_DELIMS = _ALL_DELIMS - _PATH_SAFE
_SCHEMELESS_PATH_SAFE = _PATH_SAFE - set(':')
_SCHEMELESS_PATH_DELIMS = _ALL_DELIMS - _SCHEMELESS_PATH_SAFE
_FRAGMENT_SAFE = _UNRESERVED_CHARS | _PATH_SAFE | set(u'/?')
_FRAGMENT_DELIMS = _ALL_DELIMS - _FRAGMENT_SAFE
_QUERY_SAFE = _UNRESERVED_CHARS | _FRAGMENT_SAFE - set(u'&=+')
_QUERY_DELIMS = _ALL_DELIMS - _QUERY_SAFE
def _make_decode_map(delims, allow_percent=False):
ret = dict(_HEX_CHAR_MAP)
if not allow_percent:
delims = set(delims) | set([u'%'])
for delim in delims:
_hexord = '{0:02X}'.format(ord(delim)).encode('ascii')
_hexord_lower = _hexord.lower()
ret.pop(_hexord)
if _hexord != _hexord_lower:
ret.pop(_hexord_lower)
return ret
def _make_quote_map(safe_chars):
ret = {}
# v is included in the dict for py3 mostly, because bytestrings
# are iterables of ints, of course!
for i, v in zip(range(256), range(256)):
c = chr(v)
if c in safe_chars:
ret[c] = ret[v] = c
else:
ret[c] = ret[v] = '%{0:02X}'.format(i)
return ret
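# Note: the quote maps built below answer for both single-character strings and
# raw byte values (ints), so e.g. a space maps to '%20' whether it is looked up
# as u' ' or as 32; this keeps the py2/py3 encoding loops below uniform.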
_USERINFO_PART_QUOTE_MAP = _make_quote_map(_USERINFO_SAFE)
_USERINFO_DECODE_MAP = _make_decode_map(_USERINFO_DELIMS)
_PATH_PART_QUOTE_MAP = _make_quote_map(_PATH_SAFE)
_SCHEMELESS_PATH_PART_QUOTE_MAP = _make_quote_map(_SCHEMELESS_PATH_SAFE)
_PATH_DECODE_MAP = _make_decode_map(_PATH_DELIMS)
_QUERY_PART_QUOTE_MAP = _make_quote_map(_QUERY_SAFE)
_QUERY_DECODE_MAP = _make_decode_map(_QUERY_DELIMS)
_FRAGMENT_QUOTE_MAP = _make_quote_map(_FRAGMENT_SAFE)
_FRAGMENT_DECODE_MAP = _make_decode_map(_FRAGMENT_DELIMS)
_ROOT_PATHS = frozenset(((), (u'',)))
def _encode_path_part(text, maximal=True):
"Percent-encode a single segment of a URL path."
if maximal:
bytestr = normalize('NFC', text).encode('utf8')
return u''.join([_PATH_PART_QUOTE_MAP[b] for b in bytestr])
return u''.join([_PATH_PART_QUOTE_MAP[t] if t in _PATH_DELIMS else t
for t in text])
def _encode_schemeless_path_part(text, maximal=True):
"""Percent-encode the first segment of a URL path for a URL without a
scheme specified.
"""
if maximal:
bytestr = normalize('NFC', text).encode('utf8')
return u''.join([_SCHEMELESS_PATH_PART_QUOTE_MAP[b] for b in bytestr])
return u''.join([_SCHEMELESS_PATH_PART_QUOTE_MAP[t]
if t in _SCHEMELESS_PATH_DELIMS else t for t in text])
def _encode_path_parts(text_parts, rooted=False, has_scheme=True,
has_authority=True, joined=True, maximal=True):
"""
Percent-encode a tuple of path parts into a complete path.
Setting *maximal* to False percent-encodes only the reserved
characters that are syntactically necessary for serialization,
preserving any IRI-style textual data.
Leaving *maximal* set to its default True percent-encodes
everything required to convert a portion of an IRI to a portion of
a URI.
RFC 3986 3.3:
If a URI contains an authority component, then the path component
must either be empty or begin with a slash ("/") character. If a URI
does not contain an authority component, then the path cannot begin
with two slash characters ("//"). In addition, a URI reference
(Section 4.1) may be a relative-path reference, in which case the
first path segment cannot contain a colon (":") character.
"""
if not text_parts:
return u'' if joined else text_parts
if rooted:
text_parts = (u'',) + text_parts
# elif has_authority and text_parts:
# raise Exception('see rfc above') # TODO: too late to fail like this?
encoded_parts = []
if has_scheme:
encoded_parts = [_encode_path_part(part, maximal=maximal)
if part else part for part in text_parts]
else:
encoded_parts = [_encode_schemeless_path_part(text_parts[0])]
encoded_parts.extend([_encode_path_part(part, maximal=maximal)
if part else part for part in text_parts[1:]])
if joined:
return u'/'.join(encoded_parts)
return tuple(encoded_parts)
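# Illustrative example for _encode_path_parts (inputs chosen for the sketch):
# with rooted=True an empty leading segment is prepended so the joined result
# starts with '/', and maximal quoting percent-encodes the space:
#
#   >>> _encode_path_parts((u'a b', u'c'), rooted=True)
#   u'/a%20b/c'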
def _encode_query_part(text, maximal=True):
"""
Percent-encode a single query string key or value.
"""
if maximal:
bytestr = normalize('NFC', text).encode('utf8')
return u''.join([_QUERY_PART_QUOTE_MAP[b] for b in bytestr])
return u''.join([_QUERY_PART_QUOTE_MAP[t] if t in _QUERY_DELIMS else t
for t in text])
def _encode_fragment_part(text, maximal=True):
"""Quote the fragment part of the URL. Fragments don't have
subdelimiters, so the whole URL fragment can be passed.
"""
if maximal:
bytestr = normalize('NFC', text).encode('utf8')
return u''.join([_FRAGMENT_QUOTE_MAP[b] for b in bytestr])
return u''.join([_FRAGMENT_QUOTE_MAP[t] if t in _FRAGMENT_DELIMS else t
for t in text])
def _encode_userinfo_part(text, maximal=True):
"""Quote special characters in either the username or password
section of the URL.
"""
if maximal:
bytestr = normalize('NFC', text).encode('utf8')
return u''.join([_USERINFO_PART_QUOTE_MAP[b] for b in bytestr])
return u''.join([_USERINFO_PART_QUOTE_MAP[t] if t in _USERINFO_DELIMS
else t for t in text])
# This port list painstakingly curated by hand searching through
# https://www.iana.org/assignments/uri-schemes/uri-schemes.xhtml
# and
# https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml
SCHEME_PORT_MAP = {'acap': 674, 'afp': 548, 'dict': 2628, 'dns': 53,
'file': None, 'ftp': 21, 'git': 9418, 'gopher': 70,
'http': 80, 'https': 443, 'imap': 143, 'ipp': 631,
'ipps': 631, 'irc': 194, 'ircs': 6697, 'ldap': 389,
'ldaps': 636, 'mms': 1755, 'msrp': 2855, 'msrps': None,
'mtqp': 1038, 'nfs': 111, 'nntp': 119, 'nntps': 563,
'pop': 110, 'prospero': 1525, 'redis': 6379, 'rsync': 873,
'rtsp': 554, 'rtsps': 322, 'rtspu': 5005, 'sftp': 22,
'smb': 445, 'snmp': 161, 'ssh': 22, 'steam': None,
'svn': 3690, 'telnet': 23, 'ventrilo': 3784, 'vnc': 5900,
'wais': 210, 'ws': 80, 'wss': 443, 'xmpp': None}
# This list of schemes that don't use authorities is also from the link above.
NO_NETLOC_SCHEMES = set(['urn', 'about', 'bitcoin', 'blob', 'data', 'geo',
'magnet', 'mailto', 'news', 'pkcs11',
'sip', 'sips', 'tel'])
# As of Mar 11, 2017, there were 44 netloc schemes, and 13 non-netloc
def register_scheme(text, uses_netloc=True, default_port=None):
"""Registers new scheme information, resulting in correct port and
slash behavior from the URL object. There are dozens of standard
schemes preregistered, so this function is mostly meant for
proprietary internal customizations or stopgaps on missing
standards information. If a scheme seems to be missing, please
`file an issue`_!
Args:
text (unicode): Text representing the scheme.
(the 'http' in 'http://hatnote.com')
uses_netloc (bool): Does the scheme support specifying a
network host? For instance, "http" does, "mailto" does
not. Defaults to True.
default_port (int): The default port, if any, for netloc-using
schemes.
.. _file an issue: https://github.com/mahmoud/hyperlink/issues
"""
text = text.lower()
if default_port is not None:
try:
default_port = int(default_port)
except (ValueError, TypeError):
raise ValueError('default_port expected integer or None, not %r'
% (default_port,))
if uses_netloc is True:
SCHEME_PORT_MAP[text] = default_port
elif uses_netloc is False:
if default_port is not None:
raise ValueError('unexpected default port while specifying'
' non-netloc scheme: %r' % default_port)
NO_NETLOC_SCHEMES.add(text)
else:
raise ValueError('uses_netloc expected bool, not: %r' % uses_netloc)
return
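# Illustrative usage (the scheme name and port below are made up for the example):
#
#   >>> register_scheme(u'foocorp', default_port=8042)
#   >>> URL.from_text(u'foocorp://api.example.com/v1').port
#   8042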
def scheme_uses_netloc(scheme, default=None):
"""Whether or not a URL uses :code:`:` or :code:`://` to separate the
scheme from the rest of the URL depends on the scheme's own
standard definition. There is no way to infer this behavior
from other parts of the URL. A scheme either supports network
locations or it does not.
The URL type's approach to this is to check for explicitly
registered schemes, with common schemes like HTTP
preregistered. This is the same approach taken by
:mod:`urlparse`.
URL adds two additional heuristics if the scheme as a whole is
not registered. First, it attempts to check the subpart of the
scheme after the last ``+`` character. This adds intuitive
behavior for schemes like ``git+ssh``. Second, if a URL with
an unrecognized scheme is loaded, it will maintain the
separator it sees.
"""
if not scheme:
return False
scheme = scheme.lower()
if scheme in SCHEME_PORT_MAP:
return True
if scheme in NO_NETLOC_SCHEMES:
return False
if scheme.split('+')[-1] in SCHEME_PORT_MAP:
return True
return default
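# Behavior sketch (based on the preregistered tables above):
#
#   >>> scheme_uses_netloc(u'https')
#   True
#   >>> scheme_uses_netloc(u'mailto')
#   False
#   >>> scheme_uses_netloc(u'git+ssh')  # falls back to the text after the last '+'
#   True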
class URLParseError(ValueError):
"""Exception inheriting from :exc:`ValueError`, raised when failing to
parse a URL. Mostly raised on invalid ports and IPv6 addresses.
"""
pass
def _optional(argument, default):
if argument is _UNSET:
return default
else:
return argument
def _typecheck(name, value, *types):
"""
Check that the given *value* is one of the given *types*, or raise an
exception describing the problem using *name*.
"""
if not types:
raise ValueError('expected one or more types, maybe use _textcheck?')
if not isinstance(value, types):
raise TypeError("expected %s for %s, got %r"
% (" or ".join([t.__name__ for t in types]),
name, value))
return value
def _textcheck(name, value, delims=frozenset(), nullable=False):
if not isinstance(value, unicode):
if nullable and value is None:
return value # used by query string values
else:
str_name = "unicode" if bytes is str else "str"
exp = str_name + ' or NoneType' if nullable else str_name
raise TypeError('expected %s for %s, got %r' % (exp, name, value))
if delims and set(value) & set(delims): # TODO: test caching into regexes
raise ValueError('one or more reserved delimiters %s present in %s: %r'
% (''.join(delims), name, value))
return value
def _decode_userinfo_part(text):
return _percent_decode(text, _decode_map=_USERINFO_DECODE_MAP)
def _decode_path_part(text):
return _percent_decode(text, _decode_map=_PATH_DECODE_MAP)
def _decode_query_part(text):
return _percent_decode(text, _decode_map=_QUERY_DECODE_MAP)
def _decode_fragment_part(text):
return _percent_decode(text, _decode_map=_FRAGMENT_DECODE_MAP)
def _percent_decode(text, _decode_map=_HEX_CHAR_MAP):
"""Convert percent-encoded text characters to their normal,
human-readable equivalents.
All characters in the input text must be valid ASCII. All special
characters underlying the values in the percent-encoding must be
valid UTF-8.
Only called by field-tailored variants, e.g.,
:func:`_decode_path_part`, as every percent-encodable part of the
URL has characters which should not be percent decoded.
>>> _percent_decode(u'abc%20def')
u'abc def'
Args:
text (unicode): The ASCII text with percent-encoding present.
Returns:
unicode: The percent-decoded version of *text*, with UTF-8
decoding applied.
"""
try:
quoted_bytes = text.encode("ascii")
except UnicodeEncodeError:
return text
bits = quoted_bytes.split(b'%')
if len(bits) == 1:
return text
res = [bits[0]]
append = res.append
for item in bits[1:]:
try:
append(_decode_map[item[:2]])
append(item[2:])
except KeyError:
append(b'%')
append(item)
unquoted_bytes = b''.join(res)
try:
return unquoted_bytes.decode("utf-8")
except UnicodeDecodeError:
return text
def _resolve_dot_segments(path):
"""Normalize the URL path by resolving segments of '.' and '..'. For
more details, see `RFC 3986 section 5.2.4, Remove Dot Segments`_.
Args:
path (list): path segments in string form
Returns:
list: a new list of path segments with the '.' and '..' elements
removed and resolved.
.. _RFC 3986 section 5.2.4, Remove Dot Segments: https://tools.ietf.org/html/rfc3986#section-5.2.4
"""
segs = []
for seg in path:
if seg == u'.':
pass
elif seg == u'..':
if segs:
segs.pop()
else:
segs.append(seg)
if list(path[-1:]) in ([u'.'], [u'..']):
segs.append(u'')
return segs
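# Worked example for _resolve_dot_segments: '..' removes the preceding segment,
# '.' is dropped, and a trailing '.' or '..' leaves an empty segment so a
# re-joined path keeps its trailing slash:
#
#   >>> _resolve_dot_segments([u'a', u'b', u'..', u'c', u'.'])
#   [u'a', u'c', u'']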
def parse_host(host):
"""Parse the host into a tuple of ``(family, host)``, where family
is the appropriate :mod:`socket` module constant when the host is
an IP address. Family is ``None`` when the host is not an IP.
Will raise :class:`URLParseError` on invalid IPv6 constants.
Returns:
tuple: family (socket constant or None), host (string)
>>> parse_host('googlewebsite.com') == (None, 'googlewebsite.com')
True
>>> parse_host('::1') == (socket.AF_INET6, '::1')
True
>>> parse_host('192.168.1.1') == (socket.AF_INET, '192.168.1.1')
True
"""
if not host:
return None, u''
if u':' in host:
try:
inet_pton(socket.AF_INET6, host)
except socket.error as se:
raise URLParseError('invalid IPv6 host: %r (%r)' % (host, se))
except UnicodeEncodeError:
pass # TODO: this can't be a real host right?
else:
family = socket.AF_INET6
return family, host
try:
inet_pton(socket.AF_INET, host)
except (socket.error, UnicodeEncodeError):
family = None # not an IP
else:
family = socket.AF_INET
return family, host
class URL(object):
"""From blogs to billboards, URLs are so common, that it's easy to
overlook their complexity and power. With hyperlink's
:class:`URL` type, working with URLs doesn't have to be hard.
URLs are made of many parts. Most of these parts are officially
named in `RFC 3986`_ and this diagram may prove handy in identifying
them::
foo://user:[email protected]:8042/over/there?name=ferret#nose
\_/ \_______/ \_________/ \__/\_________/ \_________/ \__/
| | | | | | |
scheme userinfo host port path query fragment
While :meth:`~URL.from_text` is used for parsing whole URLs, the
:class:`URL` constructor builds a URL from the individual
components, like so::
>>> from hyperlink import URL
>>> url = URL(scheme=u'https', host=u'example.com', path=[u'hello', u'world'])
>>> print(url.to_text())
https://example.com/hello/world
The constructor runs basic type checks. All strings are expected
to be decoded (:class:`unicode` in Python 2). All arguments are
optional, defaulting to appropriately empty values. A full list of
constructor arguments is below.
Args:
scheme (unicode): The text name of the scheme.
host (unicode): The host portion of the network location
port (int): The port part of the network location. If
``None`` or no port is passed, the port will default to
the default port of the scheme, if it is known. See the
``SCHEME_PORT_MAP`` and :func:`register_default_port`
for more info.
path (tuple): A tuple of strings representing the
slash-separated parts of the path.
query (tuple): The query parameters, as a tuple of
key-value pairs.
fragment (unicode): The fragment part of the URL.
rooted (bool): Whether or not the path begins with a slash.
userinfo (unicode): The username or colon-separated
username:password pair.
uses_netloc (bool): Indicates whether two slashes appear
between the scheme and the host (``http://eg.com`` vs
``mailto:[email protected]``). Set automatically based on scheme.
All of these parts are also exposed as read-only attributes of
URL instances, along with several useful methods.
.. _RFC 3986: https://tools.ietf.org/html/rfc3986
.. _RFC 3987: https://tools.ietf.org/html/rfc3987
"""
def __init__(self, scheme=None, host=None, path=(), query=(), fragment=u'',
port=None, rooted=None, userinfo=u'', uses_netloc=None):
if host is not None and scheme is None:
scheme = u'http' # TODO: why
if port is None:
port = SCHEME_PORT_MAP.get(scheme)
if host and query and not path:
# per RFC 3986 6.2.3, "a URI that uses the generic syntax
# for authority with an empty path should be normalized to
# a path of '/'."
path = (u'',)
# Now that we're done detecting whether they were passed, we can set
# them to their defaults:
if scheme is None:
scheme = u''
if host is None:
host = u''
if rooted is None:
rooted = bool(host)
# Set attributes.
self._scheme = _textcheck("scheme", scheme)
if self._scheme:
if not _SCHEME_RE.match(self._scheme):
                raise ValueError('invalid scheme: %r. Only alphanumeric, "+",'
                                 ' "-", and "." allowed. Did you mean to call'
                                 ' %s.from_text()?'
                                 % (self._scheme, self.__class__.__name__))
_, self._host = parse_host(_textcheck('host', host, '/?#@'))
if isinstance(path, unicode):
raise TypeError("expected iterable of text for path, not: %r"
% (path,))
self._path = tuple((_textcheck("path segment", segment, '/?#')
for segment in path))
self._query = tuple(
(_textcheck("query parameter name", k, '&=#'),
_textcheck("query parameter value", v, '&#', nullable=True))
for (k, v) in query
)
self._fragment = _textcheck("fragment", fragment)
self._port = _typecheck("port", port, int, NoneType)
self._rooted = _typecheck("rooted", rooted, bool)
self._userinfo = _textcheck("userinfo", userinfo, '/?#@')
uses_netloc = scheme_uses_netloc(self._scheme, uses_netloc)
self._uses_netloc = _typecheck("uses_netloc",
uses_netloc, bool, NoneType)
return
@property
def scheme(self):
"""The scheme is a string, and the first part of an absolute URL, the
part before the first colon, and the part which defines the
semantics of the rest of the URL. Examples include "http",
"https", "ssh", "file", "mailto", and many others. See
:func:`~hyperlink.register_scheme()` for more info.
"""
return self._scheme
@property
def host(self):
"""The host is a string, and the second standard part of an absolute
URL. When present, a valid host must be a domain name, or an
IP (v4 or v6). It occurs before the first slash, or the second
colon, if a :attr:`~hyperlink.URL.port` is provided.
"""
return self._host
@property
def port(self):
"""The port is an integer that is commonly used in connecting to the
:attr:`host`, and almost never appears without it.
When not present in the original URL, this attribute defaults
to the scheme's default port. If the scheme's default port is
not known, and the port is not provided, this attribute will
be set to None.
>>> URL.from_text(u'http://example.com/pa/th').port
80
>>> URL.from_text(u'foo://example.com/pa/th').port
>>> URL.from_text(u'foo://example.com:8042/pa/th').port
8042
.. note::
            Per the standard, when the port is the same as the scheme's
default port, it will be omitted in the text URL.
"""
return self._port
@property
def path(self):
"""A tuple of strings, created by splitting the slash-separated
hierarchical path. Started by the first slash after the host,
terminated by a "?", which indicates the start of the
:attr:`~hyperlink.URL.query` string.
"""
return self._path
@property
def query(self):
"""Tuple of pairs, created by splitting the ampersand-separated
mapping of keys and optional values representing
non-hierarchical data used to identify the resource. Keys are
always strings. Values are strings when present, or None when
missing.
For more operations on the mapping, see
:meth:`~hyperlink.URL.get()`, :meth:`~hyperlink.URL.add()`,
:meth:`~hyperlink.URL.set()`, and
:meth:`~hyperlink.URL.delete()`.
"""
return self._query
@property
def fragment(self):
"""A string, the last part of the URL, indicated by the first "#"
after the :attr:`~hyperlink.URL.path` or
:attr:`~hyperlink.URL.query`. Enables indirect identification
of a secondary resource, like an anchor within an HTML page.
"""
return self._fragment
@property
def rooted(self):
"""Whether or not the path starts with a forward slash (``/``).
This is taken from the terminology in the BNF grammar,
specifically the "path-rootless", rule, since "absolute path"
and "absolute URI" are somewhat ambiguous. :attr:`path` does
not contain the implicit prefixed ``"/"`` since that is
somewhat awkward to work with.
"""
return self._rooted
@property
def userinfo(self):
"""The colon-separated string forming the username-password
combination.
"""
return self._userinfo
@property
def uses_netloc(self):
"""
"""
return self._uses_netloc
@property
def user(self):
"""
The user portion of :attr:`~hyperlink.URL.userinfo`.
"""
return self.userinfo.split(u':')[0]
def authority(self, with_password=False, **kw):
"""Compute and return the appropriate host/port/userinfo combination.
>>> url = URL.from_text(u'http://user:pass@localhost:8080/a/b?x=y')
>>> url.authority()
u'user:@localhost:8080'
>>> url.authority(with_password=True)
u'user:pass@localhost:8080'
Args:
with_password (bool): Whether the return value of this
method include the password in the URL, if it is
set. Defaults to False.
Returns:
str: The authority (network location and user information) portion
of the URL.
"""
# first, a bit of twisted compat
with_password = kw.pop('includeSecrets', with_password)
if kw:
raise TypeError('got unexpected keyword arguments: %r' % kw.keys())
host = self.host
if ':' in host:
hostport = ['[' + host + ']']
else:
hostport = [self.host]
if self.port != SCHEME_PORT_MAP.get(self.scheme):
hostport.append(unicode(self.port))
authority = []
if self.userinfo:
userinfo = self.userinfo
if not with_password and u":" in userinfo:
userinfo = userinfo[:userinfo.index(u":") + 1]
authority.append(userinfo)
authority.append(u":".join(hostport))
return u"@".join(authority)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
for attr in ['scheme', 'userinfo', 'host', 'query',
'fragment', 'port', 'uses_netloc']:
if getattr(self, attr) != getattr(other, attr):
return False
if self.path == other.path or (self.path in _ROOT_PATHS
and other.path in _ROOT_PATHS):
return True
return False
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return not self.__eq__(other)
def __hash__(self):
return hash((self.__class__, self.scheme, self.userinfo, self.host,
self.path, self.query, self.fragment, self.port,
self.rooted, self.uses_netloc))
@property
def absolute(self):
"""Whether or not the URL is "absolute". Absolute URLs are complete
enough to resolve to a network resource without being relative
to a base URI.
>>> URL.from_text(u'http://wikipedia.org/').absolute
True
>>> URL.from_text(u'?a=b&c=d').absolute
False
Absolute URLs must have both a scheme and a host set.
"""
return bool(self.scheme and self.host)
def replace(self, scheme=_UNSET, host=_UNSET, path=_UNSET, query=_UNSET,
fragment=_UNSET, port=_UNSET, rooted=_UNSET, userinfo=_UNSET,
uses_netloc=_UNSET):
""":class:`URL` objects are immutable, which means that attributes
are designed to be set only once, at construction. Instead of
modifying an existing URL, one simply creates a copy with the
desired changes.
If any of the following arguments is omitted, it defaults to
the value on the current URL.
Args:
scheme (unicode): The text name of the scheme.
host (unicode): The host portion of the network location
port (int): The port part of the network location.
path (tuple): A tuple of strings representing the
slash-separated parts of the path.
query (tuple): The query parameters, as a tuple of
key-value pairs.
fragment (unicode): The fragment part of the URL.
rooted (bool): Whether or not the path begins with a slash.
userinfo (unicode): The username or colon-separated
username:password pair.
uses_netloc (bool): Indicates whether two slashes appear
between the scheme and the host (``http://eg.com`` vs
``mailto:[email protected]``)
Returns:
URL: a copy of the current :class:`URL`, with new values for
parameters passed.
"""
return self.__class__(
scheme=_optional(scheme, self.scheme),
host=_optional(host, self.host),
path=_optional(path, self.path),
query=_optional(query, self.query),
fragment=_optional(fragment, self.fragment),
port=_optional(port, self.port),
rooted=_optional(rooted, self.rooted),
userinfo=_optional(userinfo, self.userinfo),
uses_netloc=_optional(uses_netloc, self.uses_netloc)
)
@classmethod
def from_text(cls, text):
"""Whereas the :class:`URL` constructor is useful for constructing
URLs from parts, :meth:`~URL.from_text` supports parsing whole
URLs from their string form::
>>> URL.from_text(u'http://example.com')
URL.from_text(u'http://example.com')
>>> URL.from_text(u'?a=b&x=y')
URL.from_text(u'?a=b&x=y')
As you can see above, it's also used as the :func:`repr` of
:class:`URL` objects. The natural counterpart to
:func:`~URL.to_text()`. This method only accepts *text*, so be
sure to decode those bytestrings.
Args:
text (unicode): A valid URL string.
Returns:
URL: The structured object version of the parsed string.
.. note::
Somewhat unexpectedly, URLs are a far more permissive
format than most would assume. Many strings which don't
look like URLs are still valid URLs. As a result, this
method only raises :class:`URLParseError` on invalid port
and IPv6 values in the host portion of the URL.
"""
um = _URL_RE.match(_textcheck('text', text))
try:
gs = um.groupdict()
except AttributeError:
raise URLParseError('could not parse url: %r' % text)
au_text = gs['authority'] or u''
au_m = _AUTHORITY_RE.match(au_text)
try:
au_gs = au_m.groupdict()
except AttributeError:
raise URLParseError('invalid authority %r in url: %r'
% (au_text, text))
if au_gs['bad_host']:
            raise URLParseError('invalid host %r in url: %r'
                                % (au_gs['bad_host'], text))
userinfo = au_gs['userinfo'] or u''
host = au_gs['ipv6_host'] or au_gs['plain_host']
port = au_gs['port']
if port is not None:
try:
port = int(port)
except ValueError:
if not port: # TODO: excessive?
raise URLParseError('port must not be empty: %r' % au_text)
raise URLParseError('expected integer for port, not %r' % port)
scheme = gs['scheme'] or u''
fragment = gs['fragment'] or u''
uses_netloc = bool(gs['_netloc_sep'])
if gs['path']:
path = gs['path'].split(u"/")
if not path[0]:
path.pop(0)
rooted = True
else:
rooted = False
else:
path = ()
rooted = bool(au_text)
if gs['query']:
query = ((qe.split(u"=", 1) if u'=' in qe else (qe, None))
for qe in gs['query'].split(u"&"))
else:
query = ()
return cls(scheme, host, path, query, fragment, port,
rooted, userinfo, uses_netloc)
def child(self, *segments):
"""Make a new :class:`URL` where the given path segments are a child
of this URL, preserving other parts of the URL, including the
query string and fragment.
For example::
>>> url = URL.from_text(u'http://localhost/a/b?x=y')
>>> child_url = url.child(u"c", u"d")
>>> child_url.to_text()
u'http://localhost/a/b/c/d?x=y'
Args:
segments (unicode): Additional parts to be joined and added to
the path, like :func:`os.path.join`. Special characters
in segments will be percent encoded.
Returns:
URL: A copy of the current URL with the extra path segments.
"""
segments = [_textcheck('path segment', s) for s in segments]
new_segs = _encode_path_parts(segments, joined=False, maximal=False)
new_path = self.path[:-1 if (self.path and self.path[-1] == u'')
else None] + new_segs
return self.replace(path=new_path)
def sibling(self, segment):
"""Make a new :class:`URL` with a single path segment that is a
sibling of this URL path.
Args:
segment (unicode): A single path segment.
Returns:
URL: A copy of the current URL with the last path segment
replaced by *segment*. Special characters such as
``/?#`` will be percent encoded.
"""
_textcheck('path segment', segment)
new_path = self.path[:-1] + (_encode_path_part(segment),)
return self.replace(path=new_path)
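    # Illustrative example of sibling() (URL text chosen for the sketch):
    #
    #   >>> URL.from_text(u'http://example.com/a/b').sibling(u'c').to_text()
    #   u'http://example.com/a/c'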
def click(self, href=u''):
"""Resolve the given URL relative to this URL.
The resulting URI should match what a web browser would
generate if you visited the current URL and clicked on *href*.
>>> url = URL.from_text(u'http://blog.hatnote.com/')
>>> url.click(u'/post/155074058790').to_text()
u'http://blog.hatnote.com/post/155074058790'
>>> url = URL.from_text(u'http://localhost/a/b/c/')
>>> url.click(u'../d/./e').to_text()
u'http://localhost/a/b/d/e'
Args:
href (unicode): A string representing a clicked URL.
Return:
URL: A copy of the current URL with navigation logic applied.
For more information, see `RFC 3986 section 5`_.
.. _RFC 3986 section 5: https://tools.ietf.org/html/rfc3986#section-5
"""
_textcheck("relative URL", href)
if href:
clicked = URL.from_text(href)
if clicked.absolute:
return clicked
else:
clicked = self
query = clicked.query
if clicked.scheme and not clicked.rooted:
# Schemes with relative paths are not well-defined. RFC 3986 calls
# them a "loophole in prior specifications" that should be avoided,
# or supported only for backwards compatibility.
raise NotImplementedError('absolute URI with rootless path: %r'
% (href,))
else:
if clicked.rooted:
path = clicked.path
elif clicked.path:
path = self.path[:-1] + clicked.path
else:
path = self.path
if not query:
query = self.query
return self.replace(scheme=clicked.scheme or self.scheme,
host=clicked.host or self.host,
port=clicked.port or self.port,
path=_resolve_dot_segments(path),
query=query,
fragment=clicked.fragment)
def to_uri(self):
u"""Make a new :class:`URL` instance with all non-ASCII characters
appropriately percent-encoded. This is useful to do in preparation
for sending a :class:`URL` over a network protocol.
For example::
>>> URL.from_text(u'https://→example.com/foo⇧bar/').to_uri()
URL.from_text(u'https://xn--example-dk9c.com/foo%E2%87%A7bar/')
Returns:
URL: A new instance with its path segments, query parameters, and
hostname encoded, so that they are all in the standard
US-ASCII range.
"""
new_userinfo = u':'.join([_encode_userinfo_part(p) for p in
self.userinfo.split(':', 1)])
new_path = _encode_path_parts(self.path, has_scheme=bool(self.scheme),
rooted=False, joined=False, maximal=True)
return self.replace(
userinfo=new_userinfo,
host=self.host.encode("idna").decode("ascii"),
path=new_path,
query=tuple([tuple(_encode_query_part(x, maximal=True)
if x is not None else None
for x in (k, v))
for k, v in self.query]),
fragment=_encode_fragment_part(self.fragment, maximal=True)
)
def to_iri(self):
u"""Make a new :class:`URL` instance with all but a few reserved
characters decoded into human-readable format.
Percent-encoded Unicode and IDNA-encoded hostnames are
decoded, like so::
>>> url = URL.from_text(u'https://xn--example-dk9c.com/foo%E2%87%A7bar/')
>>> print(url.to_iri().to_text())
https://→example.com/foo⇧bar/
.. note::
As a general Python issue, "narrow" (UCS-2) builds of
Python may not be able to fully decode certain URLs, and
            in those cases, this method will return a best-effort,
            partially-decoded URL which is still valid. This issue
does not affect any Python builds 3.4+.
Returns:
URL: A new instance with its path segments, query parameters, and
hostname decoded for display purposes.
"""
new_userinfo = u':'.join([_decode_userinfo_part(p) for p in
self.userinfo.split(':', 1)])
try:
asciiHost = self.host.encode("ascii")
except UnicodeEncodeError:
textHost = self.host
else:
try:
textHost = asciiHost.decode("idna")
except ValueError:
# only reached on "narrow" (UCS-2) Python builds <3.4, see #7
textHost = self.host
return self.replace(userinfo=new_userinfo,
host=textHost,
path=[_decode_path_part(segment)
for segment in self.path],
query=[tuple(_decode_query_part(x)
if x is not None else None
for x in (k, v))
for k, v in self.query],
fragment=_decode_fragment_part(self.fragment))
def to_text(self, with_password=False):
"""Render this URL to its textual representation.
By default, the URL text will *not* include a password, if one
is set. RFC 3986 considers using URLs to represent such
sensitive information as deprecated. Quoting from RFC 3986,
`section 3.2.1`:
"Applications should not render as clear text any data after the
first colon (":") character found within a userinfo subcomponent
unless the data after the colon is the empty string (indicating no
password)."
Args:
with_password (bool): Whether or not to include the
password in the URL text. Defaults to False.
Returns:
str: The serialized textual representation of this URL,
such as ``u"http://example.com/some/path?some=query"``.
The natural counterpart to :class:`URL.from_text()`.
.. _section 3.2.1: https://tools.ietf.org/html/rfc3986#section-3.2.1
"""
scheme = self.scheme
authority = self.authority(with_password)
path = _encode_path_parts(self.path,
rooted=self.rooted,
has_scheme=bool(scheme),
has_authority=bool(authority),
maximal=False)
query_string = u'&'.join(
u'='.join((_encode_query_part(x, maximal=False)
for x in ([k] if v is None else [k, v])))
for (k, v) in self.query)
fragment = self.fragment
parts = []
_add = parts.append
if scheme:
_add(scheme)
_add(':')
if authority:
_add('//')
_add(authority)
elif (scheme and path[:2] != '//' and self.uses_netloc):
_add('//')
if path:
if scheme and authority and path[:1] != '/':
_add('/') # relpaths with abs authorities auto get '/'
_add(path)
if query_string:
_add('?')
_add(query_string)
if fragment:
_add('#')
_add(fragment)
return u''.join(parts)
def __repr__(self):
"""Convert this URL to an representation that shows all of its
constituent parts, as well as being a valid argument to
:func:`eval`.
"""
return '%s.from_text(%r)' % (self.__class__.__name__, self.to_text())
# # Begin Twisted Compat Code
asURI = to_uri
asIRI = to_iri
@classmethod
def fromText(cls, s):
return cls.from_text(s)
def asText(self, includeSecrets=False):
return self.to_text(with_password=includeSecrets)
def __dir__(self):
try:
ret = object.__dir__(self)
except AttributeError:
# object.__dir__ == AttributeError # pdw for py2
ret = dir(self.__class__) + list(self.__dict__.keys())
ret = sorted(set(ret) - set(['fromText', 'asURI', 'asIRI', 'asText']))
return ret
# # End Twisted Compat Code
def add(self, name, value=None):
"""Make a new :class:`URL` instance with a given query argument,
*name*, added to it with the value *value*, like so::
>>> URL.from_text(u'https://example.com/?x=y').add(u'x')
URL.from_text(u'https://example.com/?x=y&x')
>>> URL.from_text(u'https://example.com/?x=y').add(u'x', u'z')
URL.from_text(u'https://example.com/?x=y&x=z')
Args:
name (unicode): The name of the query parameter to add. The
part before the ``=``.
value (unicode): The value of the query parameter to add. The
part after the ``=``. Defaults to ``None``, meaning no
value.
Returns:
URL: A new :class:`URL` instance with the parameter added.
"""
return self.replace(query=self.query + ((name, value),))
def set(self, name, value=None):
"""Make a new :class:`URL` instance with the query parameter *name*
        set to *value*. All existing occurrences, if any, are replaced
by the single name-value pair.
>>> URL.from_text(u'https://example.com/?x=y').set(u'x')
URL.from_text(u'https://example.com/?x')
>>> URL.from_text(u'https://example.com/?x=y').set(u'x', u'z')
URL.from_text(u'https://example.com/?x=z')
Args:
name (unicode): The name of the query parameter to set. The
part before the ``=``.
value (unicode): The value of the query parameter to set. The
part after the ``=``. Defaults to ``None``, meaning no
value.
Returns:
URL: A new :class:`URL` instance with the parameter set.
"""
# Preserve the original position of the query key in the list
q = [(k, v) for (k, v) in self.query if k != name]
idx = next((i for (i, (k, v)) in enumerate(self.query)
if k == name), -1)
q[idx:idx] = [(name, value)]
return self.replace(query=q)
def get(self, name):
"""Get a list of values for the given query parameter, *name*::
>>> url = URL.from_text(u'?x=1&x=2')
>>> url.get('x')
[u'1', u'2']
>>> url.get('y')
[]
If the given *name* is not set, an empty list is returned. A
list is always returned, and this method raises no exceptions.
Args:
name (unicode): The name of the query parameter to get.
Returns:
list: A list of all the values associated with the key, in
string form.
"""
return [value for (key, value) in self.query if name == key]
def remove(self, name):
"""Make a new :class:`URL` instance with all occurrences of the query
parameter *name* removed. No exception is raised if the
parameter is not already set.
Args:
name (unicode): The name of the query parameter to remove.
Returns:
URL: A new :class:`URL` instance with the parameter removed.
"""
return self.replace(query=((k, v) for (k, v) in self.query
if k != name))
| mit | -4,986,171,367,355,831,000 | 36.006762 | 114 | 0.571382 | false |
disqus/sentry-graphite | sentry_graphite/__init__.py | 1 | 3332 | """
sentry_graphite
~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django import forms
from sentry.conf import settings
from sentry.plugins import Plugin
from pystatsd import Client
NOTSET = object()
class GraphiteConfigurationForm(forms.Form):
host = forms.CharField(max_length=64, widget=forms.TextInput(attrs={
'placeholder': 'graphite.local',
}))
    port = forms.IntegerField(max_value=65535, widget=forms.TextInput(attrs={
'placeholder': '8125',
}))
prefix = forms.CharField(max_length=64, widget=forms.TextInput(attrs={
'placeholder': 'sentry',
}))
class GraphiteProcessor(Plugin):
title = 'Graphite'
conf_key = 'graphite'
project_conf_form = GraphiteConfigurationForm
def __init__(self, min_level=NOTSET, include_loggers=NOTSET, exclude_loggers=NOTSET,
host=NOTSET, port=NOTSET, prefix=NOTSET, *args, **kwargs):
super(GraphiteProcessor, self).__init__(*args, **kwargs)
if min_level is NOTSET:
            min_level = settings.GRAPHITE_LEVEL
if include_loggers is NOTSET:
include_loggers = settings.GRAPHITE_INCLUDE_LOGGERS
if exclude_loggers is NOTSET:
exclude_loggers = settings.GRAPHITE_EXCLUDE_LOGGERS
if host is NOTSET:
host = settings.GRAPHITE_HOST
if port is NOTSET:
port = settings.GRAPHITE_PORT
if prefix is NOTSET:
prefix = settings.GRAPHITE_PREFIX
self.min_level = min_level
self.include_loggers = include_loggers
self.exclude_loggers = exclude_loggers
self.host = host
self.port = port
self.prefix = prefix
self.client = Client(host=self.host, port=self.port)
def record_event(self, group, event, fail_silently=True):
project = group.project
host = self.get_option('host', project) or self.host
port = self.get_option('port', project) or self.port
prefix = self.get_option('prefix', project) or self.prefix
key = '.'.join([prefix, event.message_top])
self.client.increment(key)
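    # record_event example (the names here are illustrative): with prefix u'sentry'
    # and an event whose message_top is u'OperationalError', this increments the
    # statsd counter 'sentry.OperationalError' once per recorded event.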
def should_record(self, group, event):
project = group.project
host = self.get_option('host', project) or self.host
if not host:
return False
port = self.get_option('port', project) or self.port
if not port:
return False
prefix = self.get_option('prefix', project) or self.prefix
if not prefix:
return False
min_level = self.get_option('min_level', project) or self.min_level
if min_level is not None and int(group.level) < min_level:
return False
include_loggers = self.get_option('include_loggers', project) or self.include_loggers
if include_loggers is not None and group.logger not in include_loggers:
return False
exclude_loggers = self.get_option('exclude_loggers', project) or self.exclude_loggers
if exclude_loggers and group.logger in exclude_loggers:
return False
return True
def post_process(self, group, event, is_new, is_sample, **kwargs):
if not self.should_record(group, event):
return
self.record_event(group, event)
| apache-2.0 | -4,973,079,646,618,907,000 | 31.990099 | 93 | 0.634754 | false |
dionbosschieter/NetworkMonitor | samples/sniff.py | 1 | 3578 | #! /usr/bin/env python3
"""
Example to sniff all HTTP traffic on eth0 interface:
sudo ./sniff.py eth0 "port 80"
"""
import sys
import pcap
import time
import socket
import struct
if sys.version_info[0] > 2:
IPPROTO = bytes ((0x08, 0x00))
bord = int
else:
IPPROTO = '\x08\x00'
bord = ord
protocols={socket.IPPROTO_TCP:'tcp',
socket.IPPROTO_UDP:'udp',
socket.IPPROTO_ICMP:'icmp'}
def decode_ip_packet(s):
d={}
d['version']=(bord(s[0]) & 0xf0) >> 4
d['header_len']=bord(s[0]) & 0x0f
d['tos']=bord(s[1])
d['total_len']=socket.ntohs(struct.unpack('H',s[2:4])[0])
d['id']=socket.ntohs(struct.unpack('H',s[4:6])[0])
d['flags']=(bord(s[6]) & 0xe0) >> 5
    d['fragment_offset']=socket.ntohs(struct.unpack('H',s[6:8])[0]) & 0x1fff  # lower 13 bits
d['ttl']=bord(s[8])
d['protocol']=bord(s[9])
d['checksum']=socket.ntohs(struct.unpack('H',s[10:12])[0])
d['source_address']=pcap.ntoa(struct.unpack('i',s[12:16])[0])
d['destination_address']=pcap.ntoa(struct.unpack('i',s[16:20])[0])
if d['header_len']>5:
d['options']=s[20:4*(d['header_len']-5)]
else:
d['options']=None
d['data']=s[4*d['header_len']:]
return d
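# decode_ip_packet example: a minimal IPv4 header whose first byte is 0x45 decodes
# to version=4 and header_len=5 (i.e. 5 * 4 = 20 bytes), so d['data'] starts at s[20:].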
def dumphex(s):
    # Render the payload as rows of up to 16 space-separated hex bytes.
    hex_bytes = ['%.2x' % bord(ch) for ch in s]
    for i in range(0, len(hex_bytes), 16):
        print (' %s' % ' '.join(hex_bytes[i:i + 16]))
def print_packet(pktlen, data, timestamp):
if not data:
return
if data[12:14]==IPPROTO:
decoded=decode_ip_packet(data[14:])
print ('\n%s.%f %s > %s' % (time.strftime('%H:%M',
time.localtime(timestamp)),
timestamp % 60,
decoded['source_address'],
decoded['destination_address']))
for key in ['version', 'header_len', 'tos', 'total_len', 'id',
'flags', 'fragment_offset', 'ttl']:
print (' %s: %d' % (key, decoded[key]))
print (' protocol: %s' % protocols[decoded['protocol']])
print (' header checksum: %d' % decoded['checksum'])
print (' data:')
dumphex(decoded['data'])
if __name__=='__main__':
if len(sys.argv) < 3:
print ('usage: sniff.py <interface> <expr>')
sys.exit(0)
p = pcap.pcapObject()
#dev = pcap.lookupdev()
dev = sys.argv[1]
net, mask = pcap.lookupnet(dev)
# note: to_ms does nothing on linux
p.open_live(dev, 1600, 0, 100)
#p.dump_open('dumpfile')
p.setfilter(' '.join(sys.argv[2:]), 0, 0)
# try-except block to catch keyboard interrupt. Failure to shut
# down cleanly can result in the interface not being taken out of promisc.
# mode
#p.setnonblock(1)
try:
while 1:
p.dispatch(1, print_packet)
# specify 'None' to dump to dumpfile, assuming you have called
# the dump_open method
# p.dispatch(0, None)
# the loop method is another way of doing things
# p.loop(1, print_packet)
# as is the next() method
# p.next() returns a (pktlen, data, timestamp) tuple
# apply(print_packet,p.next())
except KeyboardInterrupt:
        print ('%s' % sys.exc_info()[0])
print ('shutting down')
print ('%d packets received, %d packets dropped, %d packets dropped by interface' % p.stats())
# vim:set ts=4 sw=4 et: | mit | -4,214,800,356,882,184,000 | 30.672566 | 102 | 0.536333 | false |
butla/PyDAS | tests/integrated/test_service.py | 1 | 6259 | import copy
import json
import time
from urllib.parse import urljoin
import requests
from data_acquisition.consts import ACQUISITION_PATH, UPLOADER_REQUEST_PATH
from data_acquisition.resources import get_download_callback_url, get_metadata_callback_url
from data_acquisition.acquisition_request import AcquisitionRequest
from tests.consts import (TEST_AUTH_HEADER, TEST_DOWNLOAD_REQUEST, TEST_ACQUISITION_REQ,
TEST_DOWNLOAD_CALLBACK, TEST_METADATA_CALLBACK, TEST_ORG_UUID)
from tests.utils import dict_is_part_of
def test_acquisition_request(das_client, req_store_real, downloader_imposter):
resp_object = das_client.rest.submitAcquisitionRequest(
body=TEST_DOWNLOAD_REQUEST,
_request_options={'headers': {'authorization': TEST_AUTH_HEADER}}).result()
assert req_store_real.get(resp_object.id).state == 'VALIDATED'
request_to_imposter = downloader_imposter.wait_for_requests()[0]
assert json.loads(request_to_imposter.body) == {
'source': TEST_DOWNLOAD_REQUEST['source'],
'callback': get_download_callback_url('https://das.example.com', resp_object.id)
}
assert dict_is_part_of(request_to_imposter.headers, {'authorization': TEST_AUTH_HEADER})
def test_download_callback(req_store_real, das, metadata_parser_imposter):
# arrange
req_store_real.put(TEST_ACQUISITION_REQ)
req_id = TEST_ACQUISITION_REQ.id
# act
response = requests.post(
get_download_callback_url(das.url, req_id=req_id),
json=TEST_DOWNLOAD_CALLBACK,
headers={'Authorization': TEST_AUTH_HEADER})
# assert
assert response.status_code == 200
assert req_store_real.get(req_id).state == 'DOWNLOADED'
request_to_imposter = metadata_parser_imposter.wait_for_requests()[0]
proper_metadata_req = {
'orgUUID': TEST_ACQUISITION_REQ.orgUUID,
'publicRequest': TEST_ACQUISITION_REQ.publicRequest,
'source': TEST_ACQUISITION_REQ.source,
'category': TEST_ACQUISITION_REQ.category,
'title': TEST_ACQUISITION_REQ.title,
'id': req_id,
'idInObjectStore': TEST_DOWNLOAD_CALLBACK['savedObjectId'],
'callbackUrl': get_metadata_callback_url('https://das.example.com', req_id)
}
assert json.loads(request_to_imposter.body) == proper_metadata_req
assert dict_is_part_of(request_to_imposter.headers, {'authorization': TEST_AUTH_HEADER})
def test_metadata_callback(req_store_real, das):
req_store_real.put(TEST_ACQUISITION_REQ)
req_id = TEST_ACQUISITION_REQ.id
response = requests.post(
get_metadata_callback_url(das.url, req_id=req_id),
json=TEST_METADATA_CALLBACK,
headers={'Authorization': TEST_AUTH_HEADER})
assert response.status_code == 200
assert req_store_real.get(req_id).state == 'FINISHED'
def test_uploader_request(req_store_real, das, metadata_parser_imposter):
# arrange
test_uploader_req = dict(TEST_DOWNLOAD_REQUEST)
test_uploader_req.update({
'idInObjectStore': 'fake-guid/000000_1',
'objectStoreId': 'hdfs://some-fake-hdfs-path',
})
# act
response = requests.post(
urljoin(das.url, UPLOADER_REQUEST_PATH),
json=test_uploader_req,
headers={'Authorization': TEST_AUTH_HEADER})
# assert
assert response.status_code == 200
stored_request = req_store_real.get_for_org(test_uploader_req['orgUUID'])[0]
assert stored_request.state == 'DOWNLOADED'
request_to_imposter = metadata_parser_imposter.wait_for_requests()[0]
proper_metadata_req = {
'orgUUID': TEST_ACQUISITION_REQ.orgUUID,
'publicRequest': TEST_ACQUISITION_REQ.publicRequest,
'source': TEST_ACQUISITION_REQ.source,
'category': TEST_ACQUISITION_REQ.category,
'title': TEST_ACQUISITION_REQ.title,
'id': stored_request.id,
'idInObjectStore': test_uploader_req['idInObjectStore'],
'callbackUrl': get_metadata_callback_url('https://das.example.com', stored_request.id)
}
assert json.loads(request_to_imposter.body) == proper_metadata_req
assert dict_is_part_of(request_to_imposter.headers, {'authorization': TEST_AUTH_HEADER})
def test_get_requests(req_store_real, das):
test_requests = [copy.deepcopy(TEST_ACQUISITION_REQ) for _ in range(3)]
test_requests[1].id = 'qzawx'
test_requests[2].orgUUID = 'some-other-org-uuid'
for test_request in test_requests:
req_store_real.put(test_request)
response = requests.get(
urljoin(das.url, ACQUISITION_PATH),
params={'orgs': TEST_ACQUISITION_REQ.orgUUID},
headers={'Authorization': TEST_AUTH_HEADER})
assert response.status_code == 200
returned_requests = [AcquisitionRequest(**req_json) for req_json in response.json()]
assert set(returned_requests) == set(test_requests[:-1])
def test_access_to_forbidden_org(das):
# Only one organization is allowed by the User Management impostor (bound to "das" fixture).
# That's why this should fail.
response = requests.get(
urljoin(das.url, ACQUISITION_PATH),
params={'orgs': 'org-the-user-has-no-access-to'},
headers={'Authorization': TEST_AUTH_HEADER})
assert response.status_code == 403
def test_access_with_invalid_token(das):
header_with_invalid_signature = TEST_AUTH_HEADER[:-1] + 'P'
response = requests.get(
urljoin(das.url, ACQUISITION_PATH),
params={'orgs': TEST_ORG_UUID},
headers={'Authorization': header_with_invalid_signature})
assert response.status_code == 401
def test_mark_request_failed_on_failed_connection_to_external_service(
das, downloader_imposter, req_store_real):
# simulating that the external service is unavailable
downloader_imposter.destroy()
response = requests.post(
das.url + ACQUISITION_PATH,
json=TEST_DOWNLOAD_REQUEST,
headers={'Authorization': TEST_AUTH_HEADER})
req_id = response.json()['id']
start_time = time.perf_counter()
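    # Poll the request store until the state flips to ERROR, giving up after
    # roughly two seconds.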
while True:
if time.perf_counter() - start_time >= 2.0:
assert False, "Request state didn't change to ERROR after some time."
elif req_store_real.get(req_id).state == 'ERROR':
break
time.sleep(0.001)
| mit | 4,670,217,954,056,264,000 | 37.635802 | 96 | 0.681099 | false |
archyufa/openstack_upstream | cloudferry_devlab/tests/functional_test.py | 1 | 11244 | # Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import unittest
from keystoneclient import exceptions as ks_exceptions
from testconfig import config as config_ini
import cloudferry_devlab.tests.config as config
from cloudferry_devlab import generate_load
from cloudferry_devlab.tests import test_exceptions
import cloudferry_devlab.tests.utils as utils
def suppress_dependency_logging():
suppressed_logs = ['iso8601.iso8601',
'keystoneclient.session',
'neutronclient.client',
'requests.packages.urllib3.connectionpool',
'glanceclient.common.http',
'paramiko.transport']
for l in suppressed_logs:
logging.getLogger(l).setLevel(logging.WARNING)
def get_option_from_config_ini(option, section='migrate'):
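    """Read *option* from *section* of the test config ini; defaults to the string 'False' when missing."""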
return config_ini.get(section, {}).get(option, 'False')
class FunctionalTest(unittest.TestCase):
def setUp(self):
super(FunctionalTest, self).setUp()
suppress_dependency_logging()
if not config_ini:
raise test_exceptions.ConfFileError('Configuration file parameter'
' --tc-file is missing or '
'the file has wrong format')
self.src_cloud = generate_load.Prerequisites(
cloud_prefix='SRC',
configuration_ini=config_ini,
config=config)
self.dst_cloud = generate_load.Prerequisites(
cloud_prefix='DST',
configuration_ini=config_ini,
config=config)
self.migration_utils = utils.MigrationUtils(config)
self.config_ini_path = config_ini['general']['configuration_ini_path']
self.cloudferry_dir = config_ini['general']['cloudferry_dir']
self.filtering_utils = utils.FilteringUtils(
os.path.join(self.cloudferry_dir, get_option_from_config_ini(
'filter_path')))
def filter_networks(self):
networks = [i['name'] for i in config.networks]
for i in config.tenants:
if 'networks' in i and not i.get('deleted'):
for j in i['networks']:
networks.append(j['name'])
return self._get_neutron_resources('networks', networks)
def filter_subnets(self):
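        # Collect the subnets expected from the config (admin-tenant networks plus
        # per-tenant networks) and match them against the environment's subnets by
        # CIDR and tenant id.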
subnets = []
admin_tenant_id = self.src_cloud.get_tenant_id(self.src_cloud.tenant)
for net in config.networks:
if not net.get('subnets'):
continue
for subnet in net['subnets']:
subnet['tenant_id'] = admin_tenant_id
subnets.append(subnet)
subnets = [i for net in config.networks if net.get('subnets')
for i in net['subnets']]
for tenant in config.tenants:
if 'networks' not in tenant or tenant.get('deleted'):
continue
for network in tenant['networks']:
if 'subnets' not in network:
continue
for subnet in network['subnets']:
subnet['tenant_id'] = self.src_cloud.get_tenant_id(
tenant['name'])
subnets.append(subnet)
env_subnets = self.src_cloud.neutronclient.list_subnets()['subnets']
filtered_subnets = {'subnets': []}
for env_subnet in env_subnets:
for subnet in subnets:
same_cidr = env_subnet['cidr'] == subnet['cidr']
same_tenant = env_subnet['tenant_id'] == subnet['tenant_id']
if same_cidr and same_tenant:
filtered_subnets['subnets'].append(env_subnet)
return filtered_subnets
def filter_routers(self):
routers = [i['router']['name'] for i in config.routers]
for tenant in config.tenants:
if tenant.get('routers'):
for router in tenant.get('routers'):
routers.append(router['router']['name'])
return self._get_neutron_resources('routers', routers)
def filter_floatingips(self):
        # Floating IPs are created only now, after the tenant networks exist.
        # To be revisited once there are tests for floating IP association.
def get_fips(_user):
self.src_cloud.switch_user(user=_user['name'],
tenant=_user['tenant'],
password=_user['password'])
_client = self.src_cloud.neutronclient
return [_fip['floating_ip_address']
for _fip in _client.list_floatingips()['floatingips']]
for tenant in config.tenants:
fips = [fip for user in config.users
if tenant['name'] == user.get('tenant') and
user['enabled'] and not user.get('deleted')
for fip in get_fips(user)]
return set(fips)
def filter_users(self):
users = []
for user in config.users:
if user.get('deleted'):
continue
if self.src_cloud.tenant_exists(user.get('tenant')) or\
self.src_cloud.user_has_not_primary_tenants(user['name']):
users.append(user['name'])
return self._get_keystone_resources('users', users)
def filter_tenants(self):
tenants = [i['name'] for i in config.tenants]
return self._get_keystone_resources('tenants', tenants)
def filter_roles(self):
roles = [i['name'] for i in config.roles]
return self._get_keystone_resources('roles', roles)
def get_src_vm_objects_specified_in_config(self):
vms = self.migration_utils.get_all_vms_from_config()
vms_names = [vm['name'] for vm in vms if not vm.get('broken')]
opts = {'search_opts': {'all_tenants': 1}}
return [i for i in self.src_cloud.novaclient.servers.list(**opts)
if i.name in vms_names]
def filter_flavors(self, filter_only_private=False):
flavors = []
if filter_only_private:
nova_args = {'is_public': None}
else:
nova_args = None
all_flavors = config.flavors
for tenant in config.tenants:
if tenant.get('flavors'):
all_flavors += [flavor for flavor in tenant['flavors']]
for flavor in all_flavors:
if filter_only_private:
if flavor.get('is_public') is False:
flavors.append(flavor['name'])
elif 'is_public' not in flavor or flavor.get('is_public'):
flavors.append(flavor['name'])
return self._get_nova_resources('flavors', flavors, nova_args)
def filter_keypairs(self):
return self.src_cloud.get_users_keypairs()
def filter_security_groups(self):
sgs = [sg['name'] for i in config.tenants if 'security_groups' in i
for sg in i['security_groups']]
return self._get_neutron_resources('security_groups', sgs)
def filter_images(self, exclude_images_with_fields=None):
if exclude_images_with_fields is None:
exclude_images_with_fields = {}
if exclude_images_with_fields.get('broken') is None:
exclude_images_with_fields['broken'] = True
def _image_exclude_filter(images):
filtered_images_name = []
for image in images:
for key, value in exclude_images_with_fields.iteritems():
if key in image and image[key] == value:
break
else:
filtered_images_name.append(image['name'])
return filtered_images_name
all_images = self.migration_utils.get_all_images_from_config()
filtered_images = _image_exclude_filter(all_images)
image_list = self.src_cloud.glanceclient.images.list(is_public=None)
return [i for i in image_list if i.name in filtered_images]
def filter_volumes(self):
volumes = config.cinder_volumes
for tenant in config.tenants:
if 'cinder_volumes' in tenant and not tenant.get('deleted'):
volumes.extend(tenant['cinder_volumes'])
volumes.extend(config.cinder_volumes_from_images)
volumes_names = [volume.get('display_name') for volume in volumes]
opts = {'search_opts': {'all_tenants': 1}}
return [i for i in self.src_cloud.cinderclient.volumes.list(**opts)
if i.display_name in volumes_names]
def filter_health_monitors(self):
hm = self.src_cloud.neutronclient.list_health_monitors()
final_hm = [m for m in hm['health_monitors']
if self.src_cloud.tenant_exists(tenant_id=m['tenant_id'])]
return {'health_monitors': final_hm}
def filter_pools(self):
pools = self.src_cloud.neutronclient.list_pools()['pools']
final_p = [p for p in pools
if self.src_cloud.tenant_exists(tenant_id=p['tenant_id'])]
return {'pools': final_p}
def filter_lbaas_members(self):
members = self.src_cloud.neutronclient.list_members()['members']
final_m = [m for m in members
if self.src_cloud.tenant_exists(tenant_id=m['tenant_id'])]
return {'members': final_m}
def filter_vips(self):
vips = self.src_cloud.neutronclient.list_vips()['vips']
final_v = [vip for vip in vips
if self.src_cloud.tenant_exists(tenant_id=vip['tenant_id'])]
return {'vips': final_v}
def _get_neutron_resources(self, res, names):
_list = getattr(self.src_cloud.neutronclient, 'list_' + res)()
return {res: [i for i in _list[res] if i['name'] in names]}
def _get_nova_resources(self, res, names, args=None):
client = getattr(self.src_cloud.novaclient, res)
if args:
return [i for i in client.list(**args) if i.name in names]
else:
return [i for i in client.list() if i.name in names]
def _get_keystone_resources(self, res, names):
client = getattr(self.src_cloud.keystoneclient, res)
return [i for i in client.list()
if i.name in names]
def get_vms_with_fip_associated(self):
vms = config.vms
for tenant in config.tenants:
if 'vms' in tenant:
vms.extend(tenant['vms'])
return [vm['name'] for vm in vms if vm.get('fip')]
def tenant_exists(self, keystone_client, tenant_id):
try:
keystone_client.tenants.get(tenant_id)
except ks_exceptions.NotFound:
return False
return True
| apache-2.0 | 9,063,475,371,507,560,000 | 40.644444 | 79 | 0.586535 | false |
pearkes/stripe-hooks | shared/parser.py | 1 | 2270 | import stripe
from .app import app
from shared.mail import send_notification, send_receipt
from shared.helpers import CleanParseException, format_stripe_object
def parse_hook(payload):
"""Parses a dictionary representation of the stripe webhook
    by requesting a new version of the event by its ID from the stripe
API. This is done for security reasons.
See https://github.com/pearkes/stripe-hooks#security
"""
# Request the event from Stripe, raises stripe.InvalidRequestError if
# not found
event = stripe.Event.retrieve(payload.get("id"))
    # Determine what type of event it is and send any notifications/receipts
determine_event_type(event)
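# Illustrative usage sketch (the Flask-style call below is hypothetical, not
# part of this module): a web endpoint would hand parse_hook() the decoded
# JSON body of the incoming webhook, e.g.
#   payload = flask.request.get_json()
#   parse_hook(payload)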
def determine_event_type(event):
"Determines what type of hook an event is"
config = app.config['email']
if config['notifications'].get(event.type):
parse_notification(event)
if config['receipts'].get(event.type):
parse_receipt(event)
def parse_notification(event):
"Parse the details of an event for a notification"
# Format the data for the email
data = format_stripe_object(event.data.object)
send_notification(event.type, data)
def parse_receipt(event):
"Parse the details of an event for a receipt"
recepient = find_email_address(event.data.object)
# A CleanParseException tells the webhook to respond
    # successfully with a message back to the stripe dashboard
if not recepient:
raise CleanParseException(
"Can't find customer email address for receipt")
# Format the data for the email
data = format_stripe_object(event.data.object)
send_receipt(event.type, recepient, data)
def find_email_address(stripe_object):
"""Looks for an email in a stripe object, returns an email or None
if there wasn't one found, which may be the case sometimes."""
# Some objects have an "email" field, this makes it easy
email = stripe_object.get("email")
if email:
return email
# Others have a customer ID, we'll need to request
# it from Stripe in this case.
customer = stripe_object.get("customer")
if customer:
full_customer = stripe.Customer.retrieve(customer)
if full_customer.email:
return full_customer.email
| mit | 6,396,329,389,875,048,000 | 29.266667 | 73 | 0.701322 | false |
thewisenerd/pymoviedb | src/pymoviedb/__main__.py | 1 | 2130 | #! /usr/bin/env python3
# Copyright (c) 2015 - thewisenerd <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import argparse
import json
import signal
import sys
from operator import itemgetter, attrgetter
import __cfg
import __pymoviedb
from __pymoviedb import __pymoviedb_init, __pymoviedb_check, __pymoviedb_do
from __helpers import _cfg_list_file, _cfg_err_file
def sigint_handler(signum, frame):
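    """Persist the movie list and error log to disk, then exit; registered
    below as the handler for SIGINT/SIGTSTP/SIGTERM."""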
    # re-sort movies by base name before writing them out
n = sorted(__pymoviedb.movies.values(), key=itemgetter('base'))
__pymoviedb.movies = {}
for v in n:
__pymoviedb.movies[v['imdbID']] = v
    # write movie list
with open(_cfg_list_file(), "w") as f:
json.dump(n, f, indent=2)
# write err
with open(_cfg_err_file(), "w") as f:
f.writelines(sorted(__pymoviedb.err_lines))
# exit gracefully.
exit()
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTSTP, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
if (__name__ == "__main__"):
global args
parser = argparse.ArgumentParser()
parser.add_argument("action", help="action", choices=["init", "check", "do"])
parser.add_argument("-v", "--verbose", help="be more verbose", action="store_true")
args = parser.parse_args()
if args.verbose:
__cfg.__verbose = True
if args.action == "init":
__pymoviedb_init()
elif args.action == "check":
__pymoviedb_check()
elif args.action == "do":
__pymoviedb_do()
exit()
| gpl-2.0 | 1,112,315,185,582,330,800 | 26.662338 | 85 | 0.696244 | false |
rush2catch/algorithms-leetcode | Trees/leet_104_MaxDepthOfBinaryTree.py | 1 | 1493 | # Problem: Maximum Depth of Binary Tree
# Difficulty: Easy
# Category: Tree
# Leetcode 104: https://leetcode.com/problems/maximum-depth-of-binary-tree/#/description
# Description:
"""
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
_______3______
/ \
___5__ ___1__
/ \ / \
6 _2_ 0 8
/ \
7 4
"""
from Tree import BinaryTree
class Solution(object):
def max_depth(self, root):
if root is None:
return 0
return self.get_max(root)
def get_max(self, root):
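        # Depth of a node = 1 + max(depth of left subtree, depth of right subtree);
        # an empty subtree contributes 0.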
if root is None:
return 0
if root.leftChild is None and root.rightChild is None:
return 1
return max(self.get_max(root.leftChild), self.get_max(root.rightChild)) + 1
# Construct a binary tree to test
Node_3 = BinaryTree(3)
Node_3.insertLeft(5)
Node_3.insertRight(1)
Node_5 = Node_3.getLeftChild()
Node_1 = Node_3.getRightChild()
Node_5.insertLeft(6)
Node_6 = Node_5.getLeftChild()
Node_5.insertRight(2)
Node_2 = Node_5.getRightChild()
Node_2.insertLeft(7)
Node_7 = Node_2.getLeftChild()
Node_2.insertRight(4)
Node_4 = Node_2.getRightChild()
Node_1.insertLeft(0)
Node_0 = Node_1.getLeftChild()
Node_1.insertRight(8)
Node_8 = Node_1.getRightChild()
obj = Solution()
print(obj.max_depth(Node_3))
print(obj.max_depth(Node_5))
print(obj.max_depth(Node_6))
print(obj.max_depth(Node_6.getLeftChild())) | mit | 8,618,353,277,831,722,000 | 23.491803 | 114 | 0.656397 | false |
iburadempa/multirange | tests/test_most.py | 1 | 26039 | # -*- coding: utf-8 -*-
import unittest
from multirange import *
class MyRange(object):
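    """Minimal range-like object with only .start and .stop, used to check that
    the library accepts duck-typed ranges and honors the construct= argument."""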
def __init__(self, start, stop):
self.start = start
self.stop = stop
class TestMultirange(unittest.TestCase):
def test_normalize(self):
self.assertEqual(normalize(range(-1, 1)), range(-1, 1))
self.assertEqual(normalize(range(1, 1)), None)
self.assertEqual(normalize(range(1, -1)), None)
self.assertEqual(normalize(None), None)
self.assertEqual(normalize(MyRange(-1, 1)), range(-1, 1))
myrange = normalize(MyRange(-1, 1), construct=MyRange)
self.assertEqual(myrange.start, -1)
self.assertEqual(myrange.stop, 1)
myrange = normalize(MyRange(1, 1), construct=MyRange)
self.assertEqual(myrange, None)
myrange = normalize(MyRange(1, -1), construct=MyRange)
self.assertEqual(myrange, None)
myrange = normalize(None, construct=MyRange)
self.assertEqual(myrange, None)
def test_filter_normalize(self):
rs = [range(0, -1, -1), range(1, 5, 2), range(7, 7)]
self.assertEqual(list(filter_normalize(rs)), [None, range(1, 5), None])
rs = [MyRange(0, -1), MyRange(1, 5), MyRange(7, 7)]
self.assertEqual(list(filter_normalize(rs)), [None, range(1, 5), None])
n = list(filter_normalize(rs, construct=MyRange))
self.assertEqual(len(n), 3)
self.assertEqual(n[0], None)
self.assertTrue(isinstance(n[1], MyRange))
self.assertEqual(n[1].start, 1)
self.assertEqual(n[1].stop, 5)
self.assertEqual(n[2], None)
def test_filter_nonempty(self):
rs = (range(0, 10), range(10, 10), range(5, -5), range(0, 10, 2))
self.assertEqual(list(filter_nonempty(rs)), [range(0, 10), range(0, 10)])
self.assertEqual(list(filter_nonempty(rs, do_normalize=False)), [range(0, 10), range(0, 10, 2)])
self.assertEqual(list(filter_nonempty(rs, invert=True)), [None, None])
self.assertEqual(list(filter_nonempty(rs, invert=True, do_normalize=False)), [range(10, 10), range(5, -5)])
self.assertEqual(list(filter_nonempty(rs, with_position=True)), [(0, range(0, 10)), (3, range(0, 10))])
self.assertEqual(list(filter_nonempty(rs, do_normalize=False, with_position=True)), [(0, range(0, 10)), (3, range(0, 10, 2))])
self.assertEqual(list(filter_nonempty(rs, invert=True, with_position=True)), [(1, None), (2, None)])
self.assertEqual(list(filter_nonempty(rs, invert=True, do_normalize=False, with_position=True)), [(1, range(10, 10)), (2, range(5, -5))])
rs = [MyRange(0, 10), MyRange(10, 10), MyRange(5, -5), MyRange(0, 10)]
self.assertEqual(list(filter_nonempty(rs)), [range(0, 10), range(0, 10)])
ne = list(filter_nonempty(rs, construct=MyRange))
self.assertEqual(len(ne), 2)
self.assertTrue(isinstance(ne[0], MyRange))
self.assertEqual(ne[0].start, 0)
self.assertEqual(ne[0].stop, 10)
self.assertTrue(isinstance(ne[1], MyRange))
self.assertEqual(ne[1].start, 0)
self.assertEqual(ne[1].stop, 10)
def test_equal(self):
self.assertEqual(equals(None, None), True)
self.assertEqual(equals(range(0, 7, 3), range(0, 7, 3)), True)
self.assertEqual(equals(range(0, 7, 3), range(0, 7, 4)), True)
self.assertEqual(equals(range(0, 7, 3), range(0, 7)), True)
self.assertEqual(equals(range(0, 5, 3), range(0, 3, 3)), False)
self.assertEqual(equals(range(0, 5, 3), range(0, 4, 3)), False)
self.assertEqual(equals(range(0, 0), None), True)
self.assertEqual(equals(range(0, 10, 100), range(0, 1)), False)
self.assertEqual(equals(range(0, -1), None), True)
def test_filter_equal(self):
rs = [range(0, 1), range(0, 2, 3), None, range(-1, -5, -3), range(0, 2)]
self.assertEqual(list(filter_equal(rs, range(0, 2))), [range(0, 2), range(0, 2)])
self.assertEqual(list(filter_equal(rs, None)), [None, None])
self.assertEqual(list(filter_equal(rs, range(0, 2), with_position=True)), [(1, range(0, 2)), (4, range(0, 2))])
self.assertEqual(list(filter_equal(rs, None, with_position=True)), [(2, None), (3, None)])
self.assertEqual(list(filter_equal(rs, range(0, 2), do_normalize=False)), [range(0, 2, 3), range(0, 2)])
self.assertEqual(list(filter_equal(rs, range(0, 2), do_normalize=False, with_position=True)), [(1, range(0, 2, 3)), (4, range(0, 2))])
rs = [range(0, 1), MyRange(0, 2), MyRange(-1, -5), range(0, 2, 5)]
fe = list(filter_equal(rs, range(0, 2, 10)))
self.assertEqual(fe, [range(0, 2), range(0, 2)])
fe = list(filter_equal(rs, MyRange(0, 2)))
self.assertEqual(fe, [range(0, 2), range(0, 2)])
fe = list(filter_equal(rs, range(0, 2, 5), construct=MyRange))
self.assertEqual(len(fe), 2)
self.assertTrue(isinstance(fe[0], MyRange))
self.assertEqual(fe[0].start, 0)
self.assertEqual(fe[0].stop, 2)
self.assertTrue(isinstance(fe[1], MyRange))
self.assertEqual(fe[1].start, 0)
self.assertEqual(fe[1].stop, 2)
def test_is_adjacent(self):
self.assertEqual(is_adjacent(None, range(3, 7)), None)
self.assertEqual(is_adjacent(range(3, 7), range(5, -5)), None)
self.assertEqual(is_adjacent(range(3, 7), range(5, 10)), False)
self.assertEqual(is_adjacent(range(3, 7), range(7, 10)), True)
self.assertEqual(is_adjacent(range(7, 10), range(3, 7)), True)
def test_overlap(self):
self.assertEqual(overlap(None, range(3, 7)), None)
self.assertEqual(overlap(range(3, 7), None), None)
self.assertEqual(overlap(range(-10, 10), range(3, 7, 2)), range(3, 7))
self.assertEqual(overlap(range(-10, 10), range(10, 20, 3)), None)
self.assertEqual(overlap(range(10, -10), range(10, -10)), None)
self.assertEqual(overlap(range(-10, 10), range(0, 10)), range(0, 10))
self.assertEqual(overlap(range(-10, 10), range(-20, 0, 2)), range(-10, 0))
self.assertEqual(overlap(range(-10, 10), range(-20, -9)), range(-10, -9))
r1 = MyRange(3, 7)
r2 = range(-10, 5)
o = overlap(r1, r2, construct=MyRange)
self.assertTrue(isinstance(o, MyRange))
self.assertEqual(o.start, 3)
self.assertEqual(o.stop, 5)
def test_filter_overlap(self):
rs = (range(0, 10), range(10, 10), range(5, -5, 2), range(0, 10, 2))
self.assertEqual(list(filter_overlap(rs, range(8, 20))), [range(0, 10), range(0, 10, 2)])
self.assertEqual(list(filter_overlap(rs, range(8, 20), with_position=True)), [(0, range(0, 10)), (3, range(0, 10, 2))])
self.assertEqual(list(filter_overlap(rs, range(8, 20), do_normalize=True)), [range(0, 10), range(0, 10)])
self.assertEqual(list(filter_overlap(rs, range(8, 20), do_normalize=True, with_position=True)), [(0, range(0, 10)), (3, range(0, 10))])
rs = [range(0, 10, 2), MyRange(10, 10), MyRange(5, -5), MyRange(0, 10)]
self.assertEqual(list(filter_overlap(rs, range(8, 20), do_normalize=True)), [range(0, 10), range(0, 10)])
ol = list(filter_overlap(rs, MyRange(8, 20), do_normalize=True, construct=MyRange))
self.assertEqual(len(ol), 2)
self.assertTrue(isinstance(ol[0], MyRange))
self.assertEqual(ol[0].start, 0)
self.assertEqual(ol[0].stop, 10)
self.assertTrue(isinstance(ol[1], MyRange))
self.assertEqual(ol[1].start, 0)
self.assertEqual(ol[1].stop, 10)
def test_match_count(self):
self.assertEqual(match_count([], range(-1, 1)), 0)
self.assertEqual(match_count([range(3, 7), range(-5, 0, 2), range(0, 1), range(1, 5), range(-1, 1)], range(-1, 1)), 3)
def test_overlap_all(self):
self.assertEqual(overlap_all([]), None)
self.assertEqual(overlap_all([range(3, 7), range(-5, 0), range(0, 1), range(1, 5)]), None)
self.assertEqual(overlap_all([range(2, 7), range(-5, 10, 5), range(0, 4), range(1, 5)]), range(2, 4))
self.assertEqual(overlap_all([range(2, 7), range(-5, 10, 20), range(0, 4), None]), None)
self.assertEqual(overlap_all([range(2, -7), range(-5, 10), range(0, 4)]), None)
rs = [MyRange(2, 7), range(-5, 10), MyRange(0, 4), MyRange(1, 5)]
o = overlap_all(rs, construct=MyRange)
self.assertTrue(isinstance(o, MyRange))
self.assertEqual(o.start, 2)
self.assertEqual(o.stop, 4)
def test_is_disjunct(self):
rs = (range(0, 5), range(5, 10), range(10, 15, 3), range(20, 25))
self.assertEqual(is_disjunct(rs, assume_ordered_increasingly=True), True)
self.assertEqual(is_disjunct([], assume_ordered_increasingly=True), True)
self.assertEqual(is_disjunct([], assume_ordered_increasingly=False), True)
self.assertEqual(is_disjunct(rs), True)
rs = (range(10, 15), range(20, 25), range(0, 5), range(5, 10))
self.assertEqual(is_disjunct(rs), True)
rs = (range(9, 15), range(20, 25), range(0, 5), range(5, 10))
self.assertEqual(is_disjunct(rs), False)
rs = (range(9, 15), range(20, 25), None, range(5, 10))
self.assertEqual(is_disjunct(rs), False)
rs = (range(11, 15), range(20, 25), None, range(5, 10))
self.assertEqual(is_disjunct(rs), True)
rs = (range(11, 15),)
self.assertEqual(is_disjunct(rs), True)
rs = (range(11, -5), range(6, 8))
self.assertEqual(is_disjunct(rs), True)
rs = (None,)
self.assertEqual(is_disjunct(rs), True)
def test_covering_all(self):
rs = [range(3, 7), range(-5, 0), None, range(1, 5, 2)]
self.assertEqual(covering_all(rs), range(-5, 7))
self.assertEqual(covering_all([None, None]), None)
self.assertEqual(covering_all([range(0, -10), None]), None)
self.assertEqual(covering_all([range(1, 3, 3), range(2, 3)]), range(1, 3))
self.assertEqual(covering_all([range(1, 3), range(2, 5, 7)]), range(1, 5))
self.assertEqual(covering_all([range(1, 3), range(1, 3)]), range(1, 3))
rs = [MyRange(3, 7), range(-5, 0), None, MyRange(1, 5)]
o = covering_all(rs, construct=MyRange)
self.assertTrue(isinstance(o, MyRange))
self.assertEqual(o.start, -5)
self.assertEqual(o.stop, 7)
def test_contains(self):
self.assertEqual(contains(range(0, 9), range(0, 9)), True)
self.assertEqual(contains(range(3, 9), range(0, 9)), False)
self.assertEqual(contains(range(0, 6), range(0, 9)), False)
self.assertEqual(contains(range(3, 6), range(0, 9)), False)
self.assertEqual(contains(None, range(6, 9)), False)
self.assertEqual(contains(None, None), True)
self.assertEqual(contains(range(1, 1), None), True)
self.assertEqual(contains(range(1, 2), None), True)
self.assertEqual(contains(range(1, 2), None), True)
self.assertEqual(contains(range(1, 9), range(5, 9)), True)
self.assertEqual(contains(range(1, 9), range(1, 5)), True)
self.assertEqual(contains(range(1, 9), range(3, 5)), True)
def test_filter_contained(self):
rs = [range(0, 3), range(5, 8), None, range(10, 12, 3), range(10, 20)]
rs2 = [range(5, 8), None, range(10, 12, 3)]
rs2_w_pos = [(1, range(5, 8)), (2, None), (3, range(10, 12, 3))]
self.assertEqual(list(filter_contained(rs, range(3, 15))), rs2)
self.assertEqual(list(filter_contained(rs, range(5, 12))), rs2)
self.assertEqual(list(filter_contained(rs, range(5, 11))), [range(5, 8), None])
self.assertEqual(list(filter_contained(rs, range(3, 15), with_position=True)), rs2_w_pos)
self.assertEqual(list(filter_contained(rs, range(5, 12), with_position=True)), rs2_w_pos)
self.assertEqual(list(filter_contained(rs, range(5, 11), with_position=True)), [(1, range(5, 8)), (2, None)])
self.assertEqual(list(filter_contained(rs, range(3, 15), do_normalize=True)), [range(5, 8), None, range(10, 12)])
self.assertEqual(list(filter_contained(rs, range(3, 15), do_normalize=True, with_position=True)), [(1, range(5, 8)), (2, None), (3, range(10, 12))])
rs = [MyRange(0, 3), MyRange(5, 8), None, range(10, 12, 3), MyRange(10, 20)]
fc = list(filter_contained(rs, range(3, 15), do_normalize=True))
self.assertEqual(fc, [range(5, 8), None, range(10, 12)])
fc = list(filter_contained(rs, range(3, 15), do_normalize=True, construct=MyRange))
self.assertTrue(isinstance(fc, list))
self.assertEqual(len(fc), 3)
self.assertTrue(isinstance(fc[0], MyRange))
self.assertEqual(fc[0].start, 5)
self.assertEqual(fc[0].stop, 8)
self.assertEqual(fc[1], None)
self.assertTrue(isinstance(fc[2], MyRange))
self.assertEqual(fc[2].start, 10)
self.assertEqual(fc[2].stop, 12)
def test_is_covered_by(self):
rs = [range(0, 3), range(5, 8), None, range(20, 30)]
self.assertEqual(is_covered_by(rs, range(0, 30)), True)
self.assertEqual(is_covered_by(rs, range(0, 20)), False)
self.assertEqual(is_covered_by(rs, range(10, 30)), False)
self.assertEqual(is_covered_by(rs, range(-5, 35)), True)
def test_symmetric_difference(self):
self.assertEqual(symmetric_difference(None, None), (None, None))
self.assertEqual(symmetric_difference(None, range(0, 5)), (None, range(0, 5)))
self.assertEqual(symmetric_difference(range(0, 5), None), (range(0, 5), None))
self.assertEqual(symmetric_difference(range(0, 5), range(5, 10)), (range(0, 5), range(5, 10)))
self.assertEqual(symmetric_difference(range(0, 10), range(5, 10)), (range(0, 5), None))
self.assertEqual(symmetric_difference(range(0, 5), range(0, 10)), (None, range(5, 10)))
self.assertEqual(symmetric_difference(range(0, 10), range(5, 15)), (range(0, 5), range(10, 15)))
r1, r2 = symmetric_difference(range(0, 10), MyRange(5, 15), construct=MyRange)
self.assertEqual(r1.start, 0)
self.assertEqual(r1.stop, 5)
self.assertEqual(r2.start, 10)
self.assertEqual(r2.stop, 15)
def test_intermediate(self):
self.assertEqual(intermediate(None, None), None)
self.assertEqual(intermediate(range(0, 5), None), None)
self.assertEqual(intermediate(None, range(0, 5)), None)
self.assertEqual(intermediate(range(0, 5), range(0, 10)), None)
self.assertEqual(intermediate(range(0, 5), range(5, 9)), None)
self.assertEqual(intermediate(range(0, 5), range(10, 15)), range(5, 10))
self.assertEqual(intermediate(range(10, 15), range(0, 5)), range(5, 10))
self.assertEqual(intermediate(range(10, 15), range(0, 10)), None)
self.assertEqual(intermediate(None, None, assume_ordered=True), None)
self.assertEqual(intermediate(range(0, 5), None, assume_ordered=True), None)
self.assertEqual(intermediate(None, range(0, 5), assume_ordered=True), None)
self.assertEqual(intermediate(range(0, 5), range(5, 9), assume_ordered=True), None)
self.assertEqual(intermediate(range(0, 5), range(10, 15), assume_ordered=True), range(5, 10))
self.assertEqual(intermediate(range(10, 15), range(0, 5), assume_ordered=True), None)
self.assertEqual(intermediate(range(10, 15), range(0, 10), assume_ordered=True), None)
im = intermediate(MyRange(0, 5), range(10, 15), construct=MyRange)
self.assertTrue(isinstance(im, MyRange))
self.assertEqual(im.start, 5)
self.assertEqual(im.stop, 10)
def test_sort_by_start(self):
rs1 = [range(1, 3), range(5, 7, 3), range(5, -5), None, range(-2, 10)]
rs2 = [range(-2, 10), range(1, 3), range(5, 7, 3)]
self.assertEqual(sort_by_start(rs1), rs2)
def test_gaps(self):
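        # gaps() yields the uncovered stretches between the given ranges,
        # ignoring empty ranges and None entries.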
rs = [range(0, 1), range(1, 3), range(4, 6), range(6, 6), range(8, 10)]
self.assertEqual(list(gaps(rs)), [range(3, 4), range(6, 8)])
rs = [range(4, 6), range(6, 6), range(8, 10), range(0, 1), range(1, 3)]
self.assertEqual(list(gaps(rs)), [range(3, 4), range(6, 8)])
rs = [range(4, 6), range(4, 5), range(8, 10), range(0, 1), range(1, 3)]
self.assertEqual(list(gaps(rs)), [range(3, 4), range(6, 8)])
rs = [range(4, 6), range(6, 7), range(8, 10), range(0, 1), range(1, 3)]
self.assertEqual(list(gaps(rs)), [range(3, 4), range(7, 8)])
rs = [range(4, 6), range(6, 7), None, range(0, 1), range(1, 3)]
self.assertEqual(list(gaps(rs)), [range(3, 4)])
rs = [range(4, 6), range(6, 7), None, range(0, 1), range(1, 4)]
self.assertEqual(list(gaps(rs)), [])
rs = [range(8, 6), range(10, 17), None, range(0, 1), range(1, 5), range(1, 4), range(1, 8), range(1, 7)]
self.assertEqual(list(gaps(rs)), [range(8, 10)])
rs = [range(0, 1), range(2, 4), range(4, 6), range(6, 8), range(8, 10)]
self.assertEqual(list(gaps(rs)), [range(1, 2)])
rs = [MyRange(0, 1), range(2, 4), MyRange(4, 6), range(6, 8, 5), MyRange(8, 10)]
g = list(gaps(rs, construct=MyRange))
self.assertTrue(isinstance(g, list))
self.assertEqual(len(g), 1)
self.assertTrue(isinstance(g[0], MyRange))
self.assertEqual(g[0].start, 1)
self.assertEqual(g[0].stop, 2)
def test_is_partition_of(self):
rs = [range(0, 1), range(1, 4), range(4, 6), range(6, 8), range(8, 10)]
self.assertEqual(is_partition_of(rs, assume_ordered=True), range(0, 10))
rs = [range(0, 1), range(2, 4), range(4, 6), range(6, 8), range(8, 10)]
self.assertEqual(is_partition_of(rs), None)
rs = [range(8, 10), range(2, 4), range(0, 2), range(6, 6), None, range(4, 8)]
self.assertEqual(is_partition_of(rs), range(0, 10))
rs = [MyRange(8, 10), range(2, 4), MyRange(0, 2), range(6, 6), None, range(4, 8)]
p = is_partition_of(rs, construct=MyRange)
self.assertTrue(isinstance(p, MyRange))
self.assertEqual(p.start, 0)
self.assertEqual(p.stop, 10)
def test_difference(self):
self.assertEqual(difference(range(-5, -10), range(-8, -6)), (None, None))
self.assertEqual(difference(range(-5, -10), range(-4, -6)), (None, None))
self.assertEqual(difference(range(1, 2), range(4, 3)), (range(1, 2), None))
self.assertEqual(difference(range(1, 9), range(2, 3)), (range(1, 2), range(3, 9)))
self.assertEqual(difference(range(1, 9), range(1, 3)), (None, range(3, 9)))
self.assertEqual(difference(range(1, 9), range(3, 9)), (range(1, 3), None))
self.assertEqual(difference(range(1, 9), range(-3, 9)), (None, None))
self.assertEqual(difference(range(1, 9), range(-3, 20)), (None, None))
self.assertEqual(difference(range(1, 9), range(1, 9)), (None, None))
self.assertEqual(difference(range(1, 9), range(-3, 5)), (None, range(5, 9)))
self.assertEqual(difference(range(1, 9), range(5, 15)), (range(1, 5), None))
r1, r2 = difference(MyRange(1, 9), range(3, 9, 3), construct=MyRange)
self.assertTrue(isinstance(r1, MyRange))
self.assertEqual(r1.start, 1)
self.assertEqual(r1.stop, 3)
self.assertEqual(r2, None)
def test_normalize_multi(self):
rs = []
self.assertEqual(list(normalize_multi(rs)), [])
self.assertEqual(list(normalize_multi(rs, assume_ordered_increasingly=True)), [])
rs = [range(0, 1)]
self.assertEqual(list(normalize_multi(rs)), [range(0, 1)])
self.assertEqual(list(normalize_multi(rs, assume_ordered_increasingly=True)), [range(0, 1)])
rs = [range(0, 2), range(4, 5), range(5, 7, 3)]
self.assertEqual(list(normalize_multi(rs)), [range(0, 2), range(4, 7)])
self.assertEqual(list(normalize_multi(rs, assume_ordered_increasingly=True)), [range(0, 2), range(4, 7)])
rs = [range(0, 2), range(3, 8), range(4, 5), range(5, 7)]
self.assertEqual(list(normalize_multi(rs)), [range(0, 2), range(3, 8)])
self.assertEqual(list(normalize_multi(rs, assume_ordered_increasingly=True)), [range(0, 2), range(3, 8)])
rs = [range(0, 2), range(0, 5), range(5, 7), range(8, 8)]
self.assertEqual(list(normalize_multi(rs)), [range(0, 7)])
self.assertEqual(list(normalize_multi(rs, assume_ordered_increasingly=True)), [range(0, 7)])
rs = [None, range(0, 5), range(5, 7), range(8, 20)]
self.assertEqual(list(normalize_multi(rs)), [range(0, 7), range(8, 20)])
self.assertEqual(list(normalize_multi(rs, assume_ordered_increasingly=True)), [range(0, 7), range(8, 20)])
rs = [None, range(0, 5), range(6, 8), range(8, 20)]
self.assertEqual(list(normalize_multi(rs)), [range(0, 5), range(6, 20)])
self.assertEqual(list(normalize_multi(rs, assume_ordered_increasingly=True)), [range(0, 5), range(6, 20)])
rs = [None, range(0, 5), range(6, 8), range(20, 20)]
self.assertEqual(list(normalize_multi(rs)), [range(0, 5), range(6, 8)])
self.assertEqual(list(normalize_multi(rs, assume_ordered_increasingly=True)), [range(0, 5), range(6, 8)])
rs = [None, range(0, 5), range(6, 8), range(6, 9)]
self.assertEqual(list(normalize_multi(rs)), [range(0, 5), range(6, 9)])
self.assertEqual(list(normalize_multi(rs, assume_ordered_increasingly=True)), [range(0, 5), range(6, 9)])
rs = [None, MyRange(0, 5), range(6, 8), MyRange(6, 9)]
n = list(normalize_multi(rs, construct=MyRange))
self.assertEqual(len(n), 2)
self.assertTrue(isinstance(n[0], MyRange))
self.assertEqual(n[0].start, 0)
self.assertEqual(n[0].stop, 5)
self.assertTrue(isinstance(n[1], MyRange))
self.assertEqual(n[1].start, 6)
self.assertEqual(n[1].stop, 9)
def test_difference_one_multi(self):
r = range(0, 20, 3)
mr1 = [range(2, 4), range(6, 8)]
self.assertEqual(list(difference_one_multi(r, mr1)), [range(0, 2), range(4, 6), range(8, 20)])
mr2 = [range(-2, 1), range(2, 4), range(6, 8), range(15, 25)]
self.assertEqual(list(difference_one_multi(r, mr2)), [range(1, 2), range(4, 6), range(8, 15)])
mr3 = [range(-2, 0), range(2, 4), range(6, 8), range(20, 25)]
self.assertEqual(list(difference_one_multi(r, mr3)), [range(0, 2), range(4, 6), range(8, 20)])
r = MyRange(0, 20)
mr = [MyRange(-2, 0), range(2, 4), MyRange(6, 8), range(20, 25)]
d = list(difference_one_multi(r, mr, construct=MyRange))
self.assertEqual(len(d), 3)
self.assertTrue(isinstance(d[0], MyRange))
self.assertEqual(d[0].start, 0)
self.assertEqual(d[0].stop, 2)
self.assertTrue(isinstance(d[1], MyRange))
self.assertEqual(d[1].start, 4)
self.assertEqual(d[1].stop, 6)
self.assertTrue(isinstance(d[2], MyRange))
self.assertEqual(d[2].start, 8)
self.assertEqual(d[2].stop, 20)
def test_multi_intersection(self):
mr1 = [range(2, 4), range(6, 9), range(15, 20), range(25, 30)]
mr2 = [range(-2, 10), range(15, 25), range(30, 32)]
self.assertEqual(list(multi_intersection(mr1, mr2)),
[range(2, 4), range(6, 9), range(15, 20)])
mr1 = [range(-2, 4), range(24, 30)]
mr2 = [range(-2, 10), range(15, 25)]
self.assertEqual(list(multi_intersection(mr1, mr2)),
[range(-2, 4), range(24, 25)])
mr1 = []
mr2 = [range(-2, 10), range(15, 25)]
self.assertEqual(list(multi_intersection(mr1, mr2)), [])
mr1 = [range(-2, 10), range(15, 25)]
mr2 = []
self.assertEqual(list(multi_intersection(mr1, mr2)), [])
mr1 = [range(2, 4), MyRange(6, 9), range(15, 20), range(25, 30)]
mr2 = [range(-2, 10), range(15, 25), MyRange(30, 32)]
r = list(multi_intersection(mr1, mr2, construct=MyRange))
self.assertEqual(r[0].start, 2)
self.assertEqual(r[0].stop, 4)
self.assertEqual(r[1].start, 6)
self.assertEqual(r[1].stop, 9)
self.assertEqual(r[2].start, 15)
self.assertEqual(r[2].stop, 20)
def test_multi_union(self):
mr1 = [range(2, 4), MyRange(6, 9), range(15, 20), range(25, 30)]
mr2 = [range(-2, 10), range(15, 25), MyRange(30, 32)]
self.assertEqual(list(multi_union(mr1, mr2)),
[range(-2, 10), range(15, 32)])
mr1 = [range(0, 5), range(6, 9), MyRange(15, 20), range(25, 30), range (40, 50)]
mr2 = [range(0, 6), range(15, 25), range(30, 32), range(45, 55)]
self.assertEqual(list(multi_union(mr1, mr2)),
[range(0, 9), range(15, 32), range(40, 55)])
mr1 = [MyRange(0, 1), range(3, 4), range(5, 6), range(7, 9), range (10, 20)]
mr2 = [range(1, 2)]
self.assertEqual(list(multi_union(mr1, mr2)),
[range(0, 2), range(3, 4), range(5, 6), range(7, 9), range (10, 20)])
self.assertEqual(list(multi_union(mr2, mr1)),
[range(0, 2), range(3, 4), range(5, 6), range(7, 9), range (10, 20)])
r = list(multi_union(mr1, [MyRange(0, 3)], construct=MyRange))
self.assertEqual(r[0].start, 0)
self.assertEqual(r[0].stop, 4)
self.assertEqual(r[1].start, 5)
self.assertEqual(r[1].stop, 6)
self.assertEqual(r[2].start, 7)
self.assertEqual(r[2].stop, 9)
self.assertEqual(r[3].start, 10)
self.assertEqual(r[3].stop, 20)
mr1 = []
mr2 = [range(0, 1), range(3, 4), range(5, 6), range(7, 9)]
self.assertEqual(list(multi_union(mr1, mr2)), mr2)
self.assertEqual(list(multi_union(mr2, mr1)), mr2)
mr1 = [range(0, 1), range(3, 4), range(5, 6), range(7, 9)]
mr2 = mr1
mr1a = [MyRange(0, 1), MyRange(3, 4), MyRange(5, 6), MyRange(7, 9)]
self.assertEqual(list(multi_union(mr1, mr2)), mr1)
self.assertEqual(list(multi_union(mr2, mr1)), mr1)
| mit | 3,082,697,156,588,855,000 | 55.978118 | 156 | 0.592688 | false |
AlfredoSequeida/sendanywherecli | sendanywherecli.py | 1 | 3713 | from splinter import Browser
from selenium import webdriver
from pyvirtualdisplay import Display
import pyvirtualdisplay
import pyqrcode
import argparse
import os
import time
import urllib.request
import sys
# allow for graceful exit
try:
# file to send
file_to_send = ''
# file receive code
file_receive_code = ''
# save directory for received file
    save_receiving_dir = ''
# display setup
display = Display(visible=0, size = (1024, 768))
# display start
display.start()
    # setting up browser
    browser = Browser()
browser.driver.set_window_size(1024, 768)
# print help if no arguments are given
if len(sys.argv) == 1:
sys.argv.append('-h')
# required arguments
    parser = argparse.ArgumentParser(description='command line interface for using send-anywhere.com')
parser.add_argument('-s','--send', help='send file(s) <file(s)>', required=False)
parser.add_argument('-r','--receive', help='receive a file <key>', required=False)
args = vars(parser.parse_args())
#functions
# waiting for file key
def wait_for_key():
print("Retrieving Share Key ...")
while not browser.find_by_id('key').text:
pass
# countdown timer
def countdown_and_check(t):
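        # Count down t seconds, printing a mm:ss timer and checking once per
        # second whether the receiver has picked up the file.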
while t:
mins, secs = divmod(t, 60)
            # formatting timer
timeformat = '{:02d}:{:02d}'.format(mins, secs)
print(timeformat, end='\r')
time.sleep(1)
t -= 1
            # check if the file has been sent; if so, quit
check_for_sent()
def receive_file(key):
#load page
load_page()
receive_key_element = browser.find_by_id('receive-key').first
receive_key_element.click()
receive_key_element.type(key, slowly = False)
browser.find_by_id('receive-btn').first.click()
receive_frame = browser.find_by_id('receive-frame').value
print ('receive_frame: ' + receive_frame)
        # download the received file; urllib.request.urlretrieve is the
        # Python 3 way to do this.  The source URL is still a placeholder
        # here, and save_receiving_dir is assumed to be the intended
        # destination directory.
        urllib.request.urlretrieve("", save_receiving_dir + 'file.gz')
def send_file(file):
# loading page
load_page()
print('Sending File ...')
file_path = os.path.abspath(file)
# uploading file
browser.attach_file('file[]', file_path)
browser.find_by_id('send-btn').first.click()
# waiting for key
wait_for_key()
# getting file retrieval key
file_receive_code = browser.find_by_id('key').first.text
# displaying file retrieval key for user
print ('key: ' + file_receive_code)
# qr code
render_qr(file_receive_code)
# waiting for user to retrieve file
print ('press CTRL + C at any time to exit')
print ('file available for:')
countdown_and_check(600)
def render_qr(receive_key):
qr_code = pyqrcode.create('http://sendanywhe.re/' + receive_key)
print('qr code:' + qr_code.terminal())
# check if file has been sent to quit
def check_for_sent():
sent_result = browser.find_by_id('sent-result').text
if sent_result == 'Completed':
print ("Complete")
            quit()
# load web widget
def load_page():
print ('Establishing Connection With Server ...')
browser.visit('https://send-anywhere.com')
if args ['send']:
file_to_send = args ['send']
send_file(file_to_send)
elif args ['receive']:
receive_file(args ['receive'])
    # stopping the virtual display
display.stop()
# allow for graceful exit
except KeyboardInterrupt:
quit()
| mit | 8,244,624,096,845,569,000 | 23.427632 | 102 | 0.595475 | false |
avocado-framework/avocado-virt | avocado_virt/test.py | 1 | 2739 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (C) 2014 Red Hat Inc
#
# Author: Lucas Meneghel Rodrigues <[email protected]>
import os
from avocado import Test
from avocado.utils import process
from .qemu import machine
class VirtTest(Test):
def __init__(self, methodName='runTest', name=None, params=None,
base_logdir=None, job=None, runner_queue=None):
super(VirtTest, self).__init__(methodName=methodName, name=name,
params=params, base_logdir=base_logdir,
job=job, runner_queue=runner_queue)
self.vm = None
def _restore_guest_images(self):
"""
Restore any guest images defined in the command line.
"""
drive_file = self.params.get('image_path', '/plugins/virt/guest/*')
# Check if there's a compressed drive file
compressed_drive_file = drive_file + '.xz'
if os.path.isfile(compressed_drive_file):
self.log.debug('Found compressed image %s and restore guest '
'image set. Restoring image...',
compressed_drive_file)
cwd = os.getcwd()
os.chdir(os.path.dirname(compressed_drive_file))
process.run('xz --decompress --keep --force %s' %
os.path.basename(compressed_drive_file))
os.chdir(cwd)
else:
self.log.debug('Restore guest image set, but could not find '
'compressed image %s. Skipping restore...',
compressed_drive_file)
def setUp(self):
"""
Restore guest image, according to params directives.
By default, always restore.
        If only the test-level restore is disabled, restore once per job.
If both are disabled, then never restore.
"""
if not self.params.get('disable_restore_image_test',
'/plugins/virt/guest/*'):
self._restore_guest_images()
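        # Build a minimal guest: no default devices, std VGA, a VNC display,
        # one drive and one NIC.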
self.vm = machine.VM(params=self.params, logdir=self.logdir)
self.vm.devices.add_nodefaults()
self.vm.devices.add_vga('std')
self.vm.devices.add_vnc()
self.vm.devices.add_drive()
self.vm.devices.add_net()
| gpl-2.0 | -736,223,662,183,100,200 | 38.695652 | 78 | 0.60241 | false |
bt3gl/Numerical-Methods-for-Physics | homework6_rayleigh-taylor_instability/compressible/unsplitFluxes.py | 1 | 20308 | """
Implementation of the Colella 2nd order unsplit Godunov scheme. This
is a 2-dimensional implementation only. We assume that the grid is
uniform, but it is relatively straightforward to relax this
assumption.
There are several different options for this solver (they are all
discussed in the Colella paper).
limiter = 0 to use no limiting
= 1 to use the 2nd order MC limiter
= 2 to use the 4th order MC limiter
riemann = HLLC to use the HLLC solver
= CGF to use the Colella, Glaz, and Ferguson solver
use_flattening = 1 to use the multidimensional flattening
algorithm at shocks
delta, z0, z1 these are the flattening parameters. The default
are the values listed in Colella 1990.
j+3/2--+---------+---------+---------+
| | | |
j+1 _| | | |
| | | |
| | | |
j+1/2--+---------XXXXXXXXXXX---------+
| X X |
j _| X X |
| X X |
| X X |
j-1/2--+---------XXXXXXXXXXX---------+
| | | |
j-1 _| | | |
| | | |
| | | |
j-3/2--+---------+---------+---------+
| | | | | | |
i-1 i i+1
i-3/2 i-1/2 i+1/2 i+3/2
We wish to solve
U_t + F^x_x + F^y_y = H
we want U_{i+1/2}^{n+1/2} -- the interface values that are input to
the Riemann problem through the faces for each zone.
Taylor expanding yields
n+1/2 dU dU
U = U + 0.5 dx -- + 0.5 dt --
i+1/2,j,L i,j dx dt
dU dF^x dF^y
= U + 0.5 dx -- - 0.5 dt ( ---- + ---- - H )
i,j dx dx dy
dU dF^x dF^y
= U + 0.5 ( dx -- - dt ---- ) - 0.5 dt ---- + 0.5 dt H
i,j dx dx dy
dt dU dF^y
= U + 0.5 dx ( 1 - -- A^x ) -- - 0.5 dt ---- + 0.5 dt H
i,j dx dx dy
dt _ dF^y
= U + 0.5 ( 1 - -- A^x ) DU - 0.5 dt ---- + 0.5 dt H
i,j dx dy
+----------+-----------+ +----+----+ +---+---+
| | |
this is the monotonized this is the source term
central difference term transverse
flux term
There are two components, the central difference in the normal to the
interface, and the transverse flux difference. This is done for the
left and right sides of all 4 interfaces in a zone, which are then
used as input to the Riemann problem, yielding the 1/2 time interface
values,
n+1/2
U
i+1/2,j
Then, the zone average values are updated in the usual finite-volume
way:
n+1 n dt x n+1/2 x n+1/2
U = U + -- { F (U ) - F (U ) }
i,j i,j dx i-1/2,j i+1/2,j
dt y n+1/2 y n+1/2
+ -- { F (U ) - F (U ) }
dy i,j-1/2 i,j+1/2
Updating U_{i,j}:
-- We want to find the state to the left and right (or top and
bottom) of each interface, ex. U_{i+1/2,j,[lr]}^{n+1/2}, and use
them to solve a Riemann problem across each of the four
interfaces.
-- U_{i+1/2,j,[lr]}^{n+1/2} is comprised of two parts, the
computation of the monotonized central differences in the normal
direction (eqs. 2.8, 2.10) and the computation of the transverse
derivatives, which requires the solution of a Riemann problem in
the transverse direction (eqs. 2.9, 2.14).
-- the monotonized central difference part is computed using
the primitive variables.
-- We compute the central difference part in both directions
before doing the transverse flux differencing, since for the
high-order transverse flux implementation, we use these as
the input to the transverse Riemann problem.
"""
import numpy
import vars
import eos
import mesh.reconstruction_f as reconstruction_f
from util import runparams
from util import profile
from util import msg  # assumed to sit alongside runparams/profile; needed for msg.fail() below
import interface_f
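# Illustrative sketch (not called anywhere in this module) of the conservative
# finite-volume update described in the docstring above, assuming F_x[i, j, :]
# holds the flux through the i-1/2, j face and F_y[i, j, :] the flux through
# the i, j-1/2 face.
def _conservative_update_sketch(U, F_x, F_y, dt, dx, dy):
    """Return U^{n+1} with U[i,j] += dt/dx*(F_x[i,j] - F_x[i+1,j]) + dt/dy*(F_y[i,j] - F_y[i,j+1])."""
    Unew = U.copy()
    Unew[:-1, :-1, :] += dt/dx*(F_x[:-1, :-1, :] - F_x[1:, :-1, :]) + \
                         dt/dy*(F_y[:-1, :-1, :] - F_y[:-1, 1:, :])
    return Unew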
def unsplitFluxes(myData, dt):
"""
unsplitFluxes returns the fluxes through the x and y interfaces by
doing an unsplit reconstruction of the interface values and then
solving the Riemann problem through all the interfaces at once
currently we assume a gamma-law EOS
grav is the gravitational acceleration in the y-direction
"""
pf = profile.timer("unsplitFluxes")
pf.begin()
myg = myData.grid
#=========================================================================
# compute the primitive variables
#=========================================================================
# Q = (rho, u, v, p)
dens = myData.getVarPtr("density")
xmom = myData.getVarPtr("x-momentum")
ymom = myData.getVarPtr("y-momentum")
ener = myData.getVarPtr("energy")
r = dens
# get the velocities
u = xmom/dens
v = ymom/dens
# get the pressure
e = (ener - 0.5*(xmom**2 + ymom**2)/dens)/dens
p = eos.pres(dens, e)
smallp = 1.e-10
p = p.clip(smallp) # apply a floor to the pressure
#=========================================================================
# compute the flattening coefficients
#=========================================================================
# there is a single flattening coefficient (xi) for all directions
use_flattening = runparams.getParam("compressible.use_flattening")
if (use_flattening):
smallp = 1.e-10
delta = runparams.getParam("compressible.delta")
z0 = runparams.getParam("compressible.z0")
z1 = runparams.getParam("compressible.z1")
xi_x = reconstruction_f.flatten(1, p, u, myg.qx, myg.qy, myg.ng, smallp, delta, z0, z1)
xi_y = reconstruction_f.flatten(2, p, v, myg.qx, myg.qy, myg.ng, smallp, delta, z0, z1)
xi = reconstruction_f.flatten_multid(xi_x, xi_y, p, myg.qx, myg.qy, myg.ng)
else:
xi = 1.0
#=========================================================================
# x-direction
#=========================================================================
# monotonized central differences in x-direction
pfa = profile.timer("limiting")
pfa.begin()
limiter = runparams.getParam("compressible.limiter")
if (limiter == 0):
limitFunc = reconstruction_f.nolimit
elif (limiter == 1):
limitFunc = reconstruction_f.limit2
else:
limitFunc = reconstruction_f.limit4
ldelta_r = xi*limitFunc(1, r, myg.qx, myg.qy, myg.ng)
ldelta_u = xi*limitFunc(1, u, myg.qx, myg.qy, myg.ng)
ldelta_v = xi*limitFunc(1, v, myg.qx, myg.qy, myg.ng)
ldelta_p = xi*limitFunc(1, p, myg.qx, myg.qy, myg.ng)
pfa.end()
# left and right primitive variable states
pfb = profile.timer("interfaceStates")
pfb.begin()
gamma = runparams.getParam("eos.gamma")
V_l = numpy.zeros((myg.qx, myg.qy, vars.nvar), dtype=numpy.float64)
V_r = numpy.zeros((myg.qx, myg.qy, vars.nvar), dtype=numpy.float64)
(V_l, V_r) = interface_f.states(1, myg.qx, myg.qy, myg.ng, myg.dx, dt,
vars.nvar,
gamma,
r, u, v, p,
ldelta_r, ldelta_u, ldelta_v, ldelta_p)
pfb.end()
# transform interface states back into conserved variables
U_xl = numpy.zeros((myg.qx, myg.qy, myData.nvar), dtype=numpy.float64)
U_xr = numpy.zeros((myg.qx, myg.qy, myData.nvar), dtype=numpy.float64)
U_xl[:,:,vars.idens] = V_l[:,:,vars.irho]
U_xl[:,:,vars.ixmom] = V_l[:,:,vars.irho]*V_l[:,:,vars.iu]
U_xl[:,:,vars.iymom] = V_l[:,:,vars.irho]*V_l[:,:,vars.iv]
U_xl[:,:,vars.iener] = eos.rhoe(V_l[:,:,vars.ip]) + \
0.5*V_l[:,:,vars.irho]*(V_l[:,:,vars.iu]**2 + V_l[:,:,vars.iv]**2)
U_xr[:,:,vars.idens] = V_r[:,:,vars.irho]
U_xr[:,:,vars.ixmom] = V_r[:,:,vars.irho]*V_r[:,:,vars.iu]
U_xr[:,:,vars.iymom] = V_r[:,:,vars.irho]*V_r[:,:,vars.iv]
U_xr[:,:,vars.iener] = eos.rhoe(V_r[:,:,vars.ip]) + \
0.5*V_r[:,:,vars.irho]*(V_r[:,:,vars.iu]**2 + V_r[:,:,vars.iv]**2)
#=========================================================================
# y-direction
#=========================================================================
# monotonized central differences in y-direction
pfa.begin()
ldelta_r = xi*limitFunc(2, r, myg.qx, myg.qy, myg.ng)
ldelta_u = xi*limitFunc(2, u, myg.qx, myg.qy, myg.ng)
ldelta_v = xi*limitFunc(2, v, myg.qx, myg.qy, myg.ng)
ldelta_p = xi*limitFunc(2, p, myg.qx, myg.qy, myg.ng)
pfa.end()
# left and right primitive variable states
pfb.begin()
(V_l, V_r) = interface_f.states(2, myg.qx, myg.qy, myg.ng, myg.dy, dt,
vars.nvar,
gamma,
r, u, v, p,
ldelta_r, ldelta_u, ldelta_v, ldelta_p)
pfb.end()
# transform interface states back into conserved variables
U_yl = numpy.zeros((myg.qx, myg.qy, myData.nvar), dtype=numpy.float64)
U_yr = numpy.zeros((myg.qx, myg.qy, myData.nvar), dtype=numpy.float64)
U_yl[:,:,vars.idens] = V_l[:,:,vars.irho]
U_yl[:,:,vars.ixmom] = V_l[:,:,vars.irho]*V_l[:,:,vars.iu]
U_yl[:,:,vars.iymom] = V_l[:,:,vars.irho]*V_l[:,:,vars.iv]
U_yl[:,:,vars.iener] = eos.rhoe(V_l[:,:,vars.ip]) + \
0.5*V_l[:,:,vars.irho]*(V_l[:,:,vars.iu]**2 + V_l[:,:,vars.iv]**2)
U_yr[:,:,vars.idens] = V_r[:,:,vars.irho]
U_yr[:,:,vars.ixmom] = V_r[:,:,vars.irho]*V_r[:,:,vars.iu]
U_yr[:,:,vars.iymom] = V_r[:,:,vars.irho]*V_r[:,:,vars.iv]
U_yr[:,:,vars.iener] = eos.rhoe(V_r[:,:,vars.ip]) + \
0.5*V_r[:,:,vars.irho]*(V_r[:,:,vars.iu]**2 + V_r[:,:,vars.iv]**2)
#=========================================================================
# apply source terms
#=========================================================================
grav = runparams.getParam("compressible.grav")
# ymom_xl[i,j] += 0.5*dt*dens[i-1,j]*grav
U_xl[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iymom] += \
0.5*dt*dens[myg.ilo-2:myg.ihi+1,myg.jlo-1:myg.jhi+2]*grav
U_xl[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iener] += \
0.5*dt*ymom[myg.ilo-2:myg.ihi+1,myg.jlo-1:myg.jhi+2]*grav
# ymom_xr[i,j] += 0.5*dt*dens[i,j]*grav
U_xr[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iymom] += \
0.5*dt*dens[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2]*grav
U_xr[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iener] += \
0.5*dt*ymom[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2]*grav
# ymom_yl[i,j] += 0.5*dt*dens[i,j-1]*grav
U_yl[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iymom] += \
0.5*dt*dens[myg.ilo-1:myg.ihi+2,myg.jlo-2:myg.jhi+1]*grav
U_yl[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iener] += \
0.5*dt*ymom[myg.ilo-1:myg.ihi+2,myg.jlo-2:myg.jhi+1]*grav
# ymom_yr[i,j] += 0.5*dt*dens[i,j]*grav
U_yr[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iymom] += \
0.5*dt*dens[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2]*grav
U_yr[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iener] += \
0.5*dt*ymom[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2]*grav
#=========================================================================
# compute transverse fluxes
#=========================================================================
pfc = profile.timer("riemann")
pfc.begin()
riemann = runparams.getParam("compressible.riemann")
if (riemann == "HLLC"):
riemannFunc = interface_f.riemann_hllc
elif (riemann == "CGF"):
riemannFunc = interface_f.riemann_cgf
else:
msg.fail("ERROR: Riemann solver undefined")
F_x = riemannFunc(1, myg.qx, myg.qy, myg.ng,
vars.nvar, vars.idens, vars.ixmom, vars.iymom, vars.iener,
gamma, U_xl, U_xr)
F_y = riemannFunc(2, myg.qx, myg.qy, myg.ng,
vars.nvar, vars.idens, vars.ixmom, vars.iymom, vars.iener,
gamma, U_yl, U_yr)
pfc.end()
#=========================================================================
# construct the interface values of U now
#=========================================================================
"""
finally, we can construct the state perpendicular to the interface
    by adding the central difference part to the transverse flux
difference.
The states that we represent by indices i,j are shown below
(1,2,3,4):
j+3/2--+----------+----------+----------+
| | | |
| | | |
j+1 -+ | | |
| | | |
| | | | 1: U_xl[i,j,:] = U
j+1/2--+----------XXXXXXXXXXXX----------+ i-1/2,j,L
| X X |
| X X |
j -+ 1 X 2 X | 2: U_xr[i,j,:] = U
| X X | i-1/2,j,R
| X 4 X |
j-1/2--+----------XXXXXXXXXXXX----------+
| | 3 | | 3: U_yl[i,j,:] = U
| | | | i,j-1/2,L
j-1 -+ | | |
| | | |
| | | | 4: U_yr[i,j,:] = U
j-3/2--+----------+----------+----------+ i,j-1/2,R
| | | | | | |
i-1 i i+1
i-3/2 i-1/2 i+1/2 i+3/2
remember that the fluxes are stored on the left edge, so
F_x[i,j,:] = F_x
i-1/2, j
F_y[i,j,:] = F_y
i, j-1/2
"""
pfd = profile.timer("transverse flux addition")
pfd.begin()
# U_xl[i,j,:] = U_xl[i,j,:] - 0.5*dt/dy * (F_y[i-1,j+1,:] - F_y[i-1,j,:])
U_xl[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,:] += \
- 0.5*dt/myg.dy * (F_y[myg.ilo-3:myg.ihi+1,myg.jlo-1:myg.jhi+3,:] - \
F_y[myg.ilo-3:myg.ihi+1,myg.jlo-2:myg.jhi+2,:])
# U_xr[i,j,:] = U_xr[i,j,:] - 0.5*dt/dy * (F_y[i,j+1,:] - F_y[i,j,:])
U_xr[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,:] += \
- 0.5*dt/myg.dy * (F_y[myg.ilo-2:myg.ihi+2,myg.jlo-1:myg.jhi+3,:] - \
F_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,:])
# U_yl[i,j,:] = U_yl[i,j,:] - 0.5*dt/dx * (F_x[i+1,j-1,:] - F_x[i,j-1,:])
U_yl[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,:] += \
- 0.5*dt/myg.dx * (F_x[myg.ilo-1:myg.ihi+3,myg.jlo-3:myg.jhi+1,:] - \
F_x[myg.ilo-2:myg.ihi+2,myg.jlo-3:myg.jhi+1,:])
# U_yr[i,j,:] = U_yr[i,j,:] - 0.5*dt/dx * (F_x[i+1,j,:] - F_x[i,j,:])
U_yr[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,:] += \
- 0.5*dt/myg.dx * (F_x[myg.ilo-1:myg.ihi+3,myg.jlo-2:myg.jhi+2,:] - \
F_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,:])
pfd.end()
#=========================================================================
# construct the fluxes normal to the interfaces
#=========================================================================
# up until now, F_x and F_y stored the transverse fluxes, now we
# overwrite with the fluxes normal to the interfaces
pfc.begin()
F_x = riemannFunc(1, myg.qx, myg.qy, myg.ng,
vars.nvar, vars.idens, vars.ixmom, vars.iymom, vars.iener,
gamma, U_xl, U_xr)
F_y = riemannFunc(2, myg.qx, myg.qy, myg.ng,
vars.nvar, vars.idens, vars.ixmom, vars.iymom, vars.iener,
gamma, U_yl, U_yr)
pfc.end()
#=========================================================================
# apply artificial viscosity
#=========================================================================
cvisc = runparams.getParam("compressible.cvisc")
(avisco_x, avisco_y) = interface_f.artificial_viscosity( \
myg.qx, myg.qy, myg.ng, myg.dx, myg.dy, \
cvisc, u, v)
# F_x = F_x + avisco_x * (U(i-1,j) - U(i,j))
F_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.idens] += \
avisco_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
(dens[myg.ilo-3:myg.ihi+1,myg.jlo-2:myg.jhi+2] - \
dens[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
F_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.ixmom] += \
avisco_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
(xmom[myg.ilo-3:myg.ihi+1,myg.jlo-2:myg.jhi+2] - \
xmom[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
F_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.iymom] += \
avisco_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
(ymom[myg.ilo-3:myg.ihi+1,myg.jlo-2:myg.jhi+2] - \
ymom[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
F_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.iener] += \
avisco_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
(ener[myg.ilo-3:myg.ihi+1,myg.jlo-2:myg.jhi+2] - \
ener[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
# F_y = F_y + avisco_y * (U(i,j-1) - U(i,j))
F_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.idens] += \
avisco_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
(dens[myg.ilo-2:myg.ihi+2,myg.jlo-3:myg.jhi+1] - \
dens[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
F_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.ixmom] += \
avisco_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
(xmom[myg.ilo-2:myg.ihi+2,myg.jlo-3:myg.jhi+1] - \
xmom[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
F_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.iymom] += \
avisco_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
(ymom[myg.ilo-2:myg.ihi+2,myg.jlo-3:myg.jhi+1] - \
ymom[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
F_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.iener] += \
avisco_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
(ener[myg.ilo-2:myg.ihi+2,myg.jlo-3:myg.jhi+1] - \
ener[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
pf.end()
return F_x, F_y
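# Usage sketch (names are assumed from the surrounding solver, not defined in
# this routine): the returned face fluxes are typically applied as a
# conservative update of the cell averages, e.g.
#   U[i,j,:] += dt/dx*(F_x[i,j,:] - F_x[i+1,j,:]) + dt/dy*(F_y[i,j,:] - F_y[i,j+1,:])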
| apache-2.0 | -1,976,983,250,665,921,000 | 38.664063 | 111 | 0.429043 | false |
pmoleri/memorize-accesible | speak/voice.py | 1 | 3767 | # Speak.activity
# A simple front end to the espeak text-to-speech engine on the XO laptop
# http://wiki.laptop.org/go/Speak
#
# Copyright (C) 2008 Joshua Minor
# This file is part of Speak.activity
#
# Parts of Speak.activity are based on code from Measure.activity
# Copyright (C) 2007 Arjun Sarwal - [email protected]
#
# Speak.activity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Speak.activity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Speak.activity. If not, see <http://www.gnu.org/licenses/>.
import re, os
from gettext import gettext as _
import logging
logger = logging.getLogger('speak')
import espeak
# Let's trick gettext into generating entries for the voice names we expect espeak to have
# If espeak actually has new or different names then they won't get translated, but they
# should still show up in the interface.
expectedVoiceNames = [
_("Brazil"),
_("Swedish"),
_("Icelandic"),
_("Romanian"),
_("Swahili"),
_("Hindi"),
_("Dutch"),
_("Latin"),
_("Hungarian"),
_("Macedonian"),
_("Welsh"),
_("French"),
_("Norwegian"),
_("Russian"),
_("Afrikaans"),
_("Finnish"),
_("Default"),
_("Cantonese"),
_("Scottish"),
_("Greek"),
_("Vietnam"),
_("English"),
_("Lancashire"),
_("Italian"),
_("Portugal"),
_("German"),
_("Whisper"),
_("Croatian"),
_("Czech"),
_("Slovak"),
_("Spanish"),
_("Polish"),
_("Esperanto")
]
_allVoices = {}
_defaultVoice = None
class Voice:
def __init__(self, language, name):
self.language = language
self.name = name
friendlyname = name
friendlyname = friendlyname.replace('-test','')
friendlyname = friendlyname.replace('_test','')
friendlyname = friendlyname.replace('en-','')
friendlyname = friendlyname.replace('english-wisper','whisper')
friendlyname = friendlyname.capitalize()
self.friendlyname = _(friendlyname)
def allVoices():
if _allVoices:
return _allVoices
for language, name in espeak.voices():
voice = Voice(language, name)
_allVoices[voice.friendlyname] = voice
return _allVoices
def by_name(name):
return allVoices().get(name, defaultVoice())
def defaultVoice():
"""Try to figure out the default voice, from the current locale ($LANG).
Fall back to espeak's voice called Default."""
global _defaultVoice
if _defaultVoice:
return _defaultVoice
voices = allVoices()
def fit(a,b):
"Compare two language ids to see if they are similar."
as_ = re.split(r'[^a-z]+', a.lower())
bs = re.split(r'[^a-z]+', b.lower())
for count in range(0, min(len(as_),len(bs))):
if as_[count] != bs[count]:
count -= 1
break
return count
try:
lang = os.environ["LANG"]
    except KeyError:
lang = ""
best = voices[_("Default")]
for voice in voices.values():
voiceMetric = fit(voice.language, lang)
bestMetric = fit(best.language, lang)
if voiceMetric > bestMetric:
best = voice
print "Best voice for LANG %s seems to be %s %s" % (lang, best.language, best.friendlyname)
_defaultVoice = best
return best
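# Minimal usage sketch (assumes espeak and its voice files are installed):
#   voice = by_name('Spanish')   # keys are the translated friendly names;
#                                # unknown names fall back to defaultVoice()
#   print voice.language, voice.friendlyname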
| gpl-2.0 | -5,171,734,326,967,312,000 | 27.11194 | 95 | 0.617202 | false |
mozman/ezdxf | tests/test_02_dxf_graphics/test_213_minsert.py | 1 | 5852 | # Copyright (c) 2020, Manfred Moitzi
# License: MIT License
from typing import cast
import pytest
import ezdxf
from ezdxf.entities import Insert, Point, Attrib
def test_mcount_property():
insert = Insert.new()
insert.grid(size=(2, 2), spacing=(10, 10))
assert insert.mcount == 4
insert.grid(size=(2, 2), spacing=(10, 0))
assert insert.mcount == 2
insert.grid(size=(2, 2), spacing=(0, 10))
assert insert.mcount == 2
insert.grid(size=(2, 2), spacing=(0, 0))
assert insert.mcount == 1
class TestSimpleBlock:
# without ATTRIB, no rotation, no extrusion
@pytest.fixture(scope='class')
def doc(self):
doc = ezdxf.new()
blk = doc.blocks.new('POINT')
blk.add_point(location=(0, 0))
return doc
@pytest.fixture
def insert(self, doc):
msp = doc.modelspace()
return msp.add_blockref('POINT', (0, 0))
@pytest.fixture
def db(self, doc):
return doc.entitydb
def test_minsert_normal_spacing(self, insert):
insert.grid(size=(2, 2), spacing=(10, 10))
minsert = list(insert.multi_insert())
assert len(minsert) == 4
assert minsert[0].dxf.insert == (0, 0)
assert minsert[1].dxf.insert == (10, 0)
assert minsert[2].dxf.insert == (0, 10)
assert minsert[3].dxf.insert == (10, 10)
def test_discard_minsert_attribs_from_virtual_insert(self, insert):
insert.grid(size=(2, 2), spacing=(10, 10))
vinsert = next(insert.multi_insert())
assert vinsert.dxf.hasattr('row_count') is False
assert vinsert.dxf.hasattr('column_count') is False
assert vinsert.dxf.hasattr('row_spacing') is False
assert vinsert.dxf.hasattr('column_spacing') is False
def test_minsert_zero_column_spacing(self, insert):
insert.grid(size=(2, 2), spacing=(10, 0))
minsert = list(insert.multi_insert())
assert len(minsert) == 2
assert minsert[0].dxf.insert == (0, 0)
assert minsert[1].dxf.insert == (0, 10)
def test_minsert_zero_row_spacing(self, insert):
insert.grid(size=(2, 2), spacing=(0, 10))
minsert = list(insert.multi_insert())
assert len(minsert) == 2
assert minsert[0].dxf.insert == (0, 0)
assert minsert[1].dxf.insert == (10, 0)
def test_explode(self, insert, db):
handle = insert.dxf.handle
insert.grid(size=(2, 2), spacing=(10, 10))
points = insert.explode()
db.purge()
assert insert.is_alive is False
assert handle not in db
assert len(points) == 4
point = cast(Point, points[3])
assert point.dxf.owner is not None, 'not assigned to a layout'
assert point.get_layout().name == 'Model'
assert point.dxf.location == (10, 10)
class TestInsertAttributes:
@pytest.fixture(scope='class')
def doc(self):
doc = ezdxf.new()
blk = doc.blocks.new('POINT')
blk.add_point(location=(0, 0))
return doc
@pytest.fixture(scope='class')
def insert(self, doc):
msp = doc.modelspace()
insert = msp.add_blockref('POINT', (0, 0))
insert.add_attrib('TEST', text='text', insert=(0, 0))
return insert
def test_attribs_transformation(self, insert):
insert.grid(size=(2, 2), spacing=(10, 10))
attribs = [i.attribs[0] for i in insert.multi_insert()]
assert len(attribs) == 4
assert len(set(id(attrib) for attrib in attribs)) == 4
assert attribs[0].dxf.insert == (0, 0)
assert attribs[1].dxf.insert == (10, 0)
assert attribs[2].dxf.insert == (0, 10)
assert attribs[3].dxf.insert == (10, 10)
def test_explode(self, insert, doc):
db = doc.entitydb
handle = insert.dxf.handle
insert.grid(size=(2, 2), spacing=(10, 10))
entities = insert.explode()
db.purge()
assert insert.is_alive is False
assert handle not in db
assert len(entities) == 8
# ATTRIB -> TEXT
attrib = cast(Attrib, entities.query('TEXT')[3])
assert attrib.dxf.owner is not None, 'not assigned to a layout'
assert attrib.get_layout().name == 'Model'
assert attrib.dxf.insert == (10, 10)
class TestRotatedInsert:
angle = 90
@pytest.fixture(scope='class')
def insert(self):
doc = ezdxf.new()
blk = doc.blocks.new('POINT')
blk.add_point(location=(0, 0))
msp = doc.modelspace()
insert = msp.add_blockref('POINT', (0, 0))
insert.dxf.rotation = self.angle
# ATTRIB is placed outside of BLOCK in WCS, INSERT rotation is not
# applied automatically:
attrib = insert.add_attrib('TEST', text='text', insert=(0, 0))
attrib.dxf.rotation = self.angle
return insert
def test_minsert_transformation(self, insert):
insert.grid(size=(2, 2), spacing=(10, 10))
minsert = list(insert.multi_insert())
assert len(minsert) == 4
# Rotated 90° counter clockwise:
assert minsert[0].dxf.insert.isclose((0, 0))
assert minsert[1].dxf.insert.isclose((0, 10))
assert minsert[2].dxf.insert.isclose((-10, 0))
assert minsert[3].dxf.insert.isclose((-10, 10))
def test_attribs_transformation(self, insert):
insert.grid(size=(2, 2), spacing=(10, 10))
attribs = [i.attribs[0] for i in insert.multi_insert()]
assert len(attribs) == 4
assert len(set(id(attrib) for attrib in attribs)) == 4
# Rotated 90° counter clockwise:
assert attribs[0].dxf.insert.isclose((0, 0))
assert attribs[1].dxf.insert.isclose((0, 10))
assert attribs[2].dxf.insert.isclose((-10, 0))
assert attribs[3].dxf.insert.isclose((-10, 10))
if __name__ == '__main__':
pytest.main([__file__])
| mit | 4,195,454,265,759,269,000 | 33.011628 | 74 | 0.595214 | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_3_0_0/models/searchparameter.py | 1 | 7798 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/SearchParameter) on 2017-03-22.
# 2017, SMART Health IT.
from . import domainresource
class SearchParameter(domainresource.DomainResource):
""" Search Parameter for a resource.
A search parameter that defines a named search item that can be used to
search/filter on a resource.
"""
resource_type = "SearchParameter"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.base = None
""" The resource type(s) this search parameter applies to.
List of `str` items. """
self.chain = None
""" Chained names supported.
List of `str` items. """
self.code = None
""" Code used in URL.
Type `str`. """
self.comparator = None
""" eq | ne | gt | lt | ge | le | sa | eb | ap.
List of `str` items. """
self.component = None
""" For Composite resources to define the parts.
List of `SearchParameterComponent` items (represented as `dict` in JSON). """
self.contact = None
""" Contact details for the publisher.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.date = None
""" Date this was last changed.
Type `FHIRDate` (represented as `str` in JSON). """
self.derivedFrom = None
""" Original Definition for the search parameter.
Type `str`. """
self.description = None
""" Natural language description of the search parameter.
Type `str`. """
self.experimental = None
""" For testing purposes, not real usage.
Type `bool`. """
self.expression = None
""" FHIRPath expression that extracts the values.
Type `str`. """
self.jurisdiction = None
""" Intended jurisdiction for search parameter (if applicable).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.modifier = None
""" missing | exact | contains | not | text | in | not-in | below |
above | type.
List of `str` items. """
self.name = None
""" Name for this search parameter (computer friendly).
Type `str`. """
self.publisher = None
""" Name of the publisher (organization or individual).
Type `str`. """
self.purpose = None
""" Why this search parameter is defined.
Type `str`. """
self.status = None
""" draft | active | retired | unknown.
Type `str`. """
self.target = None
""" Types of resource (if a resource reference).
List of `str` items. """
self.type = None
""" number | date | string | token | reference | composite | quantity |
uri.
Type `str`. """
self.url = None
""" Logical URI to reference this search parameter (globally unique).
Type `str`. """
self.useContext = None
""" Context the content is intended to support.
List of `UsageContext` items (represented as `dict` in JSON). """
self.version = None
""" Business version of the search parameter.
Type `str`. """
self.xpath = None
""" XPath that extracts the values.
Type `str`. """
self.xpathUsage = None
""" normal | phonetic | nearby | distance | other.
Type `str`. """
super(SearchParameter, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(SearchParameter, self).elementProperties()
js.extend([
("base", "base", str, True, None, True),
("chain", "chain", str, True, None, False),
("code", "code", str, False, None, True),
("comparator", "comparator", str, True, None, False),
("component", "component", SearchParameterComponent, True, None, False),
("contact", "contact", contactdetail.ContactDetail, True, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("derivedFrom", "derivedFrom", str, False, None, False),
("description", "description", str, False, None, True),
("experimental", "experimental", bool, False, None, False),
("expression", "expression", str, False, None, False),
("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
("modifier", "modifier", str, True, None, False),
("name", "name", str, False, None, True),
("publisher", "publisher", str, False, None, False),
("purpose", "purpose", str, False, None, False),
("status", "status", str, False, None, True),
("target", "target", str, True, None, False),
("type", "type", str, False, None, True),
("url", "url", str, False, None, True),
("useContext", "useContext", usagecontext.UsageContext, True, None, False),
("version", "version", str, False, None, False),
("xpath", "xpath", str, False, None, False),
("xpathUsage", "xpathUsage", str, False, None, False),
])
return js
from . import backboneelement
class SearchParameterComponent(backboneelement.BackboneElement):
""" For Composite resources to define the parts.
Used to define the parts of a composite search parameter.
"""
resource_type = "SearchParameterComponent"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.definition = None
""" Defines how the part works.
Type `FHIRReference` referencing `SearchParameter` (represented as `dict` in JSON). """
self.expression = None
""" Subexpression relative to main expression.
Type `str`. """
super(SearchParameterComponent, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(SearchParameterComponent, self).elementProperties()
js.extend([
("definition", "definition", fhirreference.FHIRReference, False, None, True),
("expression", "expression", str, False, None, True),
])
return js
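# Minimal usage sketch (values are illustrative, not taken from the FHIR spec):
# the model is normally built from a parsed JSON dict and serialised back with
# as_json():
#   sp = SearchParameter({'name': 'patient-name', 'code': 'name',
#                         'base': ['Patient'], 'status': 'active',
#                         'description': 'Search by name', 'type': 'string',
#                         'url': 'http://example.org/SearchParameter/patient-name'})
#   sp.as_json()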
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
| bsd-3-clause | -2,993,350,882,617,828,400 | 35.269767 | 107 | 0.574506 | false |
gammapy/gamma-sky | make.py | 1 | 2880 | #!/usr/bin/env python
"""Make gamma-sky.net input data.
"""
import click
import gammasky
@click.group()
def cli():
"""The gamma-sky.net Python cli"""
pass
@cli.group()
def cat():
"""Dump catalog to JSON"""
@cli.group()
def source():
"""Dump source objects to JSON"""
@cat.command('all')
@click.pass_context
def cat_all(ctx):
"""Dump all catalogs to JSON"""
ctx.invoke(cat_tev)
ctx.invoke(cat_3fhl)
ctx.invoke(cat_3fgl)
ctx.invoke(cat_snrcat)
@cat.command('tev')
def cat_tev():
"""Dump TeV catalog to JSON"""
gammasky.make_tev_catalog_data()
@cat.command('3fhl')
def cat_3fhl():
"""Dump 3FHL catalog to JSON"""
gammasky.make_3fhl_catalog_data()
@cat.command('3fgl')
def cat_3fgl():
"""Dump 3FGL catalog to JSON"""
gammasky.make_3fgl_catalog_data()
@cat.command('snrcat')
def cat_snrcat():
"""Dump SNRCat catalog to JSON"""
gammasky.make_snrcat_catalog_data()
@source.command('all')
@click.pass_context
def source_all(ctx):
"""Dump all source objects to JSON"""
ctx.invoke(source_tev)
ctx.invoke(source_3fhl)
ctx.invoke(source_3fgl)
@source.command('tev')
@click.option('--sources', default='all', help='Either "all" or comma-separated string of source IDs')
def source_tev(sources):
"""Dump TeV source objects to JSON"""
gammasky.make_tev_source_data(sources)
@source.command('3fhl')
@click.option('--sources', default='all', help='Either "all" or comma-separated string of source IDs')
def source_3fhl(sources):
"""Dump 3FHL source objects to JSON"""
gammasky.make_3fhl_source_data(sources)
@source.command('3fgl')
@click.option('--sources', default='all', help='Either "all" or comma-separated string of source IDs')
def source_3fgl(sources):
"""Dump 3FGL source objects to JSON"""
gammasky.make_3fgl_source_data(sources)
@cli.command()
def maps():
"""Make map data"""
gammasky.make_maps_data()
@cli.group()
def fetch():
"""Fetch input data files"""
@fetch.command('cats')
def fetch_cats():
"""Fetch all source catalog files"""
gammasky.fetch_all_cats()
@fetch.command('maps')
def fetch_maps():
"""Fetch all input files to make maps"""
gammasky.fetch_all_cats()
@fetch.command('all')
def fetch_all():
"""Fetch all data files"""
gammasky.fetch_all_data()
@cli.command()
@click.pass_context
def all(ctx):
"""Generate all data for the webpage"""
ctx.invoke(cat_all)
ctx.invoke(source_all)
ctx.invoke(maps)
@cli.command('test-dataset')
@click.option('--sources', default='0')
@click.pass_context
def test_dataset(ctx, sources):
"""Dump all data needed for testing."""
ctx.invoke(cat_all)
ctx.forward(source_tev)
ctx.forward(source_3fhl)
ctx.forward(source_3fgl)
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.INFO)
cli()
| mit | -9,101,641,954,529,314,000 | 19.425532 | 102 | 0.655556 | false |
jualjiman/knowledge-base | src/knowledge_base/settings/staging.py | 1 | 2155 | # -*- coding: utf-8 -*-
"""
Django staging settings for knowledge_base project.
"""
import os
import urlparse
from . import * # noqa
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = [
'kb.pythonballz.com'
]
# Application definition
INSTALLED_APPS += (
'opbeat.contrib.django',
)
MIDDLEWARE_CLASSES += (
'opbeat.contrib.django.middleware.OpbeatAPMMiddleware',
)
# Database settings
urlparse.uses_netloc.append('postgres')
url = urlparse.urlparse(os.environ['DATABASE_URL'])
DATABASES = {
'default': {
'ENGINE': {
'postgres': 'django.db.backends.postgresql_psycopg2'
}[url.scheme],
'NAME': url.path[1:],
'USER': url.username,
'PASSWORD': url.password,
'HOST': url.hostname,
'PORT': url.port
}
}
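# DATABASE_URL is expected in standard URL form, for example (illustrative
# values only):
#   postgres://kb_user:secret@localhost:5432/knowledge_base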
# Static files and uploads
MEDIA_ROOT = os.path.realpath(os.path.join(
os.environ['DATA_DIR'], 'uploads'))
STATIC_ROOT = os.path.realpath(os.path.join(
os.environ['DATA_DIR'], 'assets'))
MEDIA_URL = '/uploads/'
STATIC_URL = '/static/'
# Opbeat
OPBEAT = {
'ORGANIZATION_ID': os.environ['OPBEAT_ORGANIZATION_ID'],
'APP_ID': os.environ['OPBEAT_APP_ID'],
'SECRET_TOKEN': os.environ['OPBEAT_SECRET_TOKEN'],
'INSTRUMENT_DJANGO_MIDDLEWARE': True,
}
# Email
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.environ['EMAIL_HOST']
EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_PORT = int(os.environ['EMAIL_HOST_PORT'])
EMAIL_USE_TLS = os.environ['EMAIL_USE_TLS'] == 'True'
DEFAULT_FROM_EMAIL = os.environ['DEFAULT_FROM_EMAIL']
# Haystack Connections
if 'HAYSTACK_CONNECTION_URL' in os.environ:
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': os.environ['HAYSTACK_CONNECTION_URL']
},
}
# Cache
if 'MEMCACHED_URL' in os.environ:
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': os.environ['MEMCACHED_URL'],
'KEY_PREFIX': 'kb::'
}
}
| apache-2.0 | -418,241,262,382,939,200 | 22.423913 | 77 | 0.634339 | false |
kapilt/cloud-custodian | tools/c7n_azure/tests_azure/test_actions_mark-for-op.py | 1 | 2151 | # Copyright 2019 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
from c7n_azure import utils
from c7n_azure.actions.tagging import TagDelayedAction
from mock import patch, Mock
from . import tools_tags as tools
from .azure_common import BaseTest
class ActionsMarkForOpTest(BaseTest):
existing_tags = {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'}
DAYS = 10
def _get_action(self, data):
return TagDelayedAction(data=data, manager=Mock())
def test_schema_validate(self):
self.assertTrue(
self.load_policy(
tools.get_policy([
{'type': 'mark-for-op',
'op': 'delete',
'days': 10},
]),
validate=True))
@patch('c7n_azure.tags.TagHelper.update_resource_tags')
def test_mark_for_op(self, update_resource_tags):
self.patch(TagDelayedAction, 'type', 'mark-for-op')
action = self._get_action({'op': 'stop', 'days': self.DAYS})
resource = tools.get_resource(self.existing_tags)
action.process([resource])
tags = tools.get_tags_parameter(update_resource_tags)
date = (utils.now(tz=action.tz) + datetime.timedelta(days=self.DAYS)).strftime('%Y/%m/%d')
expected_value = TagDelayedAction.default_template.format(op='stop', action_date=date)
expected_tags = self.existing_tags.copy()
expected_tags.update({'custodian_status': expected_value})
self.assertEqual(tags, expected_tags)
| apache-2.0 | 8,334,312,290,813,750,000 | 35.457627 | 98 | 0.670386 | false |
RianFuro/vint | vint/linting/policy/prohibit_encoding_opt_after_scriptencoding.py | 1 | 1158 | import re
from vint.ast.node_type import NodeType
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy_registry import register_policy
@register_policy
class ProhibitEncodingOptionAfterScriptEncoding(AbstractPolicy):
def __init__(self):
super(ProhibitEncodingOptionAfterScriptEncoding, self).__init__()
self.description = 'Set encoding before setting scriptencoding'
self.reference = ':help :scriptencoding'
self.level = Level.WARNING
self.was_scriptencoding_found = False
self.has_encoding_opt_after_scriptencoding = False
def listen_node_types(self):
return [NodeType.EXCMD]
def is_valid(self, excmd_node, lint_context):
""" Whether the specified node is valid.
This policy prohibits encoding option after scriptencoding.
"""
cmd_str = excmd_node['str']
if re.match(r':*scripte', cmd_str):
self.was_scriptencoding_found = True
if re.match(r':*set? +enc', cmd_str) and self.was_scriptencoding_found:
return False
return True
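# Example of what this policy flags (illustrative Vim script): the second line
# below is reported because the encoding option is set after scriptencoding:
#   scriptencoding utf-8
#   set encoding=utf-8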
| mit | -3,076,817,734,591,732,700 | 29.473684 | 79 | 0.684801 | false |
wziyong/casperfpga | src/casperfpga.py | 1 | 17912 | """
Created on Feb 28, 2013
@author: paulp
"""
import logging
import struct
import time
import register
import sbram
import snap
import tengbe
import qdr
from attribute_container import AttributeContainer
from utils import parse_fpg
LOGGER = logging.getLogger(__name__)
# known CASPER memory-accessible devices and their associated classes and containers
CASPER_MEMORY_DEVICES = {
'xps:bram': {'class': sbram.Sbram, 'container': 'sbrams'},
'xps:qdr': {'class': qdr.Qdr, 'container': 'qdrs'},
'xps:sw_reg': {'class': register.Register, 'container': 'registers'},
'xps:tengbe_v2': {'class': tengbe.TenGbe, 'container': 'tengbes'},
'casper:snapshot': {'class': snap.Snap, 'container': 'snapshots'},}
# other devices - blocks that aren't memory devices, but about which we'd like to know
# tagged in the simulink diagram
CASPER_OTHER_DEVICES = {
'casper:bitsnap': 'bitsnap',
'casper:dec_fir': 'dec_fir',
'casper:fft': 'fft',
'casper:fft_biplex_real_2x': 'fft_biplex_real_2x',
'casper:fft_biplex_real_4x': 'fft_biplex_real_4x',
'casper:fft_wideband_real': 'fft_wideband_real',
'casper:info': 'info',
'casper:pfb_fir': 'pfb_fir',
'casper:pfb_fir_async': 'pfb_fir_async',
'casper:pfb_fir_generic': 'pfb_fir_generic',
'casper:pfb_fir_real': 'pfb_fir_real',
'casper:spead_pack': 'spead_pack',
'casper:spead_unpack': 'spead_unpack',
'casper:vacc': 'vacc',
'casper:xeng': 'xeng',
'xps:xsg': 'xps',
'xps:katadc': 'katadc',
}
class CasperFpga(object):
"""
A FPGA host board that has a CASPER design running on it. Or will soon have.
"""
def __init__(self, host):
"""
:param host: the hostname of this CasperFpga
:return:
"""
self.host = host
self.__reset_device_info()
LOGGER.debug('%s: now a CasperFpga' % self.host)
def read(self, device_name, size, offset=0):
raise NotImplementedError
def blindwrite(self, device_name, data, offset=0):
raise NotImplementedError
def listdev(self):
"""
Get a list of the memory bus items in this design.
:return: a list of memory devices
"""
raise NotImplementedError
def deprogram(self):
"""
        The child class will deprogram the FPGA; here we just reset our device information
:return:
"""
self.__reset_device_info()
def __reset_device_info(self):
"""
Reset information of devices this FPGA knows about.
"""
# device dictionaries:
# devices: all of them
# memory_devices: only devices on the bus
# other_devices: anything not on the bus
self.devices = {}
self.memory_devices = {}
self.other_devices = {}
# containers
for container_ in CASPER_MEMORY_DEVICES.values():
setattr(self, container_['container'], AttributeContainer())
# hold misc information about the bof file, program time, etc
self.system_info = {}
self.rcs_info = {}
def test_connection(self):
"""
Write to and read from the scratchpad to test the connection to the FPGA.
"""
for val in [0xa5a5a5, 0x000000]:
self.write_int('sys_scratchpad', val)
rval = self.read_int('sys_scratchpad')
if rval != val:
raise RuntimeError('%s: cannot write scratchpad? %i != %i' % (self.host, rval, val))
return True
# def __getattribute__(self, name):
# if name == 'registers':
# return {self.memory_devices[r].name: self.memory_devices[r] for r in self.memory_devices_memory['register']['items']}
# return object.__getattribute__(self, name)
def read_dram(self, size, offset=0):
"""
Reads data from a ROACH's DRAM. Reads are done up to 1MB at a time.
The 64MB indirect address register is automatically incremented as necessary.
It returns a string, as per the normal 'read' function.
ROACH has a fixed device name for the DRAM (dram memory).
Uses bulkread internally.
:param size: amount of data to read, in bytes
:param offset: offset at which to read, in bytes
:return: binary data string
"""
data = []
n_reads = 0
last_dram_page = -1
dram_indirect_page_size = (64*1024*1024)
#read_chunk_size = (1024*1024)
LOGGER.debug('%s: reading a total of %8i bytes from offset %8i...' %
(self.host, size, offset))
while n_reads < size:
dram_page = (offset + n_reads) / dram_indirect_page_size
local_offset = (offset + n_reads) % dram_indirect_page_size
#local_reads = min(read_chunk_size, size-n_reads, dram_indirect_page_size-(offset%dram_indirect_page_size))
local_reads = min(size - n_reads, dram_indirect_page_size - (offset % dram_indirect_page_size))
if last_dram_page != dram_page:
self.write_int('dram_controller', dram_page)
last_dram_page = dram_page
local_data = (self.bulkread('dram_memory', local_reads, local_offset))
data.append(local_data)
LOGGER.debug('%s: reading %8i bytes from indirect '
'address %4i at local offset %8i... done.' %
(self.host, local_reads, dram_page, local_offset))
n_reads += local_reads
return ''.join(data)
def write_dram(self, data, offset=0):
"""
Writes data to a ROACH's DRAM. Writes are done up to 512KiB at a time.
The 64MB indirect address register is automatically incremented as necessary.
ROACH has a fixed device name for the DRAM (dram memory) and so the user does not need to specify the write
register.
:param data: packed binary string data to write
:param offset: the offset at which to write
:return:
"""
size = len(data)
n_writes = 0
last_dram_page = -1
dram_indirect_page_size = (64*1024*1024)
write_chunk_size = (1024*512)
LOGGER.debug('%s: writing a total of %8i bytes from offset %8i...' %
(self.host, size, offset))
while n_writes < size:
dram_page = (offset+n_writes)/dram_indirect_page_size
local_offset = (offset+n_writes) % dram_indirect_page_size
local_writes = min(write_chunk_size, size-n_writes,
dram_indirect_page_size-(offset % dram_indirect_page_size))
LOGGER.debug('%s: writing %8i bytes from indirect address %4i at local offset %8i...' %
(self.host, local_writes, dram_page, local_offset))
if last_dram_page != dram_page:
self.write_int('dram_controller', dram_page)
last_dram_page = dram_page
self.blindwrite('dram_memory', data[n_writes:n_writes+local_writes], local_offset)
n_writes += local_writes
def write(self, device_name, data, offset=0):
"""
Write data, then read it to confirm a successful write.
:param device_name: memory device name to write
:param data: packed binary data string to write
:param offset: offset at which to write, in bytes
:return:
"""
self.blindwrite(device_name, data, offset)
new_data = self.read(device_name, len(data), offset)
if new_data != data:
unpacked_wrdata = struct.unpack('>L', data[0:4])[0]
unpacked_rddata = struct.unpack('>L', new_data[0:4])[0]
LOGGER.error('%s: verification of write to %s at offset %d failed. Wrote 0x%08x... '
'but got back 0x%08x...' % (self.host, device_name, offset,
unpacked_wrdata, unpacked_rddata))
raise ValueError('%s: verification of write to %s at offset %d failed. Wrote 0x%08x... '
'but got back 0x%08x...' % (self.host, device_name, offset,
unpacked_wrdata, unpacked_rddata))
def read_int(self, device_name, word_offset=0):
"""
Read an integer from memory device.
i.e. calls self.read(device_name, size=4, offset=0) and uses struct to unpack it into an integer
:param device_name: device from which to read
:param word_offset: the 32-bit word offset at which to read
:return: signed 32-bit integer
"""
data = self.read(device_name, 4, word_offset*4)
return struct.unpack('>i', data)[0]
def read_uint(self, device_name, word_offset=0):
"""
Read an unsigned integer from memory device.
:param device_name: device from which to read
:param word_offset: the 32-bit word offset at which to read
:return: unsigned 32-bit integer
"""
data = self.read(device_name, 4, word_offset*4)
return struct.unpack('>I', data)[0]
def write_int(self, device_name, integer, blindwrite=False, word_offset=0):
"""
Writes an integer to the device specified at the offset specified.
A blind write is optional.
:param device_name: device to be written
:param integer: the integer to write
:param blindwrite: True for blind write, default False
:param word_offset: the offset at which to write, in 32-bit words
:return:
"""
        # careful of packing input data into 32 bit - check range: if
        # negative, must be signed int; if positive over 2^31-1, must be
        # unsigned int.
data = struct.pack('>i' if integer < 0 else '>I', integer)
if blindwrite:
self.blindwrite(device_name, data, word_offset*4)
else:
self.write(device_name, data, word_offset*4)
LOGGER.debug('%s: write_int %8x to register %s at word offset %d okay%s.' %
(self.host, integer, device_name,
word_offset, ' (blind)' if blindwrite else ''))
def get_rcs(self, rcs_block_name='rcs'):
"""Retrieves and decodes a revision control block."""
raise NotImplementedError
rv = {'user': self.read_uint(rcs_block_name + '_user')}
app = self.read_uint(rcs_block_name+'_app')
lib = self.read_uint(rcs_block_name+'_lib')
if lib & (1 << 31):
rv['compile_timestamp'] = lib & ((2 ** 31)-1)
else:
if lib & (1 << 30):
#type is svn
rv['lib_rcs_type'] = 'svn'
else:
#type is git
rv['lib_rcs_type'] = 'git'
if lib & (1 << 28):
#dirty bit
rv['lib_dirty'] = True
else:
rv['lib_dirty'] = False
rv['lib_rev'] = lib & ((2 ** 28)-1)
if app & (1 << 31):
rv['app_last_modified'] = app & ((2 ** 31)-1)
else:
if app & (1 << 30):
#type is svn
rv['app_rcs_type'] = 'svn'
else:
#type is git
rv['app_rcs_type'] = 'git'
if app & (1 << 28):
#dirty bit
rv['app_dirty'] = True
else:
rv['lib_dirty'] = False
rv['app_rev'] = app & ((2 ** 28)-1)
return rv
def __create_memory_devices(self, device_dict, memorymap_dict):
"""
Create memory devices from dictionaries of design information.
:param device_dict: raw dictionary of information from tagged blocks in Simulink design, keyed on device name
:param memorymap_dict: dictionary of information that would have been in coreinfo.tab - memory bus information
:return:
"""
# create and add memory devices to the memory device dictionary
for device_name, device_info in device_dict.items():
if device_name == '':
raise NameError('There\'s a problem somewhere, got a blank device name?')
if device_name in self.memory_devices.keys():
raise NameError('Memory device %s already exists.' % device_name)
# get the class from the known devices, if it exists there
tag = device_info['tag']
try:
known_device_class = CASPER_MEMORY_DEVICES[tag]['class']
known_device_container = CASPER_MEMORY_DEVICES[tag]['container']
except KeyError:
pass
else:
if not callable(known_device_class):
raise TypeError('%s is not a callable Memory class - that\'s a problem.' % known_device_class)
new_device = known_device_class.from_device_info(self, device_name, device_info, memorymap_dict)
if new_device.name in self.memory_devices.keys():
raise NameError('Device called %s of type %s already exists in devices list.' %
(new_device.name, type(new_device)))
self.devices[device_name] = new_device
self.memory_devices[device_name] = new_device
container = getattr(self, known_device_container)
setattr(container, device_name, new_device)
assert id(getattr(container, device_name)) == id(new_device) == id(self.memory_devices[device_name])
# allow created devices to update themselves with full device info
# link control registers, etc
for name, device in self.memory_devices.items():
try:
device.post_create_update(device_dict)
except AttributeError: # the device may not have an update function
pass
def __create_other_devices(self, device_dict):
"""
Store non-memory device information in a dictionary
:param device_dict: raw dictionary of information from tagged blocks in Simulink design, keyed on device name
:return:
"""
for device_name, device_info in device_dict.items():
if device_name == '':
raise NameError('There\'s a problem somewhere, got a blank device name?')
if device_name in self.other_devices.keys():
raise NameError('Other device %s already exists.' % device_name)
if device_info['tag'] in CASPER_OTHER_DEVICES.keys():
self.devices[device_name] = device_info
self.other_devices[device_name] = device_info
def device_names_by_container(self, container_name):
"""Return a list of devices in a certain container.
"""
return [devname for devname, container in self.memory_devices.iteritems() if container == container_name]
def devices_by_container(self, container):
"""Get devices using container type.
"""
return getattr(self, container)
def get_system_information(self, filename=None, fpg_info=None):
"""
Get information about the design running on the FPGA.
If filename is given, get it from there, otherwise query the host via KATCP.
:param filename: fpg filename
:param fpg_info: a tuple containing device_info and coreinfo dictionaries
:return: <nothing> the information is populated in the class
"""
if (filename is None) and (fpg_info is None):
raise RuntimeError('Either filename or parsed fpg data must be given.')
if filename is not None:
device_dict, memorymap_dict = parse_fpg(filename)
else:
device_dict = fpg_info[0]
memorymap_dict = fpg_info[1]
# add system registers
device_dict.update(self.__add_sys_registers())
# reset current devices and create new ones from the new design information
self.__reset_device_info()
self.__create_memory_devices(device_dict, memorymap_dict)
self.__create_other_devices(device_dict)
# populate some system information
try:
self.system_info.update(device_dict['77777'])
except KeyError:
LOGGER.warn('%s: no sys info key in design info!' % self.host)
# and RCS information if included
if '77777_git' in device_dict:
self.rcs_info['git'] = device_dict['77777_git']
if '77777_svn' in device_dict:
self.rcs_info['svn'] = device_dict['77777_svn']
def estimate_fpga_clock(self):
"""
Get the estimated clock of the running FPGA, in Mhz.
"""
firstpass = self.read_uint('sys_clkcounter')
time.sleep(2.0)
secondpass = self.read_uint('sys_clkcounter')
if firstpass > secondpass:
secondpass += (2**32)
        # 2.0 seconds elapse between the two reads, so ticks / 2e6 gives MHz
        return (secondpass - firstpass) / 2000000.0
@staticmethod
def __add_sys_registers():
standard_reg = {'tag': 'xps:sw_reg', 'mode': 'one value', 'io_dir': 'To Processor',
'io_delay': '1', 'sample_period': '1', 'sim_port': 'off', 'show_format': 'off',
'names': 'reg', 'bitwidths': '32', 'arith_types': '0', 'bin_pts': '0'}
sys_registers = {'sys_board_id': standard_reg.copy(),
'sys_rev': standard_reg.copy(),
'sys_rev_rcs': standard_reg.copy(),
'sys_scratchpad': standard_reg.copy(),
'sys_clkcounter': standard_reg.copy()}
return sys_registers
# end
| gpl-2.0 | 1,421,237,457,292,584,200 | 42.581509 | 130 | 0.566771 | false |
Matir/LoginScan | loginscan.py | 1 | 1464 | #!/usr/bin/python
#
# Copyright (C) 2011 by David Tomaschik <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Startup script
import sys
from core import config
from core import main
from core import print_verbose
if __name__ == "__main__":
# Build the config to use
use_config = config.load(sys.argv[1:])
print_verbose(use_config)
main.go(use_config)
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| mit | -673,574,686,558,348,300 | 39.666667 | 79 | 0.75888 | false |
frippe12573/geonode | geonode/catalogue/models.py | 1 | 4616 | #########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import errno
import logging
from django.conf import settings
from django.db.models import signals
from geonode.layers.models import Layer
from geonode.catalogue import get_catalogue
from geonode.base.models import Link
LOGGER = logging.getLogger(__name__)
def catalogue_pre_delete(instance, sender, **kwargs):
"""Removes the layer from the catalogue
"""
catalogue = get_catalogue()
catalogue.remove_record(instance.uuid)
def catalogue_post_save(instance, sender, **kwargs):
"""Get information from catalogue
"""
try:
catalogue = get_catalogue()
catalogue.create_record(instance)
record = catalogue.get_record(instance.uuid)
except EnvironmentError, err:
msg = 'Could not connect to catalogue' \
              ' to save information for layer "%s"' % (instance.name)
if err.reason.errno == errno.ECONNREFUSED:
LOGGER.warn(msg, err)
return
else:
raise err
msg = ('Metadata record for %s does not exist,'
' check the catalogue signals.' % instance.name)
assert record is not None, msg
msg = ('Metadata record for %s should contain links.' % instance.name)
assert hasattr(record, 'links'), msg
# Create the different metadata links with the available formats
for mime, name, metadata_url in record.links['metadata']:
Link.objects.get_or_create(resource=instance.resourcebase_ptr,
url=metadata_url,
defaults=dict(
name=name,
extension='xml',
mime=mime,
link_type='metadata',
)
)
# generate and save CSW specific fields
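    # The post_save handler is disconnected while instance.save() is called
    # below, so that saving the generated CSW fields does not re-trigger this
    # handler recursively; it is reconnected straight afterwards.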
signals.post_save.disconnect(catalogue_post_save, sender=Layer)
# generate an XML document (GeoNode's default is ISO)
md_doc = catalogue.catalogue.csw_gen_xml(instance,
'catalogue/full_metadata.xml')
instance.metadata_xml = md_doc
instance.csw_anytext = \
catalogue.catalogue.csw_gen_anytext(instance.metadata_xml)
instance.csw_wkt_geometry = instance.geographic_bounding_box.split(';')[-1]
instance.save()
signals.post_save.connect(catalogue_post_save, sender=Layer)
def catalogue_pre_save(instance, sender, **kwargs):
"""Send information to catalogue
"""
record = None
try:
catalogue = get_catalogue()
record = catalogue.get_record(instance.uuid)
except EnvironmentError, err:
msg = 'Could not connect to catalogue' \
              ' to save information for layer "%s"' % (instance.name)
if err.reason.errno == errno.ECONNREFUSED:
LOGGER.warn(msg, err)
else:
raise err
if record is None:
return
# Fill in the url for the catalogue
if hasattr(record.distribution, 'online'):
onlineresources = [r for r in record.distribution.online \
if r.protocol == "WWW:LINK-1.0-http--link"]
if len(onlineresources) == 1:
res = onlineresources[0]
instance.distribution_url = res.url
instance.distribution_description = res.description
else:
durl = settings.SITEURL
if durl[-1] == '/': # strip trailing slash
durl = durl[:-1]
durl = '%s%s' % (durl, instance.get_absolute_url())
instance.distribution_url = durl
instance.distribution_description = \
'Online link to the \'%s\' description on GeoNode ' % instance.title
if 'geonode.catalogue' in settings.INSTALLED_APPS:
signals.pre_save.connect(catalogue_pre_save, sender=Layer)
signals.post_save.connect(catalogue_post_save, sender=Layer)
signals.pre_delete.connect(catalogue_pre_delete, sender=Layer)
| gpl-3.0 | -4,810,848,301,429,058,000 | 34.236641 | 80 | 0.6276 | false |
bitmazk/django-multilingual-news | multilingual_news/south_migrations/0004_auto__add_field_newsentry_image_width__add_field_newsentry_image_heigh.py | 1 | 13838 | # flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'NewsEntry.image_width'
db.add_column(u'multilingual_news_newsentry', 'image_width',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'NewsEntry.image_height'
db.add_column(u'multilingual_news_newsentry', 'image_height',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'NewsEntry.image_source_url'
db.add_column(u'multilingual_news_newsentry', 'image_source_url',
self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True),
keep_default=False)
# Adding field 'NewsEntry.image_source_text'
db.add_column(u'multilingual_news_newsentry', 'image_source_text',
self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'NewsEntry.image_width'
db.delete_column(u'multilingual_news_newsentry', 'image_width')
# Deleting field 'NewsEntry.image_height'
db.delete_column(u'multilingual_news_newsentry', 'image_height')
# Deleting field 'NewsEntry.image_source_url'
db.delete_column(u'multilingual_news_newsentry', 'image_source_url')
# Deleting field 'NewsEntry.image_source_text'
db.delete_column(u'multilingual_news_newsentry', 'image_source_text')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
u'multilingual_news.newsentry': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'NewsEntry'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'image_float': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'image_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'image_source_text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'image_source_url': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'image_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'placeholders': ('djangocms_utils.fields.M2MPlaceholderField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'multilingual_news.newsentrytitle': {
'Meta': {'object_name': 'NewsEntryTitle'},
'entry': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['multilingual_news.NewsEntry']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '512'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'multilingual_news.recentplugin': {
'Meta': {'object_name': 'RecentPlugin', 'db_table': "u'cmsplugin_recentplugin'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'current_language_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'limit': ('django.db.models.fields.PositiveIntegerField', [], {})
}
}
complete_apps = ['multilingual_news']
| mit | -2,912,040,837,918,217,000 | 76.307263 | 192 | 0.561497 | false |
google/apis-client-generator | src/googleapis/codegen/utilities/__init__.py | 1 | 3526 | #!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2011 Google Inc. All Rights Reserved.
"""Assorted utility methods for the code generator."""
__author__ = '[email protected] (Tony Aiuto)'
import re
_WORD_SPLIT_PATTERN = re.compile(r'[\._/-]+')
def CamelCase(s):
"""CamelCase a string so that it is more readable as a variable name.
  Camelcases a string, beginning new words after any instances of '.', '_',
'/', or '-'.
Args:
s: (str) A string.
Returns:
s, with the first letter of each word capitalized.
"""
title = lambda x: x[0].upper() + x[1:] if x else x
return ''.join([title(x) for x in _WORD_SPLIT_PATTERN.split(s)])
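# Quick usage sketch for CamelCase (illustrative comment, not part of the
# original module):
#   CamelCase('hello_world-example')  # -> 'HelloWorldExample'
#   CamelCase('a.b/c')                # -> 'ABC'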
def UnCamelCase(phrase, separator='_'):
"""Convert CamelCased phrase into lower-case delimited words.
Args:
phrase: CamelCased phrase.
separator: The word separator to inject between lowercased words.
Returns:
lower case phrase with separators between case changes from lower
to upper or acronyms (all upper) to lower case.
"""
phrase_len = len(phrase)
if not phrase_len:
return ''
ch = phrase[0]
text_run = ch.isalnum()
last_was_separator = ch.isupper() or not text_run
caps_run = False
result = ch.lower()
# already did first index
for i in range(phrase_len - 1):
ch = phrase[i + 1]
if ch.isupper():
caps_run = text_run and last_was_separator
text_run = True
if not last_was_separator:
result += separator
last_was_separator = True
elif not ch.isalnum():
caps_run = False
text_run = False
last_was_separator = True
else:
text_run = True
last_was_separator = False
if caps_run:
result += separator
last_was_separator = True
caps_run = False
result += ch.lower()
return result
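# Quick usage sketch for UnCamelCase (illustrative comment, not part of the
# original module):
#   UnCamelCase('HelloWorld')       # -> 'hello_world'
#   UnCamelCase('HelloWorld', '-')  # -> 'hello-world'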
def SanitizeDomain(s):
"""Sanitize a domain name to ch aracters suitable for use in code.
We only want text characters, digits, and '.'. For now, we only allow ASCII,
characters but we may revisit that in the future if there is demand from
Endpoints customers.
Since the pattern 'my-custom-app.appspot.com' is a popular one, preserve the
'-' in a useful way.
Args:
s: (str) A domain name
Returns:
(str) A version of the domain name suitable for use in code structures
like Java package names. None if s is None.
"""
if s is None:
return None
s = s.lower().replace('-', '_')
return ''.join([c for c in s
if (c.isalnum() and ord(c) < 128) or c in ['.', '_']])
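# Quick usage sketch for SanitizeDomain (illustrative comment, not part of the
# original module):
#   SanitizeDomain('My-Custom-App.appspot.com')  # -> 'my_custom_app.appspot.com'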
def ReversedDomainComponents(s):
"""Returns a list of domain components in reverse order.
Args:
s: (str) A string of the form "a.b.c"
Returns:
list(s) E.g. ['c', 'b', 'a']
"""
if not s:
return []
parts = s.split('.')
parts.reverse()
return parts
def NoSpaces(s):
"""Remove spaces from a string, but preserves None-ness."""
if s:
return s.replace(' ', '')
return s
| apache-2.0 | 7,937,206,098,041,840,000 | 26.123077 | 78 | 0.650879 | false |
django-de/django-de-v2 | django_de/apps/aggregator/views.py | 1 | 2922 | # -*- coding: utf-8 -*-
from django.db.models import get_model
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.template.defaultfilters import mark_safe
from django.utils.translation import ugettext as _
from django.core.mail import mail_admins
from django.views.generic.list_detail import object_list
from django_de.apps.aggregator.models import Feed, Item
from django_de.apps.aggregator.forms import NewFeedForm
def overview(request):
params = {
'queryset': Item.objects.latest_public(),
'allow_empty': True,
'template_object_name': 'item',
'template_name': 'aggregator/overview.html',
'extra_context': {
'feed_list': Feed.objects.public().order_by('title'),
},
}
return object_list(request, **params)
def add_feed(request):
if request.POST:
form = NewFeedForm(request.POST)
if form.is_valid():
form.save()
message = _('A new feed has been added and awaits activation: %s') % form.cleaned_data.get('feed_url', '')
mail_admins(_('Community: New feed added.'), message, True)
return HttpResponseRedirect('/community/add/thankyou/')
else:
form = NewFeedForm()
template_context = {
'form': form,
'feed_list': Feed.objects.public().order_by('title'),
}
return render_to_response(
'aggregator/add_feed.html',
template_context,
RequestContext(request),
)
def admin_actions(request, modelname, appname):
if not request.user.is_superuser:
return HttpResponseForbidden('Superuser only!')
model = get_model(modelname, appname)
id_list = request.POST.getlist('item_id_list')
if id_list:
for id in id_list:
obj = model.objects.get(pk=id)
# Delete Item
if request.POST.has_key('_delete'):
obj.delete()
request.user.message_set.create(message=_('"%s" was deleted') % mark_safe(obj.title))
# Open Item
elif request.POST.has_key('_markopen'):
obj.public = True
obj.save()
request.user.message_set.create(message=_('"%s" was opened') % mark_safe(obj.title))
# Close Item
elif request.POST.has_key('_markclosed'):
obj.public = False
obj.save()
request.user.message_set.create(message=_('"%s" was closed') % mark_safe(obj.title))
# Wrong Action Parameter
else:
request.user.message_set.create(message='Wrong Action Parameter')
# None Checkbox checked
else:
request.user.message_set.create(message=_('Nothing to do...'))
return HttpResponseRedirect('/admin/%s/%s/' % (modelname, appname))
| bsd-3-clause | 4,984,823,657,608,090,000 | 35.525 | 118 | 0.616016 | false |
shanot/imp | modules/isd/test/test_GaussianProcessInterpolationNumericallyNoMean.py | 2 | 10390 | #!/usr/bin/env python
# general imports
from numpy import *
from random import uniform
# imp general
import IMP
# our project
from IMP.isd import *
# unit testing framework
import IMP.test
class MockFunc:
def __init__(self, setval, evaluate, evalargs=(1,), update=None):
self.__set = setval
self.__eval = evaluate
self.__update = update
self.__evalargs = evalargs
def set_evalargs(self, evalargs):
self.__evalargs = evalargs
def __call__(self, value):
self.__set(value)
if self.__update:
self.__update()
return self.__eval(*self.__evalargs)
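# Note (added for clarity): MockFunc turns a (setter, evaluator) pair into a
# single-argument callable, so the tests below can hand it to
# IMP.test.numerical_derivative(PFunc, value, step) and estimate derivatives of
# the posterior covariance with respect to one nuisance parameter at a time.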
class Tests(IMP.test.TestCase):
"""test of the GPI with more data points, using numerical derivative
estimation. Mean function is not optimized.
"""
def setUp(self):
IMP.test.TestCase.setUp(self)
# IMP.set_log_level(IMP.TERSE)
IMP.set_log_level(0)
self.m = IMP.Model()
data = open(self.get_input_file_name('lyzexp_gpir.dat')).readlines()
data = [list(map(float, d.split())) for d in data]
self.q = [[i[0]] for i in data]
self.I = [i[1] for i in data]
self.err = [i[2] for i in data]
self.N = 10
self.G = Scale.setup_particle(IMP.Particle(self.m), 3.0)
self.G.set_nuisance_is_optimized(False)
self.Rg = Scale.setup_particle(IMP.Particle(self.m), 10.0)
self.Rg.set_nuisance_is_optimized(False)
# put d=15 so we don't use the porod region
self.d = Scale.setup_particle(IMP.Particle(self.m), 15.0)
self.d.set_nuisance_is_optimized(False)
self.s = Scale.setup_particle(IMP.Particle(self.m), 0.0)
self.s.set_nuisance_is_optimized(False)
self.A = Scale.setup_particle(IMP.Particle(self.m), 0.0)
self.A.set_nuisance_is_optimized(False)
self.mean = GeneralizedGuinierPorodFunction(
self.G, self.Rg, self.d, self.s, self.A)
self.tau = Switching.setup_particle(IMP.Particle(self.m), 1.0)
self.tau.set_nuisance_is_optimized(True)
self.lam = Scale.setup_particle(IMP.Particle(self.m), 1.)
self.lam.set_nuisance_is_optimized(True)
self.sig = Scale.setup_particle(IMP.Particle(self.m), 1.0)
self.sig.set_nuisance_is_optimized(True)
self.cov = Covariance1DFunction(self.tau, self.lam, 2.0)
self.gpi = IMP.isd.GaussianProcessInterpolation(self.q, self.I,
self.err, self.N, self.mean, self.cov, self.sig)
self.particles = [
self.G,
self.Rg,
self.d,
self.s,
self.sig,
self.tau,
self.lam]
def testCovDerivNumericTau(self):
"""
test the derivatives of the gpi numerically for Tau
"""
pnum = 5
values = linspace(1, 10)
pos = 0.1
particle = self.particles[pnum]
PFunc = MockFunc(particle.set_nuisance,
self.gpi.get_posterior_covariance,
([pos], [pos]))
for val in values:
particle.set_nuisance(val)
ene = self.gpi.get_posterior_covariance([pos], [pos])
observed = self.gpi.get_posterior_covariance_derivative([pos],
False)[pnum - 4]
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-3)
def testCovDerivNumericSigma(self):
"""
test the derivatives of the gpi numerically for Sigma
"""
pnum = 4
values = linspace(1, 10)
pos = 0.1
particle = self.particles[pnum]
PFunc = MockFunc(particle.set_nuisance,
self.gpi.get_posterior_covariance,
([pos], [pos]))
for val in values:
particle.set_nuisance(val)
ene = self.gpi.get_posterior_covariance([pos], [pos])
observed = self.gpi.get_posterior_covariance_derivative([pos],
False)[pnum - 4]
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-3)
def testCovDerivNumericLambda(self):
"""
test the derivatives of the gpi numerically for Lambda
"""
pnum = 6
values = linspace(.1, 1)
pos = 0.1
particle = self.particles[pnum]
PFunc = MockFunc(particle.set_nuisance,
self.gpi.get_posterior_covariance,
([pos], [pos]))
for val in values:
particle.set_nuisance(val)
ene = self.gpi.get_posterior_covariance([pos], [pos])
observed = self.gpi.get_posterior_covariance_derivative([pos],
False)[pnum - 4]
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-3)
def testCovHessianNumericSigmaSigma(self):
"""
test the hessian of the gpi numerically for Sigma and Sigma
"""
pn1 = 4
pn2 = 4
values = linspace(1, 10)
pos = 0.1
p1 = self.particles[pn1]
p2 = self.particles[pn2]
PFunc = MockFunc(p1.set_nuisance,
lambda a: self.gpi.get_posterior_covariance_derivative(
a,
False)[pn2 - 4], ([pos],))
for val in values:
p1.set_nuisance(val)
observed = self.gpi.get_posterior_covariance_hessian([pos],
False)[pn1 - 4][pn2 - 4]
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(observed / expected, 1, delta=1e-2)
def testCovHessianNumericSigmaTau(self):
"""
test the hessian of the gpi numerically for Sigma and Tau
"""
pn1 = 4
pn2 = 5
values = linspace(1, 10)
pos = 0.1
p1 = self.particles[pn1]
p2 = self.particles[pn2]
PFunc = MockFunc(p1.set_nuisance,
lambda a: self.gpi.get_posterior_covariance_derivative(
a,
False)[pn2 - 4], ([pos],))
for val in values:
p1.set_nuisance(val)
observed = self.gpi.get_posterior_covariance_hessian([pos],
False)[pn1 - 4][pn2 - 4]
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(observed / expected, 1, delta=1e-2)
def testCovHessianNumericSigmaLambda(self):
"""
test the hessian of the gpi numerically for Sigma and Lambda
"""
pn1 = 4
pn2 = 6
values = linspace(1, 10)
pos = 0.1
p1 = self.particles[pn1]
p2 = self.particles[pn2]
PFunc = MockFunc(p1.set_nuisance,
lambda a: self.gpi.get_posterior_covariance_derivative(
a,
False)[pn2 - 4], ([pos],))
for val in values:
p1.set_nuisance(val)
observed = self.gpi.get_posterior_covariance_hessian([pos],
False)[pn1 - 4][pn2 - 4]
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(observed / expected, 1, delta=1e-2)
def testCovHessianNumericTauTau(self):
"""
test the hessian of the gpi numerically for Tau and Tau
"""
pn1 = 5
pn2 = 5
values = linspace(.1, 10)
pos = 0.1
p1 = self.particles[pn1]
p2 = self.particles[pn2]
PFunc = MockFunc(p1.set_nuisance,
lambda a: self.gpi.get_posterior_covariance_derivative(
a,
False)[pn2 - 4], ([pos],))
for val in values:
p1.set_nuisance(val)
observed = self.gpi.get_posterior_covariance_hessian([pos],
False)[pn1 - 4][pn2 - 4]
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(observed, expected, delta=1e-3)
def testCovHessianNumericTauLambda(self):
"""
test the hessian of the gpi numerically for Tau and Lambda
"""
pn1 = 6
pn2 = 5
values = linspace(1, 10)
pos = 0.1
p1 = self.particles[pn1]
p2 = self.particles[pn2]
PFunc = MockFunc(p1.set_nuisance,
lambda a: self.gpi.get_posterior_covariance_derivative(
a,
False)[pn2 - 4], ([pos],))
for val in values:
p1.set_nuisance(val)
observed = self.gpi.get_posterior_covariance_hessian([pos],
False)[pn1 - 4][pn2 - 4]
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(observed / expected, 1, delta=1e-2)
def testCovHessianNumericLambdaLambda(self):
"""
test the hessian of the gpi numerically for Lambda and Lambda
"""
pn1 = 6
pn2 = 6
values = linspace(1, 10)
pos = 0.1
p1 = self.particles[pn1]
p2 = self.particles[pn2]
PFunc = MockFunc(p1.set_nuisance,
lambda a: self.gpi.get_posterior_covariance_derivative(
a,
False)[pn2 - 4], ([pos],))
for val in values:
p1.set_nuisance(val)
observed = self.gpi.get_posterior_covariance_hessian([pos],
False)[pn1 - 4][pn2 - 4]
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(observed / expected, 1, delta=1e-2)
if __name__ == '__main__':
IMP.test.main()
| gpl-3.0 | -6,258,670,012,141,494,000 | 37.624535 | 104 | 0.518864 | false |
erikrose/more-itertools | setup.py | 1 | 1967 | from re import sub
from setuptools import setup
from more_itertools import __version__
def get_long_description():
# Fix display issues on PyPI caused by RST markup
readme = open('README.rst').read()
version_lines = []
with open('docs/versions.rst') as infile:
next(infile)
for line in infile:
line = line.rstrip().replace('.. automodule:: more_itertools', '')
version_lines.append(line)
version_history = '\n'.join(version_lines)
version_history = sub(r':func:`([a-zA-Z0-9._]+)`', r'\1', version_history)
ret = readme + '\n\n' + version_history
return ret
setup(
name='more-itertools',
version=__version__,
description='More routines for operating on iterables, beyond itertools',
long_description=get_long_description(),
author='Erik Rose',
author_email='[email protected]',
license='MIT',
packages=['more_itertools'],
package_data={'more_itertools': ['py.typed', '*.pyi']},
include_package_data=True,
python_requires='>=3.5',
test_suite='tests',
url='https://github.com/erikrose/more-itertools',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries'],
keywords=['itertools', 'iterator', 'iteration', 'filter', 'peek',
'peekable', 'collate', 'chunk', 'chunked'],
)
| mit | 1,240,349,848,939,387,100 | 34.763636 | 78 | 0.614133 | false |
anish/buildbot | worker/buildbot_worker/__init__.py | 1 | 4662 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
#
# Keep in sync with master/buildbot/__init__.py
#
# We can't put this method in utility modules, because they import dependency packages
#
from __future__ import division
from __future__ import print_function
import datetime
import os
import re
from subprocess import PIPE
from subprocess import STDOUT
from subprocess import Popen
def gitDescribeToPep440(version):
    # git describe produces a version in the form: v0.9.8-20-gf0f45ca
    # where 20 is the number of commits since the last release, and gf0f45ca is the short commit id preceded by 'g'
    # we parse this and transform it into a pep440 release version 0.9.9.dev20 (increment the last digit and add dev before 20)
VERSION_MATCH = re.compile(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(\.post(?P<post>\d+))?(-(?P<dev>\d+))?(-g(?P<commit>.+))?')
v = VERSION_MATCH.search(version)
if v:
major = int(v.group('major'))
minor = int(v.group('minor'))
patch = int(v.group('patch'))
if v.group('dev'):
patch += 1
dev = int(v.group('dev'))
return "{0}.{1}.{2}-dev{3}".format(major, minor, patch, dev)
if v.group('post'):
return "{0}.{1}.{2}.post{3}".format(major, minor, patch, v.group('post'))
return "{0}.{1}.{2}".format(major, minor, patch)
return v
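# Behaviour sketch (illustrative comment, not from the original source): with the
# format strings above, gitDescribeToPep440('v0.9.8-20-gf0f45ca') returns
# '0.9.9-dev20', and gitDescribeToPep440('v0.9.8') returns '0.9.8'.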
def mTimeVersion(init_file):
cwd = os.path.dirname(os.path.abspath(init_file))
m = 0
for root, dirs, files in os.walk(cwd):
for f in files:
m = max(os.path.getmtime(os.path.join(root, f)), m)
d = datetime.datetime.utcfromtimestamp(m)
return d.strftime("%Y.%m.%d")
def getVersionFromArchiveId(git_archive_id='$Format:%ct %d$'):
""" Extract the tag if a source is from git archive.
When source is exported via `git archive`, the git_archive_id init value is modified
and placeholders are expanded to the "archived" revision:
%ct: committer date, UNIX timestamp
%d: ref names, like the --decorate option of git-log
See man gitattributes(5) and git-log(1) (PRETTY FORMATS) for more details.
"""
# mangle the magic string to make sure it is not replaced by git archive
if not git_archive_id.startswith('$For''mat:'):
# source was modified by git archive, try to parse the version from
# the value of git_archive_id
match = re.search(r'tag:\s*v([^,)]+)', git_archive_id)
if match:
# archived revision is tagged, use the tag
return gitDescribeToPep440(match.group(1))
# archived revision is not tagged, use the commit date
tstamp = git_archive_id.strip().split()[0]
d = datetime.datetime.utcfromtimestamp(int(tstamp))
return d.strftime('%Y.%m.%d')
return None
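# Behaviour sketch (illustrative comment, not from the original source): when the
# file comes from `git archive`, git_archive_id looks like
# "1514764800 (HEAD -> master, tag: v1.0.0)"; the tag yields "1.0.0", while an
# untagged archive falls back to the commit date, e.g. "2018.01.01". An
# unexpanded placeholder returns None.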
def getVersion(init_file):
"""
Return BUILDBOT_VERSION environment variable, content of VERSION file, git
tag or 'latest'
"""
try:
return os.environ['BUILDBOT_VERSION']
except KeyError:
pass
try:
cwd = os.path.dirname(os.path.abspath(init_file))
fn = os.path.join(cwd, 'VERSION')
with open(fn) as f:
return f.read().strip()
except IOError:
pass
version = getVersionFromArchiveId()
if version is not None:
return version
try:
p = Popen(['git', 'describe', '--tags', '--always'], stdout=PIPE, stderr=STDOUT, cwd=cwd)
out = p.communicate()[0]
if (not p.returncode) and out:
v = gitDescribeToPep440(str(out))
if v:
return v
except OSError:
pass
try:
# if we really can't find the version, we use the date of modification of the most recent file
# docker hub builds cannot use git describe
return mTimeVersion(init_file)
except Exception:
# bummer. lets report something
return "latest"
version = getVersion(__file__)
__version__ = version
| gpl-2.0 | 2,448,081,944,275,590,700 | 33.029197 | 138 | 0.639211 | false |
MagazinnikIvan/pywinauto | pywinauto/unittests/test_handleprops.py | 1 | 11592 | # GUI Application automation and testing library
# Copyright (C) 2006-2017 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handleprops.py"""
import unittest
import six
import os
import sys
sys.path.append(".")
from pywinauto.handleprops import children, classname, clientrect, contexthelpid, \
controlid, dumpwindow, exstyle, font, has_exstyle, has_style, is64bitprocess, \
is_toplevel_window, isenabled, isunicode, isvisible, iswindow, parent, processid, \
rectangle, style, text, userdata
from pywinauto.application import Application
from pywinauto.sysinfo import is_x64_OS
from pywinauto.sysinfo import is_x64_Python
from pywinauto.timings import Timings
class HandlepropsTestCases(unittest.TestCase):
"""Unit tests for the handleprops module"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.Defaults()
self.app = Application().start("notepad")
self.dlghandle = self.app.UntitledNotepad.handle
self.edit_handle = self.app.UntitledNotepad.Edit.handle
def tearDown(self):
"""Close the application after tests"""
# close the application
#self.dlg.SendMessage(win32defines.WM_CLOSE)
#self.app.UntitledNotepad.MenuSelect("File->Exit")
self.app.kill_()
def test_text(self):
"""Make sure the text method returns correct result"""
self.assertEquals("Untitled - Notepad", text(self.dlghandle))
self.assertEquals("", text(self.edit_handle))
def test_classname(self):
"""Make sure the classname method returns correct result"""
self.assertEquals("Notepad", classname(self.dlghandle))
self.assertEquals("Edit", classname(self.edit_handle))
def test_parent(self):
"""Make sure the parent method returns correct result"""
self.assertEquals(0, parent(self.dlghandle))
self.assertEquals(self.dlghandle, parent(self.edit_handle))
def test_style(self):
"""Make sure the style method returns correct result"""
self.assertEquals(0x14cf0000, style(self.dlghandle))
# will be 0x50300104 if wordwrap is on and 0x50200104 if off
        self.assertIn(
            style(self.edit_handle),
            (0x50200104, 0x50300104))
def test_exstyle(self):
"""Make sure the exstyle method returns correct result"""
self.assertEquals(0x110, exstyle(self.dlghandle))
self.assertEquals(0x200, exstyle(self.edit_handle))
def test_controlid(self):
"""Make sure the controlid method returns correct result"""
#self.assertEquals(0, controlid(self.dlghandle))
self.assertEquals(15, controlid(self.edit_handle))
def test_userdata(self):
"""Make sure the userdata method returns correct result"""
self.assertEquals(0, userdata(self.dlghandle))
self.assertEquals(0, userdata(self.edit_handle))
def test_contexthelpid(self):
"""Make sure the contexthelpid method returns correct result"""
self.assertEquals(0, contexthelpid(self.dlghandle))
self.assertEquals(0, contexthelpid(self.edit_handle))
def test_iswindow(self):
"""Make sure the iswindow method returns correct result"""
self.assertEquals(True, iswindow(self.dlghandle))
self.assertEquals(True, iswindow(self.edit_handle))
self.assertEquals(False, iswindow(1))
def test_isvisible(self):
"""Make sure the isvisible method returns correct result"""
self.assertEquals(True, isvisible(self.dlghandle))
self.assertEquals(True, isvisible(self.edit_handle))
# need to check something invisible
#self.assertEquals(False, isvisible(self.edit_handle))
def test_isunicode(self):
"""Make sure the isunicode method returns correct result"""
self.assertEquals(True, isunicode(self.dlghandle))
self.assertEquals(True, isunicode(self.edit_handle))
# need to check something not unicode
#self.assertEquals(False, isunicode(self.edit_handle))
def test_isenabled(self):
"""Make sure the isenabled method returns correct result"""
self.assertEquals(True, isenabled(self.dlghandle))
self.assertEquals(True, isenabled(self.edit_handle))
self.app.UntitledNotepad.MenuSelect("Help->About Notepad")
self.app.AboutNotepad.Wait('ready')
self.assertEquals(False, isenabled(self.dlghandle))
self.app.AboutNotepad.OK.CloseClick()
self.app.UntitledNotepad.MenuSelect("Edit->Replace")
self.assertEquals(
False,
isenabled(
self.app.Replace.ChildWindow(
title_re = "Replace.*",
class_name = "Button",
enabled_only = False).handle))
self.app.Replace.Cancel.Click()
def test_clientrect(self):
"""Make sure clientrect() function works"""
self.assertEquals(0, clientrect(self.dlghandle).left)
self.assertEquals(0, clientrect(self.edit_handle).left)
self.assertEquals(0, clientrect(self.dlghandle).top)
self.assertEquals(0, clientrect(self.edit_handle).top)
self.assertEquals(True,
rectangle(self.dlghandle).right > clientrect(self.dlghandle).right)
self.assertEquals(True,
rectangle(self.edit_handle).right > clientrect(self.edit_handle).right)
self.assertEquals(True,
rectangle(self.dlghandle).bottom > clientrect(self.dlghandle).bottom)
self.assertEquals(True,
rectangle(self.edit_handle).bottom > clientrect(self.edit_handle).bottom)
def test_rectangle(self):
"""Make sure rectangle() function works"""
dlgrect = rectangle(self.dlghandle)
self.assertEquals(True, dlgrect.left < dlgrect.right)
self.assertEquals(True, dlgrect.top < dlgrect.bottom)
editrect = rectangle(self.edit_handle)
self.assertEquals(True, editrect.left < editrect.right)
self.assertEquals(True, editrect.top < editrect.bottom)
def test_font(self):
"""Make sure font() function works"""
dlgfont = font(self.dlghandle)
self.assertEquals(True, isinstance(dlgfont.lfFaceName, six.string_types))
editfont = font(self.edit_handle)
self.assertEquals(True, isinstance(editfont.lfFaceName, six.string_types))
def test_processid(self):
"""Make sure processid() function works"""
self.assertEquals(self.app.process, processid(self.dlghandle))
self.assertEquals(self.app.process, processid(self.edit_handle))
def test_children(self):
"""Make sure the children method returns correct result"""
self.assertEquals(2, len(children(self.dlghandle)))
self.assertEquals([], children(self.edit_handle))
def test_has_style(self):
"""Make sure the has_style method returns correct result"""
self.assertEquals(True, has_style(self.dlghandle, 0xf0000))
self.assertEquals(True, has_style(self.edit_handle, 0x4))
self.assertEquals(False, has_style(self.dlghandle, 4))
self.assertEquals(False, has_style(self.edit_handle, 1))
def test_has_exstyle(self):
"""Make sure the has_exstyle method returns correct result"""
self.assertEquals(True, has_exstyle(self.dlghandle, 0x10))
self.assertEquals(True, has_exstyle(self.edit_handle, 0x200))
self.assertEquals(False, has_exstyle(self.dlghandle, 4))
self.assertEquals(False, has_exstyle(self.edit_handle, 0x10))
def test_is_toplevel_window(self):
"""Make sure is_toplevel_window() function works"""
self.assertEquals(True, is_toplevel_window(self.dlghandle))
self.assertEquals(False, is_toplevel_window(self.edit_handle))
self.app.UntitledNotepad.MenuSelect("Edit->Replace")
self.assertEquals(True, is_toplevel_window(self.app.Replace.handle))
self.assertEquals(False, is_toplevel_window(self.app.Replace.Cancel.handle))
self.app.Replace.Cancel.Click()
def test_is64bitprocess(self):
"""Make sure a 64-bit process detection returns correct results"""
if is_x64_OS():
# Test a 32-bit app running on x64
expected_is64bit = False
if is_x64_Python():
exe32bit = os.path.join(os.path.dirname(__file__),
r"..\..\apps\MFC_samples\RowList.exe")
app = Application().start(exe32bit, timeout=20)
pid = app.RowListSampleApplication.process_id()
res_is64bit = is64bitprocess(pid)
try:
self.assertEquals(expected_is64bit, res_is64bit)
finally:
# make sure to close an additional app we have opened
app.kill_()
# setup expected for a 64-bit app on x64
expected_is64bit = True
else:
# setup expected for a 32-bit app on x86
expected_is64bit = False
# test native Notepad app
res_is64bit = is64bitprocess(self.app.UntitledNotepad.process_id())
self.assertEquals(expected_is64bit, res_is64bit)
def test_dumpwindow(self):
"""Make sure dumpwindow() function works"""
dlgdump = dumpwindow(self.dlghandle)
for key, item in dlgdump.items():
self.assertEquals(item, globals()[key](self.dlghandle))
editdump = dumpwindow(self.edit_handle)
for key, item in editdump.items():
self.assertEquals(item, globals()[key](self.edit_handle))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -7,068,823,466,650,455,000 | 41.092937 | 87 | 0.659161 | false |
kevin-coder/tensorflow-fork | tensorflow/python/tpu/tensor_tracer.py | 1 | 67356 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""A utility to trace tensor values on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import re
import sys
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu.ops import tpu_ops
_TRACER_LOG_PREFIX = ' [>>>TT>>>]'
_DEVICE_TYPE_TPU = 'tpu'
_DEVICE_TYPE_CPU = 'cpu'
_TRACE_MODE_NAN_INF = 'nan-inf'
_TRACE_MODE_PART_TENSOR = 'part-tensor'
_TRACE_MODE_PART_TENSOR_SIZE = 3
_TRACE_MODE_FULL_TENSOR = 'full-tensor'
_TRACE_MODE_NORM = 'norm'
_TRACE_MODE_MAX_ABS = 'max-abs'
_SUBMODE_BRIEF = 'brief'
_SUBMODE_DETAILED = 'detailed'
_REASON_OUTSIDE_OP_RANGE = 'not-traced-outside-op-range'
_REASON_UNSAFE_OP = 'not-traced-unsafe-op'
_REASON_WHILELOOP_OP = 'not-traced-special-whileloop-op'
_REASON_UNSAFE_SCALAR = 'not-traced-unsafe-scalar'
_REASON_SKIP_SCALAR = 'not-traced-scalar'
_REASON_LESS_INTERESTING_OP = 'not-traced-less-interesting-op'
_REASON_DEVICE_MISMATCH = 'not-traced-device-mismatch'
_REASON_DYNAMIC_SHAPE = 'not-traced-dynamic-shape'
_REASON_SCALAR_GET_TRACED = 'traced-scalar'
_REASON_TENSOR_GET_TRACED = 'traced-tensor'
_REASON_USER_INCLUDED = 'traced-user-included'
_REASON_USER_EXCLUDED = 'not-traced-user-excluded'
_REASON_NOT_EXECUTED = 'not-traced-not-in-exec-path'
_REASON_NON_NUMERIC_TENSOR = 'not-traced-non-numeric-tensor'
_REASON_FEEDS_WHILELOOP_OP = 'not-traced-feeds-special-whileloop-op'
_MARKER_SECTION_BEGIN = '!!!!!!! section-begin:'
_MARKER_SECTION_END = '!!!!!!! section-end:'
_SECTION_NAME_CONFIG = 'configuration'
_SECTION_NAME_REASON = 'reason'
_SECTION_NAME_OP_LIST = 'op-list'
_SECTION_NAME_TENSOR_LIST = 'tensor-list'
_SECTION_NAME_CACHE_INDEX_MAP = 'cache-index-map'
_SECTION_NAME_GRAPH = 'graph'
_FIELD_NAME_VERSION = 'version:'
_FIELD_NAME_DEVICE = 'device:'
_FIELD_NAME_TRACE_MODE = 'trace-mode:'
_FIELD_NAME_SUBMODE = 'submode:'
_FIELD_NAME_NUM_REPLICAS = 'num-replicas:'
_FIELD_NAME_NUM_REPLICAS_PER_HOST = 'num-replicas-per-host:'
_FIELD_NAME_NUM_HOSTS = 'num-hosts:'
_FIELD_NAME_NUM_OPS = 'number-of-ops:'
_FIELD_NAME_NUM_TENSORS = 'number-of-tensors:'
_FIELD_NAME_NUM_CACHE_INDICES = 'number-of-indices:'
_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED = 'topological-sort-succeed:'
_FLAGS_ENV_VAR = 'TENSOR_TRACER_FLAGS'
_FLAG_SINGLE_QUOTE_PAT = re.compile(r"\s*--([^=]+)='([^']*)'")
_FLAG_DOUBLE_QUOTE_PAT = re.compile(r'\s*--([^=]+)="([^"]*)"')
_FLAG_NO_QUOTE_PAT = re.compile(r'\s*--([^=]+)=(\S*)')
_FLAG_NO_EQUAL_PAT = re.compile(r'\s*--([^=]+)\s*')
_FLAG_NAME_ENABLE = 'enable'
_FLAG_NAME_TRACE_MODE = 'trace_mode'
_FLAG_NAME_USE_COMPACT_TRACE = 'compact_trace'
_FLAG_NAME_TRACE_SCALAR_OPS = 'trace_scalar'
_FLAG_NAME_TRACE_BEFORE_OPS = 'trace_before_included_ops'
_FLAG_NAME_TRACE_AFTER_OPS = 'trace_after_included_ops'
_FLAG_NAME_SUBMODE = 'submode'
_FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS = 'include_less_interesting_ops'
_FLAG_NAME_EXCLUDED_OPNAMES = 'excluded_opnames'
_FLAG_NAME_EXCLUDED_OPTYPES = 'excluded_optypes'
_FLAG_NAME_INCLUDED_OPNAMES = 'included_opnames'
_FLAG_NAME_INCLUDED_OPTYPES = 'included_optypes'
_FLAG_NAME_TRACE_DIR = 'trace_dir'
_FLAG_NAME_REPORT_FILE = 'report_file'
_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR = 'use_test_undeclared_outputs_dir'
_FLAG_NAME_OP_RANGE = 'op_range'
# Folder to dump the pre (before tensor tracer updates) and post graphs (after
# tensor tracer updates).
_FLAG_DUMP_BEFORE_AFTER_GRAPHS = 'dump_graphs'
_OP_RANGE_PAT = re.compile(r'(\d+):(\d+)')
_OUTPUT_STREAM_ESCAPE = 'file://'
_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR = 'TEST_UNDECLARED_OUTPUTS_DIR'
_TENSOR_TRACER_COLLECTION = 'tensor_tracer_variables'
_TENSOR_TRACER_CHECKPOINT = 'tensor_tracer_checkpoint'
_TRACE_FILE_NAME = 'trace.all'
_COMPACT_TRACE_FILE_PREFIX = 'compact_trace.'
_COMPACT_TRACE_ENTRY_INIT_VALUE = -1.0
_TENSOR_TRACER_STORAGE = 'tensor_tracer_storage'
_TENSOR_VALUES_CACHE = 'tensor_values_cache'
_REPLICA_ID_TAG = '#replica-id: '
def tensor_tracepoint(tensor, checkpoint_name):
"""Adds a checkpoint with the given checkpoint name for the given tensor.
The tensor will be added to the list of tensors that will be traced by the
tensor tracer.
Args:
tensor: the tensor object for which the tracing is requested.
checkpoint_name: a string name for the checkpoint. This name has to be a
unique name if used within model comparison. The tensors that have the same
    checkpoint identifier are compared in model comparison.
Returns:
The provided tensor.
"""
tensor.graph.get_collection(_TENSOR_TRACER_COLLECTION)
tensor.graph.add_to_collection(_TENSOR_TRACER_COLLECTION,
(tensor, checkpoint_name))
return tensor
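# Usage sketch (illustrative comment, not part of the original module): mark a
# tensor so Tensor Tracer traces it under a stable checkpoint name, e.g.
#   logits = tensor_tracepoint(logits, 'logits')
# where `logits` stands for any tensor in the user's graph.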
def keras_layer_tracepoint(layer, checkpoint_name):
"""An interface for adding the tensor outputs of a keras layer.
Encapsulates tensor_tracepoint.
Args:
layer: A keras layer.
checkpoint_name: a string name for the checkpoint. This name has to be a
unique name if used within model comparison. The tensors that have the same
    checkpoint identifier are compared in model comparison.
Returns:
The provided layer.
"""
try:
outputs = layer.output
if tensor_util.is_tensor(outputs):
tensor_tracepoint(outputs, '%s' % (checkpoint_name))
else:
idx = 0
for output_tensor in outputs:
        if tensor_util.is_tensor(output_tensor):
tensor_tracepoint(output_tensor, '%s_%d' % (checkpoint_name, idx))
idx += 1
except AttributeError:
pass
except RuntimeError:
pass
return layer
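# Usage sketch (illustrative comment, not part of the original module): assuming
# `layer` is a Keras layer that has already been called (so `layer.output`
# exists),
#   keras_layer_tracepoint(layer, 'dense_block_1')
# adds a checkpoint per output tensor, named 'dense_block_1' or
# 'dense_block_1_<idx>' for multi-output layers.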
def _trace_files_need_precreated(output_dir):
"""Return True if trace files must be pre-created by users."""
if not output_dir.startswith('/'):
return False
if len(output_dir) < 5:
return False
if output_dir[2] != 'n':
return False
if output_dir[3] != 's':
return False
if output_dir[1] != 'c':
return False
if output_dir[4] != '/':
return False
return True
def _get_tensor_values_cache(graph=None):
"""Returns the variable that implements tensor-value caching."""
graph = graph or ops.get_default_graph()
collection = graph.get_collection(_TENSOR_TRACER_STORAGE)
if len(collection) == 1:
return collection[0]
elif not collection:
raise RuntimeError('%s has not been created'%_TENSOR_VALUES_CACHE)
else:
raise RuntimeError('Multiple %s created'%_TENSOR_VALUES_CACHE)
return None
def _create_tensor_values_cache(graph, num_tensors):
"""Creates a variable as the cache to store intermediate tensor values."""
graph = graph or ops.get_default_graph()
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
_TENSOR_VALUES_CACHE,
shape=[num_tensors],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
_COMPACT_TRACE_ENTRY_INIT_VALUE),
trainable=False,
use_resource=True,
collections=[_TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES])
class TensorTracer(object):
"""A software construct for tracing tensor values in a TF graph on TPU.
This utility is disabled by default. It can be enabled by setting
the TENSOR_TRACER_FLAGS env variable as:
export TENSOR_TRACER_FLAGS="--enable=1"
If it is enabled, it will trace the output tensor values of
selected Ops in the graph. It has two outputs: (1) the traces and (2)
a report. The traces are dumped to a specified local file on the TPU
host. The report is printed to the log.info of the TPU job.
By passing options via the env variable, users can change:
(1) the trace mode (e.g., detecting NaN/Inf, printing partial or
full tensor values)
(2) which Ops to be traced (via op.name or op.type)
(3) output trace file path.
"""
# The set of graphs that are rewritten by tensor tracer.
_traced_graphs = set()
@staticmethod
def _match_next_flag(flags, pos):
"""Returns the match for the next TensorTracer flag.
Args:
flags: a string that contains the flags.
pos: where in flags to start the search.
Returns:
A pair where the first element is the regular-expression
match found and the second element indicates if the match
has a value.
"""
match = _FLAG_DOUBLE_QUOTE_PAT.match(flags, pos)
if match:
return match, True
match = _FLAG_SINGLE_QUOTE_PAT.match(flags, pos)
if match:
return match, True
match = _FLAG_NO_QUOTE_PAT.match(flags, pos)
if match:
return match, True
match = _FLAG_NO_EQUAL_PAT.match(flags, pos)
if match:
# The flag is found but is not given a value.
return match, False
# The flag is not found.
return None, False
@staticmethod
def validate_flag_names():
"""Validates if the TensorTrace flags passed are valid."""
valid_flag_names = [_FLAG_NAME_ENABLE, _FLAG_NAME_TRACE_MODE,
_FLAG_NAME_USE_COMPACT_TRACE,
_FLAG_NAME_TRACE_SCALAR_OPS,
_FLAG_NAME_TRACE_BEFORE_OPS,
_FLAG_NAME_TRACE_AFTER_OPS,
_FLAG_NAME_SUBMODE,
_FLAG_NAME_EXCLUDED_OPNAMES,
_FLAG_NAME_EXCLUDED_OPTYPES,
_FLAG_NAME_INCLUDED_OPNAMES,
_FLAG_NAME_INCLUDED_OPTYPES,
_FLAG_NAME_TRACE_DIR,
_FLAG_NAME_REPORT_FILE,
_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR,
_FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS,
_FLAG_NAME_OP_RANGE,
_FLAG_DUMP_BEFORE_AFTER_GRAPHS]
tensor_tracer_flags = os.environ.get(_FLAGS_ENV_VAR)
if not tensor_tracer_flags:
return
pos = 0
while True:
match, _ = TensorTracer._match_next_flag(tensor_tracer_flags, pos)
if not match:
break
flag_name = match.group(1)
if flag_name not in valid_flag_names:
raise ValueError(
'The flag name "%s" passed via the environment variable "%s" '
'is invalid. Valid flag names are:'
'\n%s'%(flag_name, _FLAGS_ENV_VAR, valid_flag_names))
pos = match.end()
@staticmethod
def print_flag_values():
"""Prints all TensorTracer flags passed via environment variables."""
tensor_tracer_flags = os.environ.get(_FLAGS_ENV_VAR)
if not tensor_tracer_flags:
return 'Env variable "%s" is not set'%_FLAGS_ENV_VAR
result = 'Env variable "%s" is set to "%s"\n'%(_FLAGS_ENV_VAR,
tensor_tracer_flags)
result += 'Individual flag value:\n'
pos = 0
while True:
match, has_value = TensorTracer._match_next_flag(
tensor_tracer_flags, pos)
if not match:
break
flag_name = match.group(1)
if has_value:
flag_value = match.group(2)
else:
flag_value = None
result += ' %s: %s\n'%(flag_name, flag_value)
pos = match.end()
result += '\n'
return result
@staticmethod
def flag_value_as_int(wanted_flag_name, default_value):
"""Returns the int value of a TensorTracer flag.
Args:
wanted_flag_name: the name of the flag we are looking for.
default_value: the default value for the flag, if not provided.
Returns:
the value of the flag.
Raises:
RuntimeError: If supposedly deadcode is reached.
"""
flag_int_value = default_value
found, flag_value = TensorTracer.get_flag_value(wanted_flag_name)
if found:
try:
flag_int_value = int(flag_value)
except ValueError:
logging.warning('Cannot convert %s to int for flag %s' % (
            flag_value, wanted_flag_name))
return flag_int_value
@staticmethod
def get_flag_value(wanted_flag_name):
"""Returns the value of a TensorTracer flags.
Args:
wanted_flag_name: the name of the flag we are looking for.
Returns:
A pair where the first element indicates if the flag is
found and the second element is the value of the flag.
Raises:
RuntimeError: If supposedly deadcode is reached.
"""
tensor_tracer_flags = os.getenv(_FLAGS_ENV_VAR)
if not tensor_tracer_flags:
return False, None
pos = 0
while True:
match, has_value = TensorTracer._match_next_flag(
tensor_tracer_flags, pos)
if not match:
return False, None
flag_name = match.group(1)
if has_value:
flag_value = match.group(2)
else:
flag_value = None
if flag_name == wanted_flag_name:
return True, flag_value
pos = match.end()
raise RuntimeError('Should not reach here.')
@staticmethod
def flag_value_to_re_list(flag_name):
"""Converts list of strings to compiled RE."""
re_list = []
found, flag_value = TensorTracer.get_flag_value(flag_name)
if not found or not flag_value:
return re_list
list_of_values = flag_value.split()
for v in list_of_values:
r = re.compile(v)
re_list.append(r)
return re_list
@staticmethod
def _is_flag_on(flag_name):
"""Returns True if the given flag is on."""
found, flag_value = TensorTracer.get_flag_value(flag_name)
if not found:
return False
if flag_value is None:
return True
# Depends on the flag value.
flag_value = flag_value.lower()
enabled = flag_value in ['1', 't', 'true', 'y', 'yes']
return enabled
@staticmethod
def is_enabled():
"""Returns True if TensorTracer is enabled."""
return TensorTracer._is_flag_on(_FLAG_NAME_ENABLE)
@staticmethod
def use_test_undeclared_outputs_dir():
"""Decides the output directory of the report and trace files.
Args:
None.
Returns:
True if the output files should be written to the
test-undeclared-outputs-directory defined via an
env variable.
"""
return TensorTracer._is_flag_on(
_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR)
@staticmethod
def use_compact_trace():
return TensorTracer._is_flag_on(
_FLAG_NAME_USE_COMPACT_TRACE)
@staticmethod
def check_device_type(device_type):
"""Checks if the given device type is valid."""
if device_type not in [_DEVICE_TYPE_TPU, _DEVICE_TYPE_CPU]:
raise ValueError('Invalid device_type "%s"'%device_type)
@staticmethod
def check_trace_mode(trace_mode):
"""Checks if the given trace mode is valid."""
valid_trace_modes = [_TRACE_MODE_NAN_INF, _TRACE_MODE_PART_TENSOR,
_TRACE_MODE_FULL_TENSOR, _TRACE_MODE_NORM,
_TRACE_MODE_MAX_ABS]
if trace_mode not in valid_trace_modes:
      raise ValueError('Invalid trace mode "%s" given to the Tensor_Tracer. '
'Valid trace modes are: %s'%(trace_mode,
valid_trace_modes))
@staticmethod
def check_submode(submode):
"""Checks if the given submode is valid."""
if not submode:
return
valid_submodes = [_SUBMODE_DETAILED, _SUBMODE_BRIEF]
if submode not in valid_submodes:
      raise ValueError('Invalid submode "%s" given to the Tensor_Tracer. '
'Valid submodes are: %s'%(submode,
valid_submodes))
@staticmethod
def loop_cond_op(op):
return op.type in ('LoopCond', 'RefLoopCond')
@staticmethod
def while_loop_op(op):
"""Returns true if op is one of the special ops of in a while loop.
Args:
op: A tf.Operation.
Returns:
True if the given op is one of [Switch, Merge, Enter, Exit,
NextIteration, LoopCond], which are all building blocks for TF while
loops.
"""
return (control_flow_util.IsLoopSwitch(op) or
control_flow_util.IsLoopMerge(op) or
control_flow_util.IsLoopEnter(op) or
control_flow_util.IsLoopExit(op) or
TensorTracer.loop_cond_op(op) or
op.type in ('RefNextIteration', 'NextIteration'))
@staticmethod
def unsafe_op(op):
"""Returns True if this op is not safe to be traced."""
if control_flow_util.IsInCond(op):
return True
# Reasons for not including following op types:
# Assign: cause incorrect result with CPU tracing.
if op.type in ['Assign']:
return True
return False
@staticmethod
def device_mismatch(device_type, op):
if device_type == _DEVICE_TYPE_TPU:
# pylint: disable=protected-access
return tpu._TPU_REPLICATE_ATTR not in op.node_def.attr
# pylint: enable=protected-access
return False
@staticmethod
def unsafe_scalar_trace(op):
"""Return true if scalar output tensor from Op is not safe to be traced."""
# Tracing the following causes cycle in the graph on TPU.
if op.type in ['LoopCond', 'Enter', 'Merge', 'Const',
'Switch', 'Less', 'ReadVariableOp']:
return True
# Tracing the following will cause casting-issue
# with the norm tracing mode or other compilation issues on CPU.
if op.type in ['VarHandleOp', 'IteratorToStringHandle',
'IteratorGetNext', 'OneShotIterator',
'IteratorV2', 'MakeIterator',
'BatchDatasetV2', 'MapDataset',
'FixedLengthRecordDataset', 'TakeDataset', 'ZipDataset',
'Placeholder', 'PlaceholderWithDefault', 'StridedSlice']:
return True
return False
@staticmethod
def less_interesting_op(op):
"""Returns True if the given Op is not an interesting one to be traced."""
found, _ = TensorTracer.get_flag_value(
_FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS)
if found:
# users force to include all ops.
return False
# Following ops are highly unlikey to cause bugs.
return op.type in ['Const', 'Identity', 'Cast', 'Shape']
@staticmethod
def reason(op_idx, details):
"""Returns reason why the Op at op_idx is traced or not."""
return '%d %s'%(op_idx, details)
@staticmethod
def topological_sort(g):
"""Performs topological sort on the given graph.
Args:
g: the graph.
Returns:
A pair where the first element indicates if the topological
sort succeeded (True if there is no cycle found; False if a
cycle is found) and the second element is either the sorted
list of nodes or the cycle of nodes found.
"""
def _is_loop_edge(op):
"""Returns true if the op is the end of a while-loop creating a cycle."""
return op.type in ['NextIteration']
def _in_op_degree(op):
"""Returns the number of incoming edges to the given op.
The edge calculation skips the edges that come from 'NextIteration' ops.
NextIteration creates a cycle in the graph. We break cycles by treating
this op as 'sink' and ignoring all outgoing edges from it.
Args:
op: Tf.Operation
Returns:
the number of incoming edges.
"""
count = 0
for op in op.control_inputs + [in_tensor.op for in_tensor in op.inputs]:
if not _is_loop_edge(op):
count += 1
return count
sorted_ops = []
op_in_degree = {op: _in_op_degree(op) for op in g.get_operations()}
frontier = [op for (op, degree) in op_in_degree.items() if degree == 0]
while frontier:
op = frontier.pop()
# Remove the op from graph, and remove its outgoing edges.
sorted_ops.append(op)
if _is_loop_edge(op):
continue
# pylint: disable=protected-access
consumers = list(op._control_outputs)
# pylint: enable=protected-access
for out_tensor in op.outputs:
consumers += [consumer_op for consumer_op in out_tensor.consumers()]
for consumer in consumers:
# For each deleted edge shift the bucket of the vertex.
op_in_degree[consumer] -= 1
if op_in_degree[consumer] == 0:
frontier.append(consumer)
if op_in_degree[consumer] < 0:
raise ValueError('consumer:%s degree mismatch'%consumer.name)
left_ops = set([op for (op, degree) in op_in_degree.items() if degree > 0])
if left_ops:
return (False, left_ops)
else:
assert len(g.get_operations()) == len(sorted_ops)
return (True, sorted_ops)
@staticmethod
def _make_op_and_tensor_maps(op_list):
"""Creates various maps and lists from op_list.
Args:
op_list: a list of Ops
Returns:
opname_idx_map: a map from Op's name to its index in op_list.
tensor_list: a list of output tensors of the Ops in op_list.
tensorname_idx_map: a map from output tensor name to its index
in tensor_list.
"""
opname_idx_map = {}
tensor_list = []
tensorname_idx_map = {}
for op_id, op in enumerate(op_list):
if op.name in opname_idx_map:
raise ValueError('Duplicated Op name: %s'%op.name)
opname_idx_map[op.name] = op_id
for output_tensor in op.outputs:
if output_tensor.name not in tensorname_idx_map:
tensor_list.append(output_tensor)
tensorname_idx_map[output_tensor.name] = len(tensor_list)-1
return (opname_idx_map, tensor_list, tensorname_idx_map)
def __init__(self):
"""Initializes a TensorTracer.
Sets the various member fields from the flags (if given) or the defaults.
"""
self._version = 'use-outside-compilation'
self._device_type = None
TensorTracer.validate_flag_names()
found, self._trace_mode = TensorTracer.get_flag_value(_FLAG_NAME_TRACE_MODE)
if not found or not self._trace_mode:
self._trace_mode = _TRACE_MODE_NAN_INF
TensorTracer.check_trace_mode(self._trace_mode)
found, self._submode = TensorTracer.get_flag_value(_FLAG_NAME_SUBMODE)
if not found or not self._submode:
self._submode = _SUBMODE_DETAILED
TensorTracer.check_submode(self._submode)
self._part_tensor_size = _TRACE_MODE_PART_TENSOR_SIZE
self._instrument_records = {}
self._set_trace_dir()
self._set_report_file()
self._set_op_range()
self._set_excluded_opnames()
self._set_excluded_optypes()
self._set_included_opnames()
self._set_included_optypes()
self._num_replicas = None
self._num_replicas_per_host = None
self._num_hosts = None
self._replica_id = None
self._included_op_full_names = set()
self._trace_scalar_ops = TensorTracer._is_flag_on(
_FLAG_NAME_TRACE_SCALAR_OPS)
# _trace_ops_before_included and _trace_ops_after_included denotes to depth
# of tracing relative to the ops given in --included_opnames or
# --included_optypes
# For example, in the below graph
# op1 --> op2 --> op3 --> op4 --> op5
# If --included_opnames=op3 then only op3 will be traced.
# If also --trace_before_included_ops=2 (_trace_ops_before_included), then
# op1 and op2 will be traced as they are at most 2 hops apart from an
# included op. Similarly, if --trace_after_included_ops=2, then op4 and op5
# will also be traced.
self._trace_ops_before_included = TensorTracer.flag_value_as_int(
_FLAG_NAME_TRACE_BEFORE_OPS, 0)
self._trace_ops_after_included = TensorTracer.flag_value_as_int(
_FLAG_NAME_TRACE_AFTER_OPS, 0)
_, self._graph_dump_path = TensorTracer.get_flag_value(
_FLAG_DUMP_BEFORE_AFTER_GRAPHS)
def _add_replica_id_to_graph(self):
"""Adds nodes for computing the replica ID to the graph."""
if self._num_replicas:
with ops.control_dependencies(None):
# Uses None as dependency to run outside of TPU graph rewrites.
self._replica_id = tpu_ops.tpu_replicated_input(
list(range(self._num_replicas)),
name='tt_replica_id')
else:
self._replica_id = 'unknown'
def _set_trace_dir(self):
found, self._trace_dir = TensorTracer.get_flag_value(_FLAG_NAME_TRACE_DIR)
if found and self._trace_dir \
and TensorTracer.use_test_undeclared_outputs_dir():
      raise ValueError('Cannot use --%s and --%s at the same time'
%(_FLAG_NAME_TRACE_DIR,
_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR))
if TensorTracer.use_test_undeclared_outputs_dir():
self._trace_dir = os.environ.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)
def _set_report_file(self):
"""Sets the path of the output report file."""
found, self._report_file_path = TensorTracer.get_flag_value(
_FLAG_NAME_REPORT_FILE)
if found and self._report_file_path \
and TensorTracer.use_test_undeclared_outputs_dir():
if os.path.isabs(self._report_file_path):
        raise ValueError('If use_test_undeclared_outputs_dir is set, '
'report_file_path cannot be an absolute path (%s)'
%self._report_file_path)
outputs_dir = os.environ.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)
self._report_file_path = os.path.join(outputs_dir,
self._report_file_path)
if not self._report_file_path:
self._report_file = None
return
try:
self._report_file = gfile.Open(self._report_file_path, 'w')
except IOError as e:
raise e
def _close_report_file(self):
if self._report_file:
self._report_file.close()
def _set_op_range(self):
"""Sets the index range of the Ops that we will consider tracing."""
found, op_range = TensorTracer.get_flag_value(_FLAG_NAME_OP_RANGE)
if not found or not op_range:
self._op_range = (-1, -1) # this means including all ops.
return
match = _OP_RANGE_PAT.match(op_range)
if not match:
self._op_range = (-1, -1) # this means including all ops.
return
self._op_range = (int(match.group(1)), int(match.group(2)))
def _inside_op_range(self, idx):
"""Return True if the given index is inside the selected range."""
if idx < self._op_range[0]:
return False
return self._op_range[1] < 0 or idx <= self._op_range[1]
def _set_excluded_opnames(self):
self._excluded_opname_re_list = TensorTracer.flag_value_to_re_list(
_FLAG_NAME_EXCLUDED_OPNAMES)
def _set_excluded_optypes(self):
self._excluded_optype_re_list = TensorTracer.flag_value_to_re_list(
_FLAG_NAME_EXCLUDED_OPTYPES)
def _set_included_opnames(self):
self._included_opname_re_list = TensorTracer.flag_value_to_re_list(
_FLAG_NAME_INCLUDED_OPNAMES)
def _set_included_optypes(self):
self._included_optype_re_list = TensorTracer.flag_value_to_re_list(
_FLAG_NAME_INCLUDED_OPTYPES)
def _is_user_included_op(self, op):
"""Checks whether the op is included in the tensor tracer flags.
Args:
op: tf Operation
Returns:
True, if the op is included.
An op is included if:
- Its op name is given in included_opnames
- Its op type is given in included_optypes
- The op is at most _trace_ops_before_included hops before an included op
- The op is at most _trace_ops_after_included hops after an included op
"""
def _is_op_or_any_neighbor_included(op, check_before=0, check_after=0):
"""Helper function to check if op is included or not."""
if op.name in self._included_op_full_names:
return True
for opname_re in self._included_opname_re_list:
if opname_re.match(op.name):
self._included_op_full_names.add(op.name)
return True
if check_after > 0:
for out_tensor in op.outputs:
for consumer in out_tensor.consumers():
if _is_op_or_any_neighbor_included(consumer, check_after - 1, 0):
self._included_op_full_names.add(op.name)
return True
if check_before > 0:
for input_tensor in op.inputs:
if _is_op_or_any_neighbor_included(input_tensor.op,
0,
check_before - 1):
self._included_op_full_names.add(op.name)
return True
return False
# check_after and check_before are swapped below, as below operation
# checks the distance from an arbitrary op to included ops.
return _is_op_or_any_neighbor_included(op,
self._trace_ops_after_included,
self._trace_ops_before_included)
def _is_user_excluded_op(self, op):
for opname_re in self._excluded_opname_re_list:
if opname_re.match(op.name):
return True
for optype_re in self._excluded_optype_re_list:
if optype_re.match(op.type):
return True
return False
def _use_tensor_values_cache(self):
"""Returns True if immediate tensors should be first saved to a cache."""
if self._trace_mode not in set([_TRACE_MODE_NAN_INF,
_TRACE_MODE_NORM, _TRACE_MODE_MAX_ABS]):
return False
if self._trace_dir and _trace_files_need_precreated(self._trace_dir):
return True
if TensorTracer.use_compact_trace():
return True
return False
def _save_tensor_value_to_cache_op(self, graph, cache_idx, updates):
"""Returns an Op that will save the given updates to an entry in the cache."""
cache = _get_tensor_values_cache(graph)
indices = constant_op.constant([cache_idx])
return state_ops.scatter_update(cache, indices, updates).op
def _write_report(self, content):
"""Writes the given content to the report."""
line = '%s %s'%(_TRACER_LOG_PREFIX, content)
if self._report_file:
self._report_file.write(line)
else:
logging.info(line)
def _write_config_section(self):
"""Writes the config section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_CONFIG))
self._write_report('%s %s\n'%(_FIELD_NAME_VERSION, self._version))
self._write_report('%s %s\n'%(_FIELD_NAME_DEVICE, self._device_type))
self._write_report('%s %s\n'%(_FIELD_NAME_TRACE_MODE, self._trace_mode))
self._write_report('%s %s\n'%(_FIELD_NAME_SUBMODE, self._submode))
self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS, self._num_replicas))
self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS_PER_HOST,
self._num_replicas_per_host))
self._write_report('%s %s\n'%(_FIELD_NAME_NUM_HOSTS, self._num_hosts))
self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_CONFIG))
def _write_reason_section(self):
"""Writes the reason section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_REASON))
for key in sorted(self._instrument_records):
self._write_report('"%s" %s\n'%(key, self._instrument_records[key]))
self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_REASON))
def _write_op_list_section(self, op_list):
"""Writes the Op-list section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_OP_LIST))
self._write_report('%s %d\n'%(_FIELD_NAME_NUM_OPS, len(op_list)))
for i in range(0, len(op_list)):
op = op_list[i]
line = '%d "%s" %s'%(i, op.name, op.type)
for out_tensor in op.outputs:
if out_tensor.name not in self._tensorname_idx_map:
raise ValueError(
'out_tensor %s is not in tensorname_idx_map'%out_tensor.name)
line += ' %d'%self._tensorname_idx_map[out_tensor.name]
line += '\n'
self._write_report(line)
self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_OP_LIST))
def _write_tensor_list_section(self, tensor_list, opname_idx_map):
"""Writes the tensor-list section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN,
_SECTION_NAME_TENSOR_LIST))
self._write_report('%s %d\n'%(_FIELD_NAME_NUM_TENSORS, len(tensor_list)))
for i in range(0, len(tensor_list)):
tensor = tensor_list[i]
line = '%d "%s"'%(i, tensor.name)
for consumer_op in tensor.consumers():
if consumer_op.name not in opname_idx_map:
raise ValueError(
'consumer_op %s is not in opname_idx_map'%consumer_op.name)
line += ' %d'%opname_idx_map[consumer_op.name]
line += '\n'
self._write_report(line)
self._write_report('%s %s\n'%(_MARKER_SECTION_END,
_SECTION_NAME_TENSOR_LIST))
def _write_cache_index_map_section(self):
"""Writes the mapping from cache index to tensor index to the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN,
_SECTION_NAME_CACHE_INDEX_MAP))
self._write_report('%s %d\n'%(_FIELD_NAME_NUM_CACHE_INDICES,
len(self._cache_idx_to_tensor_idx)))
for cache_idx in range(0, len(self._cache_idx_to_tensor_idx)):
tensor_idx = self._cache_idx_to_tensor_idx[cache_idx]
line = '%d %d\n'%(cache_idx, tensor_idx)
self._write_report(line)
self._write_report('%s %s\n'%(_MARKER_SECTION_END,
_SECTION_NAME_CACHE_INDEX_MAP))
def _write_graph_section(self, succeed, sorted_or_cycle):
"""Writes the graph section of the report."""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_GRAPH))
self._write_report('%s %s\n'%(_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED,
succeed))
l = list(sorted_or_cycle)
for i in range(0, len(l)):
self._write_report('%d "%s"\n'%(i, l[i].name))
self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_GRAPH))
def _preprocess_traced_tensor(self, tensor):
"""Computes NAN/Norm/Max on TPUs before sending to CPU.
Args:
tensor: The tensor to be traced.
Returns:
A tensor that should be input to the trace_function.
Raises:
RuntimeError: If the trace mode is invalid.
"""
def _detect_nan_inf(tensor):
"""Trace function for detecting any NaN/Inf in the tensor."""
if tensor.dtype.is_floating:
mask = math_ops.reduce_any(
gen_math_ops.logical_or(
gen_math_ops.is_nan(tensor), gen_math_ops.is_inf(tensor)))
output_tensor = control_flow_ops.cond(mask,
lambda: constant_op.constant(1.0),
lambda: constant_op.constant(0.0))
else:
output_tensor = constant_op.constant(0.0)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
def _show_norm(tensor):
tensor = math_ops.cast(tensor, dtypes.float32)
output_tensor = linalg_ops.norm(tensor)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
def _show_max_abs(tensor):
tensor = math_ops.cast(tensor, dtypes.float32)
output_tensor = math_ops.reduce_max(math_ops.abs(tensor))
zero = constant_op.constant(0, dtypes.float32)
output_tensor = gen_math_ops.maximum(zero, output_tensor)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
if self._trace_mode == _TRACE_MODE_NAN_INF:
return _detect_nan_inf(tensor)
if self._trace_mode == _TRACE_MODE_PART_TENSOR:
return tensor
if self._trace_mode == _TRACE_MODE_FULL_TENSOR:
return tensor
if self._trace_mode == _TRACE_MODE_NORM:
return _show_norm(tensor)
if self._trace_mode == _TRACE_MODE_MAX_ABS:
return _show_max_abs(tensor)
raise RuntimeError(
'Tensor trace fun for %s is not yet implemented' % self._trace_mode)
def _make_tensor_trace_fun(self, tensor_name):
"""Makes the tensor tracing function called by outside compilation.
Args:
tensor_name: name of the tensor being traced.
Returns:
A function to be passed as the first argument to outside compilation.
Raises:
RuntimeError: If the trace mode is invalid.
"""
def _print_tensor(tensor_name, num_elements, tensor, output_tensor):
"""Prints a tensor value to a file.
Args:
tensor_name: name of the tensor being traced.
num_elements: number of elements to print (-1 means print all).
tensor: the tensor needs to be returned.
output_tensor: the tensor needs to be printed.
Returns:
The same tensor passed via the "tensor" argument.
Raises:
ValueError: If tensor_name is not already in
self._tensorname_idx_map.
"""
if self._submode == _SUBMODE_BRIEF:
if tensor_name not in self._tensorname_idx_map:
raise ValueError(
'Tensor name %s is not in the tensorname_idx_map'%tensor_name)
msg = '%d'%self._tensorname_idx_map[tensor_name]
else:
msg = '"%s"'%tensor_name
if self._trace_dir:
output_path = os.path.join(self._trace_dir, _TRACE_FILE_NAME)
output_stream = _OUTPUT_STREAM_ESCAPE + output_path
else:
output_stream = sys.stderr
return logging_ops.print_v2(msg, array_ops.shape(output_tensor),
'@', self._replica_id,
'\n', output_tensor, '\n',
summarize=num_elements,
output_stream=output_stream)
def _show_part_tensor(tensor):
"""Trace function for printing part of the tensor."""
return _print_tensor(tensor_name, self._part_tensor_size,
tensor, tensor)
def _show_full_tensor(tensor):
"""Trace function for printing the entire tensor."""
return _print_tensor(tensor_name, -1, tensor, tensor)
if self._trace_mode == _TRACE_MODE_PART_TENSOR:
return _show_part_tensor
# The input tensor has a shape of "[1]" for _TRACE_MODE_NAN_INF,
# _TRACE_MODE_NORM, and _TRACE_MODE_MAX_ABS, as related computations are
# performed within TPUs and only their results are transferred to CPU.
# Simply, print the full tensor for these trace modes.
if self._trace_mode in [
_TRACE_MODE_NAN_INF, _TRACE_MODE_NORM, _TRACE_MODE_FULL_TENSOR,
_TRACE_MODE_MAX_ABS
]:
return _show_full_tensor
raise RuntimeError('Tensor trace fun for %s is not yet implemented'
%self._trace_mode)
def _skip_op(self, op_id, op, user_included, user_excluded,
in_exec_path=True):
"""Returns True if we should not trace Op."""
if TensorTracer.while_loop_op(op):
self._instrument_records[op.name] = TensorTracer.reason(
op_id, _REASON_WHILELOOP_OP)
return True
if TensorTracer.unsafe_op(op):
self._instrument_records[op.name] = TensorTracer.reason(
op_id, _REASON_UNSAFE_OP)
return True
if TensorTracer.device_mismatch(self._device_type, op):
self._instrument_records[op.name] = TensorTracer.reason(
op_id, _REASON_DEVICE_MISMATCH)
return True
if not in_exec_path:
self._instrument_records[op.name] = TensorTracer.reason(
op_id, _REASON_NOT_EXECUTED)
return True
if not self._inside_op_range(op_id):
self._instrument_records[op.name] = TensorTracer.reason(
op_id, _REASON_OUTSIDE_OP_RANGE)
return True
if TensorTracer.less_interesting_op(op):
self._instrument_records[op.name] = TensorTracer.reason(
op_id, _REASON_LESS_INTERESTING_OP)
return True
if user_included:
self._instrument_records[op.name] = TensorTracer.reason(
op_id, _REASON_USER_INCLUDED)
return False
if user_excluded:
self._instrument_records[op.name] = TensorTracer.reason(
op_id, _REASON_USER_EXCLUDED)
return True
return False
def _skip_tensor(self, op_id, out_tensor, user_included,
user_excluded):
"""Returns True if we should not trace out_tensor."""
# Skips a tensor if the tensor has a non-numeric type.
# Note: we cannot use check_ops.is_numeric_tensor(out_tensor)
    # because it also excludes tensors with dtypes bool and
    # float32_ref, which we actually want to trace.
non_numeric_tensor_types = set([dtypes.variant, dtypes.resource,
dtypes.string])
if out_tensor.dtype in non_numeric_tensor_types:
self._instrument_records[out_tensor.name] = TensorTracer.reason(
op_id, _REASON_NON_NUMERIC_TENSOR)
return True
# Skip a tensor if it feeds a special while loop op.
if [consumer for consumer in out_tensor.consumers() if
TensorTracer.while_loop_op(consumer)]:
self._instrument_records[out_tensor.name] = TensorTracer.reason(
op_id, _REASON_FEEDS_WHILELOOP_OP)
return True
if user_included:
self._instrument_records[out_tensor.name] = TensorTracer.reason(
op_id, _REASON_USER_INCLUDED)
return False
if user_excluded:
self._instrument_records[out_tensor.name] = TensorTracer.reason(
op_id, _REASON_USER_EXCLUDED)
return True
if not out_tensor.get_shape().is_fully_defined():
# If trace mode is nan-inf, norm or max, then the tensor will be reduced
# to a scalar before the outside compilation call.
if self._trace_mode in [
_TRACE_MODE_NAN_INF, _TRACE_MODE_NORM, _TRACE_MODE_MAX_ABS
]:
self._instrument_records[out_tensor.name] = TensorTracer.reason(
op_id, _REASON_TENSOR_GET_TRACED)
return False
else:
self._instrument_records[out_tensor.name] = TensorTracer.reason(
op_id, _REASON_DYNAMIC_SHAPE)
return True
rank = len(out_tensor.shape)
if rank < 1:
# scalar
if self._trace_scalar_ops:
if TensorTracer.unsafe_scalar_trace(out_tensor.op):
self._instrument_records[out_tensor.name] = TensorTracer.reason(
op_id, _REASON_UNSAFE_SCALAR)
return True
else:
self._instrument_records[out_tensor.name] = TensorTracer.reason(
op_id, _REASON_SCALAR_GET_TRACED)
return False
else:
self._instrument_records[out_tensor.name] = TensorTracer.reason(
op_id, _REASON_SKIP_SCALAR)
return True
else:
# tensor
self._instrument_records[out_tensor.name] = TensorTracer.reason(
op_id, _REASON_TENSOR_GET_TRACED)
return False
def _filter_execution_path_operations(self, operations, fetches):
"""Returns the set of ops in the execution path to compute given fetches."""
# If no fetch provided, then return all operations.
if fetches is None:
return set(operations)
# Convert to list, if a single element is provided.
if not isinstance(fetches, (list, tuple)):
fetches = [fetches]
# If a tensor is given as fetch, convert it to op.
op_fetches = []
for fetch in fetches:
if isinstance(fetch, ops.Operation):
op_fetches.append(fetch)
elif isinstance(fetch, ops.Tensor):
op_fetches.append(fetch.op)
else:
raise RuntimeError('Given fetch:%s is neither a tensor nor an op.'
%fetch)
execution_path_operations = set(op_fetches)
traverse_stack = list(op_fetches)
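    # Walk backwards from the fetches through data and control inputs to
    # collect every op on the execution path.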
while True:
if not traverse_stack:
break
head_op = traverse_stack.pop()
input_ops = [tensor_input.op for tensor_input in head_op.inputs]
input_ops.extend(head_op.control_inputs)
for input_op in input_ops:
if input_op not in execution_path_operations:
# Filter out loop condition operations, tracing them causes a cycle.
# Trace only the loop-body.
if TensorTracer.loop_cond_op(input_op):
continue
execution_path_operations.add(input_op)
traverse_stack.append(input_op)
return execution_path_operations
def _determine_traced_tensors(self, graph, ops_in_exec_path):
"""Determines the tensors that will be traced."""
self._traced_tensorname_to_cache_idx_map = {}
self._cache_idx_to_tensor_idx = []
operations = graph.get_operations()
checkpoint_operations = self._get_checkpoints(graph)
for op_id, op in enumerate(operations):
if checkpoint_operations and op.name not in checkpoint_operations:
continue
user_included = self._is_user_included_op(op)
user_excluded = self._is_user_excluded_op(op)
in_exec_path = op in ops_in_exec_path
if self._skip_op(op_id, op, user_included, user_excluded, in_exec_path):
continue
for i in range(len(op.outputs)):
out_tensor = op.outputs[i]
if self._skip_tensor(op_id, out_tensor, user_included,
user_excluded):
continue
tensor_name = out_tensor.name
if tensor_name in self._traced_tensorname_to_cache_idx_map:
raise ValueError(
'Tensor name %s should not be already in '
'traced_tensorname_to_cache_idx_map'%tensor_name)
if tensor_name not in self._tensorname_idx_map:
raise ValueError(
'Tensor name %s is not in the tensorname_idx_map'%tensor_name)
tensor_idx = self._tensorname_idx_map[tensor_name]
cache_idx = len(self._traced_tensorname_to_cache_idx_map)
self._traced_tensorname_to_cache_idx_map[tensor_name] = cache_idx
self._cache_idx_to_tensor_idx.append(tensor_idx)
if len(self._traced_tensorname_to_cache_idx_map) != len(
self._cache_idx_to_tensor_idx):
raise RuntimeError('len(self._traced_tensorname_to_cache_idx_map) != '
                         'len(self._cache_idx_to_tensor_idx)')
def _check_trace_files(self):
"""Checks if any requirements for trace files are satisfied."""
if not self._trace_dir:
# traces will be written to stderr. No need to check trace files.
return
if _trace_files_need_precreated(self._trace_dir):
for replica_id in range(0, self._num_replicas):
trace_file_path = os.path.join(
self._trace_dir,
_COMPACT_TRACE_FILE_PREFIX) + '%d'%replica_id
if not gfile.Exists(trace_file_path):
raise RuntimeError(
'%s must be pre-created with the '
'appropriate properties.'%trace_file_path)
else:
if not gfile.Exists(self._trace_dir):
gfile.MkDir(self._trace_dir)
if not gfile.Exists(self._trace_dir):
raise RuntimeError('Failed to create %s'%self._trace_dir)
def _pre_tracing(self, graph, fetches):
"""Work needs to be done prior to TPU or CPU tracing."""
self._check_trace_files()
operations = graph.get_operations()
(opname_idx_map, tensor_list, self._tensorname_idx_map) = (
TensorTracer._make_op_and_tensor_maps(operations))
self._write_config_section()
self._write_op_list_section(operations)
self._write_tensor_list_section(tensor_list, opname_idx_map)
# Filter out the operations that won't be executed.
# if fetches=None, then ops_in_exec_path = set(operations)
ops_in_exec_path = self._filter_execution_path_operations(operations,
fetches)
self._determine_traced_tensors(graph, ops_in_exec_path)
self._write_cache_index_map_section()
# Does the topological sort before adding any nodes to the graph.
(succeed, sorted_or_cycle) = TensorTracer.topological_sort(graph)
if self._use_tensor_values_cache():
_create_tensor_values_cache(graph,
len(self._cache_idx_to_tensor_idx))
return (ops_in_exec_path, succeed, sorted_or_cycle)
def _post_tracing(self, succeed, sorted_or_cycle):
"""Work needs to be done after TPU or CPU tracing."""
self._write_reason_section()
self._write_graph_section(succeed, sorted_or_cycle)
self._close_report_file()
def _get_checkpoints(self, graph):
"""Returns the list of Ops that produce the tensors traced with API.
Args:
graph: the graph of Ops.
Returns:
A set of operation names which should be traced.
"""
self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN,
_TENSOR_TRACER_CHECKPOINT))
checkpoint_operations = set()
tensor_tracer_variables = graph.get_collection(_TENSOR_TRACER_COLLECTION)
for (tensor, checkpoint_name) in tensor_tracer_variables:
self._write_report('%s %s\n'%(tensor.name, checkpoint_name))
checkpoint_operations.add(tensor.op.name)
self._write_report('%s %s\n'%(_MARKER_SECTION_END,
_TENSOR_TRACER_CHECKPOINT))
return checkpoint_operations
def _generate_flush_cache_op(self, graph, start_replica, on_tpu):
"""Generates an Op that will flush the cache to file.
Args:
graph: the graph of Ops
start_replica: the ID of the first replica being flushed by this Op.
on_tpu: if the graph is executed on TPU.
Returns:
The Op to flush the cache to file.
"""
def _make_flush_fun(replica_id):
"""Makes a function for flushing the cache for the given replica."""
def _fun():
"""A function that flushes the cache to a file."""
def _flush_fun(cache):
"""Flushes the cache to a file."""
if isinstance(replica_id, str):
replica_id_str = replica_id
else:
replica_id_str = '%d'%replica_id
if self._trace_dir:
output_path = os.path.join(self._trace_dir,
_COMPACT_TRACE_FILE_PREFIX) \
+ replica_id_str
output_stream = _OUTPUT_STREAM_ESCAPE + output_path
else:
output_stream = sys.stderr
new_step_line = _REPLICA_ID_TAG + replica_id_str
print_op = logging_ops.print_v2(
new_step_line, '\n',
cache, '\n',
summarize=-1,
output_stream=output_stream)
with ops.control_dependencies([print_op]):
return constant_op.constant(0).op
cache = _get_tensor_values_cache(graph)
if on_tpu:
flush_op = tpu.outside_compilation(_flush_fun, cache.value())
else:
flush_op = _flush_fun(cache.value())
with ops.control_dependencies([flush_op]):
reset_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE,
dtype=cache.dtype,
shape=cache.shape)
assign_op = state_ops.assign(cache, reset_value).op
with ops.control_dependencies([assign_op]):
return flush_op.outputs[0]
return _fun
def _f(replica_id):
return _make_flush_fun(replica_id)
def _eq(x):
return math_ops.equal(x, self._replica_id)
def _do_nothing():
return constant_op.constant(0)
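    # At most 8 replicas run on each host (enforced in trace_tpu), so one case
    # branch per possible replica id on this host suffices.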
return control_flow_ops.case({\
_eq(start_replica): _f(start_replica), \
_eq(start_replica+1): _f(start_replica+1), \
_eq(start_replica+2): _f(start_replica+2), \
_eq(start_replica+3): _f(start_replica+3), \
_eq(start_replica+4): _f(start_replica+4), \
_eq(start_replica+5): _f(start_replica+5), \
_eq(start_replica+6): _f(start_replica+6), \
_eq(start_replica+7): _f(start_replica+7), \
},
default=_do_nothing,
exclusive=True).op
def _flush_tensor_values_cache(self, graph, tensor_fetches, op_fetches,
on_tpu):
"""Flushes the intermediate tensor values in the graph to the cache.
Args:
graph: the graph of Ops
tensor_fetches: list of tensor results returned by the model_fn.
op_fetches: list of ops that are returned by the model_fn, e.g., train_op.
on_tpu: if the graph is executed on TPU.
Returns:
An identical copy of tensor_fetches.
"""
# Add a dependency to op and tensor fetches to make sure that all tracing
# ops are executed before flushing trace results.
with ops.control_dependencies(op_fetches +
[tensor.op for tensor in tensor_fetches]):
flush_cache_op_list = []
for host in range(self._num_hosts):
start_replica = host * 8
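        # Each host handles up to 8 replicas, so replica ids start at host * 8.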
flush_op = self._generate_flush_cache_op(graph, start_replica, on_tpu)
flush_cache_op_list.append(flush_op)
return control_flow_ops.tuple(tensor_fetches,
control_inputs=flush_cache_op_list)
def _process_tensor_fetches(self, tensor_fetches):
"""Check that tensor_fetches is not empty and have valid tensors."""
# If none or empty list.
if tensor_fetches is None:
raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
'None.')
if not isinstance(tensor_fetches, (list, tuple)):
tensor_fetches = [tensor_fetches]
elif not tensor_fetches:
raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
'empty list.')
fetches = []
for fetch in tensor_fetches:
if isinstance(fetch, ops.Tensor):
fetches.append(fetch)
else:
raise RuntimeError('Given tensor_fetch:%s is not a tensor.' % fetch)
return fetches
def _process_op_fetches(self, op_fetches):
"""Check that op_fetches have valid ops."""
if op_fetches is None:
return []
if not isinstance(op_fetches, (list, tuple)):
op_fetches = [op_fetches]
fetches = []
for fetch in op_fetches:
if isinstance(fetch, ops.Operation):
fetches.append(fetch)
else:
logging.warning('Ignoring the given op_fetch:%s, which is not an op.' %
fetch)
return fetches
def _convert_fetches_to_input_format(self, input_fetches, current_fetches):
"""Changes current_fetches' format, so that it matches input_fetches."""
if isinstance(input_fetches, ops.Tensor):
if len(current_fetches) != 1:
raise RuntimeError('Tensor tracer input/output fetches do not match.')
return current_fetches[0]
else:
      if len(input_fetches) != len(current_fetches):
raise RuntimeError('Tensor tracer input/output fetches do not match.')
elif isinstance(input_fetches, tuple):
return tuple(current_fetches)
else:
return current_fetches
def _get_op_control_flow_context(self, op):
"""Returns the control flow of the given op.
Args:
op: tf.Operation for which the control flow context is requested.
Returns:
      op_control_flow_context: which is the control flow context of the given
op. If the operation type is LoopExit, returns the outer control flow
context.
"""
# pylint: disable=protected-access
op_control_flow_context = op._control_flow_context
# pylint: enable=protected-access
if control_flow_util.IsLoopExit(op):
op_control_flow_context = op_control_flow_context.outer_context
return op_control_flow_context
def _trace_execution(self, graph,
tensor_fetches,
op_fetches=None,
on_tpu=True):
"""Commong tracing function for both CPU and TPUs.
The caller function should set _device_type, _num_replicas,
_num_replicas_per_host, _num_hosts and _replica_id before calling
_trace_execution.
Args:
graph: the graph of Ops executed on the TPU.
      tensor_fetches: a (list, tuple, or a single object) of tensor fetches
        returned by model_fn given to session.run. Function must be provided
        with at least one tensor to fetch.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
on_tpu: True if executing on TPU.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If tensor_fetches is None or empty.
"""
def _cast_unsupported_dtypes(tensor):
"""Casts tensor to a supported type."""
if tensor.dtype.__eq__(dtypes.int64):
# outside-compilation doesn't support int64 input yet.
return math_ops.cast(tensor, dtypes.int32)
if tensor.dtype.__eq__(dtypes.bfloat16) or tensor.dtype.__eq__(
dtypes.float16):
# Since host can't handle bf16, convert tensor to f32.
return math_ops.cast(tensor, dtypes.float32)
return tensor
TensorTracer.check_device_type(self._device_type)
# Check in_tensor_fetches, and op_fetches and convert them to lists.
processed_t_fetches = self._process_tensor_fetches(tensor_fetches)
op_fetches = self._process_op_fetches(op_fetches)
all_fetches = op_fetches + [tensor.op for tensor in processed_t_fetches]
# Filter the set of ops that will be executed, and topological sort.
(exec_op_set, succeed, sorted_or_cycle) = self._pre_tracing(graph,
all_fetches)
tensor_fetch_set = set(processed_t_fetches)
tracing_ops = []
# pylint: disable=protected-access
current_control_flow_context = graph._get_control_flow_context()
# pylint: enable=protected-access
sorted_exec_op_list = list(exec_op_set)
sorted_exec_op_list.sort(key=lambda op: op.name)
# Trace ops only if they are in the execution path.
for op in sorted_exec_op_list:
for i in range(len(op.outputs)):
out_tensor = op.outputs[i]
tensor_name = out_tensor.name
if tensor_name not in self._traced_tensorname_to_cache_idx_map:
continue
# Create the list of consumers before calling _preprocess_traced_tensor.
        # Otherwise, adding the control input below will introduce a cycle in the
# graph.
consumers = out_tensor.consumers()
# Not all consumers may be in the exec path. Filter out the consumers
# to keep the graph simpler.
consumers = [cop for cop in consumers if cop in exec_op_set]
# If there is no consumer of the tensor, there is no need to trace it;
# unless the tensor itself is one of the fetches.
is_a_fetched_tensor = out_tensor in tensor_fetch_set
if (not consumers) and (not is_a_fetched_tensor):
continue
op_control_flow_context = self._get_op_control_flow_context(op)
# pylint: disable=protected-access
graph._set_control_flow_context(op_control_flow_context)
# pylint: enable=protected-access
processed_out_tensor = self._preprocess_traced_tensor(out_tensor)
if on_tpu:
processed_out_tensor = _cast_unsupported_dtypes(processed_out_tensor)
if self._use_tensor_values_cache():
cache_idx = self._traced_tensorname_to_cache_idx_map[tensor_name]
trace_op = self._save_tensor_value_to_cache_op(graph,
cache_idx,
processed_out_tensor)
elif on_tpu:
trace_op = tpu.outside_compilation(
self._make_tensor_trace_fun(tensor_name), processed_out_tensor)
else:
trace_fun = self._make_tensor_trace_fun(tensor_name)
trace_op = trace_fun(processed_out_tensor)
if is_a_fetched_tensor:
tracing_ops.append(trace_op)
continue
# Add it to all consumers, as some consumers may not be executed if they
# are in a control flow.
for consumer_op in consumers:
# pylint: disable=protected-access
consumer_op._add_control_input(trace_op)
# pylint: enable=protected-access
# pylint: disable=protected-access
graph._set_control_flow_context(current_control_flow_context)
# pylint: enable=protected-access
if tracing_ops:
# If we are tracing a fetched tensor, their dependency is stored in
# tracing_ops.
processed_t_fetches = control_flow_ops.tuple(processed_t_fetches,
control_inputs=tracing_ops)
if self._use_tensor_values_cache():
processed_t_fetches = self._flush_tensor_values_cache(graph,
processed_t_fetches,
op_fetches,
on_tpu=on_tpu)
self._post_tracing(succeed, sorted_or_cycle)
# processed_t_fetches is a list at this point. Convert it to the same
# format as given in tensor_fetches.
return self._convert_fetches_to_input_format(tensor_fetches,
processed_t_fetches)
def trace_tpu(self, graph,
tensor_fetches,
op_fetches=None,
num_replicas=None,
num_replicas_per_host=None,
num_hosts=None):
"""Traces the tensors generated by TPU Ops in a TF graph.
Args:
graph: the graph of Ops executed on the TPU.
      tensor_fetches: a (list, tuple, or a single object) of tensor fetches
        returned by model_fn given to session.run. Function must be provided
        with at least one tensor to fetch.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
num_replicas: number of replicas used on the TPU.
num_replicas_per_host: number of replicas per TPU host.
num_hosts: total number of TPU hosts.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If num_replicas_per_host > 8.
RuntimeError: If tensor_fetches is None or empty.
"""
if graph in TensorTracer._traced_graphs:
logging.warning('Graph is already rewritten with tensor tracer, ignoring '
'multiple calls.')
return tensor_fetches
else:
TensorTracer._traced_graphs.add(graph)
self._device_type = _DEVICE_TYPE_TPU
self._num_replicas = num_replicas
self._num_replicas_per_host = num_replicas_per_host
self._num_hosts = num_hosts
if self._num_replicas is not None:
if self._num_replicas_per_host is None:
self._num_replicas_per_host = 8
if self._num_hosts is None:
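        # Ceiling division: add one extra host when the replicas do not divide
        # evenly among hosts.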
self._num_hosts = num_replicas // self._num_replicas_per_host + \
(num_replicas % self._num_replicas_per_host > 0)
if self._num_replicas_per_host > 8:
# Checks for the assumption in _generate_flush_cache_op().
raise RuntimeError('num_replicas_per_host (%d) is '
'greater than 8'%self._num_replicas_per_host)
if self._graph_dump_path:
graph_io.write_graph(graph, self._graph_dump_path,
'graph_before_tt.pbtxt')
with graph.as_default():
self._add_replica_id_to_graph()
tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,
on_tpu=True)
if self._graph_dump_path:
graph_io.write_graph(graph, self._graph_dump_path,
'graph_after_tt.pbtxt')
return tensor_fetches
def trace_cpu(self, graph, tensor_fetches, op_fetches=None):
"""Traces the tensors generated by CPU Ops in a TF graph.
Args:
graph: the graph of Ops executed on the CPU.
      tensor_fetches: a (list, tuple, or a single object) of tensor fetches
        returned by model_fn given to session.run. Function must be provided
        with at least one tensor to fetch.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If tensor_fetches is None or empty.
"""
if graph in TensorTracer._traced_graphs:
logging.warning('Graph is already rewritten with tensor tracer, ignoring '
'multiple calls.')
return tensor_fetches
else:
TensorTracer._traced_graphs.add(graph)
self._device_type = _DEVICE_TYPE_CPU
self._num_replicas = 1
self._num_replicas_per_host = 1
self._num_hosts = 1
self._replica_id = 0
if self._graph_dump_path:
graph_io.write_graph(graph, self._graph_dump_path,
'graph_before_tt.pbtxt')
with graph.as_default():
tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,
on_tpu=False)
if self._graph_dump_path:
graph_io.write_graph(graph, self._graph_dump_path,
'graph_after_tt.pbtxt')
return tensor_fetches
| apache-2.0 | 968,700,902,319,309,300 | 37.911612 | 82 | 0.626032 | false |
elliterate/capybara.py | capybara/tests/session/test_assert_selector.py | 1 | 6373 | import pytest
import re
import capybara
from capybara.exceptions import ElementNotFound
class TestAssertSelector:
@pytest.fixture(autouse=True)
def setup_session(self, session):
session.visit("/with_html")
def test_does_not_raise_if_the_given_selector_is_on_the_page(self, session):
session.assert_selector("xpath", "//p")
session.assert_selector("css", "p a#foo")
session.assert_selector("//p[contains(.,'est')]")
def test_raises_if_the_given_selector_is_not_on_the_page(self, session):
with pytest.raises(ElementNotFound):
session.assert_selector("xpath", "//abbr")
with pytest.raises(ElementNotFound):
session.assert_selector("css", "p a#doesnotexist")
with pytest.raises(ElementNotFound):
session.assert_selector("//p[contains(.,'thisstringisnotonpage')]")
def test_uses_default_selector(self, session):
capybara.default_selector = "css"
with pytest.raises(ElementNotFound):
session.assert_selector("p a#doesnotexist")
session.assert_selector("p a#foo")
def test_respects_scopes(self, session):
with session.scope("//p[@id='first']"):
session.assert_selector(".//a[@id='foo']")
with pytest.raises(ElementNotFound):
session.assert_selector(".//a[@id='red']")
def test_is_true_if_the_content_is_on_the_page_the_given_number_of_times(self, session):
session.assert_selector("//p", count=3)
session.assert_selector("//p//a[@id='foo']", count=1)
session.assert_selector("//p[contains(.,'est')]", count=1)
def test_raises_if_the_content_is_on_the_page_the_given_number_of_times(self, session):
with pytest.raises(ElementNotFound):
session.assert_selector("//p", count=6)
with pytest.raises(ElementNotFound):
session.assert_selector("//p//a[@id='foo']", count=2)
with pytest.raises(ElementNotFound):
session.assert_selector("//p[contains(.,'est')]", count=5)
def test_raises_if_the_content_is_not_on_the_page_at_all(self, session):
with pytest.raises(ElementNotFound):
session.assert_selector("//abbr", count=2)
with pytest.raises(ElementNotFound):
session.assert_selector("//p//a[@id='doesnotexist']", count=1)
def test_discards_all_matches_where_the_given_string_is_not_contained(self, session):
session.assert_selector("//p//a", text="Redirect", count=1)
with pytest.raises(ElementNotFound):
session.assert_selector("//p", text="Doesnotexist")
def test_discards_all_matches_where_the_given_regex_is_not_matched(self, session):
session.assert_selector("//p//a", text=re.compile("re[dab]i", re.IGNORECASE), count=1)
with pytest.raises(ElementNotFound):
session.assert_selector("//p//a", text=re.compile("Red$"))
@pytest.mark.requires("js")
def test_finds_element_if_it_appears_before_given_wait_duration(self, session):
with capybara.using_wait_time(0.1):
session.visit("/with_js")
session.click_link("Click me")
session.assert_selector("css", "a#has-been-clicked", text="Has been clicked", wait=0.9)
class TestAssertNoSelector:
@pytest.fixture(autouse=True)
def setup_session(self, session):
session.visit("/with_html")
def test_raises_an_error_if_the_given_selector_is_on_the_page(self, session):
with pytest.raises(ElementNotFound):
session.assert_no_selector("xpath", "//p")
with pytest.raises(ElementNotFound):
session.assert_no_selector("css", "p a#foo")
with pytest.raises(ElementNotFound):
session.assert_no_selector("//p[contains(.,'est')]")
def test_is_true_if_the_given_selector_is_not_on_the_page(self, session):
session.assert_no_selector("xpath", "//abbr")
session.assert_no_selector("css", "p a#doesnotexist")
session.assert_no_selector("//p[contains(.,'thisstringisnotonpage')]")
def test_uses_the_default_selector(self, session):
capybara.default_selector = "css"
session.assert_no_selector("p a#doesnotexist")
with pytest.raises(ElementNotFound):
session.assert_no_selector("p a#foo")
def test_respects_scopes(self, session):
with session.scope("//p[@id='first']"):
with pytest.raises(ElementNotFound):
session.assert_no_selector(".//a[@id='foo']")
session.assert_no_selector(".//a[@id='red']")
def test_raises_an_error_if_the_content_is_on_the_page_the_given_number_of_times(self, session):
with pytest.raises(ElementNotFound):
session.assert_no_selector("//p", count=3)
with pytest.raises(ElementNotFound):
session.assert_no_selector("//p//a[@id='foo']", count=1)
with pytest.raises(ElementNotFound):
session.assert_no_selector("//p[contains(.,'est')]", count=1)
def test_is_true_if_the_content_is_on_the_page_the_wrong_number_of_times(self, session):
session.assert_no_selector("//p", count=6)
session.assert_no_selector("//p//a[@id='foo']", count=2)
session.assert_no_selector("//p[contains(.,'est')]", count=5)
def test_is_true_if_the_content_is_not_on_the_page_at_all(self, session):
session.assert_no_selector("//abbr", count=2)
session.assert_no_selector("//p//a[@id='doesnotexist']", count=1)
def test_discards_all_matches_where_the_given_string_is_contained(self, session):
with pytest.raises(ElementNotFound):
session.assert_no_selector("//p//a", text="Redirect", count=1)
session.assert_no_selector("//p", text="Doesnotexist")
def test_discards_all_matches_where_the_given_regex_is_matched(self, session):
with pytest.raises(ElementNotFound):
session.assert_no_selector("//p//a", text=re.compile(r"re[dab]i", re.IGNORECASE), count=1)
session.assert_no_selector("//p//a", text=re.compile(r"Red$"))
@pytest.mark.requires("js")
def test_does_not_find_element_if_it_appears_after_given_wait_duration(self, session):
session.visit("/with_js")
session.click_link("Click me")
session.assert_no_selector("css", "a#has-been-clicked", text="Has been clicked", wait=0.1)
| mit | 7,209,807,862,391,950,000 | 45.860294 | 102 | 0.640515 | false |
TGDiamond/Diamond | qa/rpc-tests/test_framework.py | 1 | 3043 | #!/usr/bin/env python
# Copyright (c) 2014 The Diamond Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
# Add python-diamondrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-diamondrpc"))
import shutil
import tempfile
import traceback
from diamondrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class DiamondTestFramework(object):
# These may be over-ridden by subclasses:
def run_test(self, nodes):
        assert_equal(nodes[0].getblockcount(), 200)
        assert_equal(nodes[0].getbalance(), 25*50)
def add_options(self, parser):
pass
def setup_chain(self, tmp_directory):
print("Initializing test directory "+tmp_directory)
initialize_chain(tmp_directory)
def setup_network(self, tmp_directory):
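        # Start two nodes, connect them, and wait until their chains are in sync.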
nodes = start_nodes(2, tmp_directory)
connect_nodes(nodes[1], 0)
sync_blocks(nodes)
return nodes
def main(self):
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave diamondds and test.* datadir on exit or error")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing diamondd/diamond-cli (default: %default%)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
os.environ['PATH'] = self.options.srcdir+":"+os.environ['PATH']
check_json_precision()
success = False
nodes = []
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain(self.options.tmpdir)
nodes = self.setup_network(self.options.tmpdir)
self.run_test(nodes)
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: "+e.message)
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: "+str(e))
traceback.print_tb(sys.exc_info()[2])
if not self.options.nocleanup:
print("Cleaning up")
stop_nodes(nodes)
wait_diamondds()
shutil.rmtree(self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
| mit | -6,690,186,431,457,588,000 | 32.076087 | 103 | 0.612882 | false |
obriencj/python-promises | tests/multithread.py | 1 | 1321 | # This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
"""
Unit-tests for python-promises multithreading support
:author: Christopher O'Brien <[email protected]>
:license: LGPL v.3
"""
from promises.multithread import ThreadExecutor, ProxyThreadExecutor
from .multiprocess import TestProcessExecutor
class TestThreadExecutor(TestProcessExecutor):
"""
Create promises which will deliver in a separate thread.
"""
def executor(self):
return ThreadExecutor()
class TestProxyThreadExecutor(TestProcessExecutor):
"""
Create transparent proxy promises which will deliver in a separate
thread.
"""
def executor(self):
return ProxyThreadExecutor()
#
# The end.
| lgpl-3.0 | -3,424,985,148,856,857,000 | 26.520833 | 70 | 0.746404 | false |
pamfilos/invenio | modules/websearch/lib/websearchadminlib.py | 1 | 168088 | ## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio WebSearch Administrator Interface."""
__revision__ = "$Id$"
import cgi
import random
import time
import sys
from invenio.dateutils import strftime
import os
import traceback
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.config import \
CFG_CACHEDIR, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_URL,\
CFG_WEBCOMMENT_ALLOW_COMMENTS, \
CFG_WEBSEARCH_SHOW_COMMENT_COUNT, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_SHOW_REVIEW_COUNT, \
CFG_BIBRANK_SHOW_CITATION_LINKS, \
CFG_INSPIRE_SITE, \
CFG_CERN_SITE
from invenio.bibrankadminlib import \
write_outcome, \
modify_translations, \
get_def_name, \
get_name, \
get_languages, \
addadminbox, \
tupletotable, \
createhiddenform
from invenio.dbquery import \
run_sql, \
get_table_update_time
from invenio.websearch_external_collections import \
external_collections_dictionary, \
external_collection_sort_engine_by_name, \
external_collection_get_state, \
external_collection_get_update_state_list, \
external_collection_apply_changes
from invenio.websearch_external_collections_utils import \
get_collection_descendants
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_STATES_NAME
#from invenio.bibformat_elements import bfe_references
#from invenio.bibformat_engine import BibFormatObject
from invenio.bibdocfile import BibRecDocs
from invenio.messages import gettext_set_language
#from invenio.bibrank_citation_searcher import get_cited_by
from invenio.access_control_admin import acc_get_action_id
from invenio.access_control_config import VIEWRESTRCOLL
from invenio.errorlib import register_exception
from invenio.intbitset import intbitset
from invenio.bibrank_citation_searcher import get_cited_by, get_cited_by_count
from invenio.bibrecord import record_get_field_instances
def getnavtrail(previous = ''):
"""Get the navtrail"""
navtrail = """<a class="navtrail" href="%s/help/admin">Admin Area</a> """ % (CFG_SITE_URL,)
navtrail = navtrail + previous
return navtrail
def fix_collection_scores():
"""
    Re-calculate and re-normalize the scores of the collection relationships.
"""
for id_dad in intbitset(run_sql("SELECT id_dad FROM collection_collection")):
for index, id_son in enumerate(run_sql("SELECT id_son FROM collection_collection WHERE id_dad=%s ORDER BY score DESC", (id_dad, ))):
run_sql("UPDATE collection_collection SET score=%s WHERE id_dad=%s AND id_son=%s", (index * 10 + 10, id_dad, id_son[0]))
def perform_modifytranslations(colID, ln, sel_type='', trans=[], confirm=-1, callback='yes'):
"""Modify the translations of a collection
sel_type - the nametype to modify
trans - the translations in the same order as the languages from get_languages()"""
output = ''
subtitle = ''
sitelangs = get_languages()
if sel_type in ('r', 'v', 'l'):
table = 'collectionbox'
identifier_column = "id_collection"
else:
table = 'collection'
identifier_column = None
if type(trans) is str:
trans = [trans]
if confirm in ["2", 2] and colID:
finresult = modify_translations(colID, sitelangs, sel_type, trans, table, identifier_column)
col_dict = dict(get_def_name('', "collection"))
if colID and col_dict.has_key(int(colID)):
colID = int(colID)
subtitle = """<a name="3">3. Modify translations for collection '%s'</a> <small>[<a href="%s/help/admin/websearch-admin-guide#3.3">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
if sel_type == '':
sel_type = get_col_nametypes()[0][0]
header = ['Language', 'Translation']
actions = []
types = get_col_nametypes()
types.extend([('v', '"Focus on" box'), ('r', '"Narrow by" box'), ('l', '"Latest additions" box')])
if len(types) > 1:
text = """
<span class="adminlabel">Name type</span>
<select name="sel_type" class="admin_w200">
"""
for (key, value) in types:
text += """<option value="%s" %s>%s""" % (key, key == sel_type and 'selected="selected"' or '', value)
trans_names = get_name(colID, ln, key, "collection")
if trans_names and trans_names[0][0]:
text += ": %s" % trans_names[0][0]
text += "</option>"
text += """</select>"""
output += createhiddenform(action="modifytranslations#3",
text=text,
button="Select",
colID=colID,
ln=ln,
confirm=0)
if confirm in [-1, "-1", 0, "0"]:
trans = []
for (key, value) in sitelangs:
try:
trans_names = get_name(colID, key, sel_type, table, identifier_column)
trans.append(trans_names[0][0])
except StandardError, e:
trans.append('')
for nr in range(0, len(sitelangs)):
actions.append(["%s" % (sitelangs[nr][1],)])
actions[-1].append('<input type="text" name="trans" size="30" value="%s"/>' % trans[nr])
text = tupletotable(header=header, tuple=actions)
output += createhiddenform(action="modifytranslations#3",
text=text,
button="Modify",
colID=colID,
sel_type=sel_type,
ln=ln,
confirm=2)
if sel_type and len(trans) and confirm in ["2", 2]:
output += write_outcome(finresult)
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_modifytranslations", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyrankmethods(colID, ln, func='', rnkID='', confirm=0, callback='yes'):
"""Modify which rank methods is visible to the collection
func - remove or add rank method
rnkID - the id of the rank method."""
output = ""
subtitle = ""
col_dict = dict(get_def_name('', "collection"))
rnk_dict = dict(get_def_name('', "rnkMETHOD"))
if colID and col_dict.has_key(int(colID)):
colID = int(colID)
if func in ["0", 0] and confirm in ["1", 1]:
finresult = attach_rnk_col(colID, rnkID)
elif func in ["1", 1] and confirm in ["1", 1]:
finresult = detach_rnk_col(colID, rnkID)
subtitle = """<a name="9">9. Modify rank options for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.9">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = """
<dl>
<dt>The rank methods enabled for the collection '%s' is:</dt>
""" % col_dict[colID]
rnkmethods = get_col_rnk(colID, ln)
output += """<dd>"""
if not rnkmethods:
output += """No rank methods"""
else:
for id, name in rnkmethods:
output += """%s, """ % name
output += """</dd>
</dl>
"""
rnk_list = get_def_name('', "rnkMETHOD")
rnk_dict_in_col = dict(get_col_rnk(colID, ln))
rnk_list = filter(lambda x: not rnk_dict_in_col.has_key(x[0]), rnk_list)
if rnk_list:
text = """
<span class="adminlabel">Enable:</span>
<select name="rnkID" class="admin_w200">
<option value="-1">- select rank method -</option>
"""
for (id, name) in rnk_list:
text += """<option value="%s" %s>%s</option>""" % (id, (func in ["0", 0] and confirm in ["0", 0] and int(rnkID) == int(id)) and 'selected="selected"' or '' , name)
text += """</select>"""
output += createhiddenform(action="modifyrankmethods#9",
text=text,
button="Enable",
colID=colID,
ln=ln,
func=0,
confirm=1)
if confirm in ["1", 1] and func in ["0", 0] and int(rnkID) != -1:
output += write_outcome(finresult)
elif confirm not in ["0", 0] and func in ["0", 0]:
output += """<b><span class="info">Please select a rank method.</span></b>"""
coll_list = get_col_rnk(colID, ln)
if coll_list:
text = """
<span class="adminlabel">Disable:</span>
<select name="rnkID" class="admin_w200">
<option value="-1">- select rank method-</option>
"""
for (id, name) in coll_list:
text += """<option value="%s" %s>%s</option>""" % (id, (func in ["1", 1] and confirm in ["0", 0] and int(rnkID) == int(id)) and 'selected="selected"' or '' , name)
text += """</select>"""
output += createhiddenform(action="modifyrankmethods#9",
text=text,
button="Disable",
colID=colID,
ln=ln,
func=1,
confirm=1)
if confirm in ["1", 1] and func in ["1", 1] and int(rnkID) != -1:
output += write_outcome(finresult)
elif confirm not in ["0", 0] and func in ["1", 1]:
output += """<b><span class="info">Please select a rank method.</span></b>"""
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_modifyrankmethods", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_addcollectiontotree(colID, ln, add_dad='', add_son='', rtype='', mtype='', callback='yes', confirm=-1):
"""Form to add a collection to the tree.
add_dad - the dad to add the collection to
add_son - the collection to add
rtype - add it as a regular or virtual
mtype - add it to the regular or virtual tree."""
output = ""
output2 = ""
subtitle = """Attach collection to tree <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#2.2">?</a>]</small>""" % (CFG_SITE_URL)
col_dict = dict(get_def_name('', "collection"))
if confirm not in [-1, "-1"] and not (add_son and add_dad and rtype):
output2 += """<b><span class="info">All fields must be filled.</span></b><br /><br />
"""
elif add_son and add_dad and rtype:
add_son = int(add_son)
add_dad = int(add_dad)
if confirm not in [-1, "-1"]:
if add_son == add_dad:
output2 += """<b><span class="info">Cannot add a collection as a pointer to itself.</span></b><br /><br />
"""
elif check_col(add_dad, add_son):
res = add_col_dad_son(add_dad, add_son, rtype)
output2 += write_outcome(res)
if res[0] == 1:
output2 += """<b><span class="info"><br /> The collection will appear on your website after the next webcoll run. You can either run it manually or wait until bibsched does it for you.</span></b><br /><br />
"""
else:
output2 += """<b><span class="info">Cannot add the collection '%s' as a %s subcollection of '%s' since it will either create a loop, or the association already exists.</span></b><br /><br />
""" % (col_dict[add_son], (rtype=="r" and 'regular' or 'virtual'), col_dict[add_dad])
add_son = ''
add_dad = ''
rtype = ''
tree = get_col_tree(colID)
col_list = col_dict.items()
col_list.sort(compare_on_val)
output = show_coll_not_in_tree(colID, ln, col_dict)
text = """
<span class="adminlabel">Attach collection:</span>
<select name="add_son" class="admin_w200">
<option value="">- select collection -</option>
"""
for (id, name) in col_list:
if id != colID:
text += """<option value="%s" %s>%s</option>""" % (id, str(id)==str(add_son) and 'selected="selected"' or '', name)
text += """
</select><br />
<span class="adminlabel">to parent collection:</span>
<select name="add_dad" class="admin_w200">
<option value="">- select parent collection -</option>
"""
for (id, name) in col_list:
text += """<option value="%s" %s>%s</option>
""" % (id, str(id)==add_dad and 'selected="selected"' or '', name)
text += """</select><br />
"""
text += """
<span class="adminlabel">with relationship:</span>
<select name="rtype" class="admin_w200">
<option value="">- select relationship -</option>
<option value="r" %s>Regular (Narrow by...)</option>
<option value="v" %s>Virtual (Focus on...)</option>
</select>
""" % ((rtype=="r" and 'selected="selected"' or ''), (rtype=="v" and 'selected="selected"' or ''))
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/addcollectiontotree" % CFG_SITE_URL,
text=text,
button="Add",
colID=colID,
ln=ln,
confirm=1)
output += output2
#output += perform_showtree(colID, ln)
body = [output]
if callback:
return perform_index(colID, ln, mtype="perform_addcollectiontotree", content=addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_addcollection(colID, ln, colNAME='', dbquery='', callback="yes", confirm=-1):
"""form to add a new collection.
colNAME - the name of the new collection
dbquery - the dbquery of the new collection"""
output = ""
subtitle = """Create new collection <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#2.1">?</a>]</small>""" % (CFG_SITE_URL)
text = """
<span class="adminlabel">Default name</span>
<input class="admin_w200" type="text" name="colNAME" value="%s" /><br />
""" % colNAME
output = createhiddenform(action="%s/admin/websearch/websearchadmin.py/addcollection" % CFG_SITE_URL,
text=text,
colID=colID,
ln=ln,
button="Add collection",
confirm=1)
if colNAME and confirm in ["1", 1]:
res = add_col(colNAME, '')
output += write_outcome(res)
if res[0] == 1:
output += perform_addcollectiontotree(colID=colID, ln=ln, add_son=res[1], callback='')
elif confirm not in ["-1", -1]:
output += """<b><span class="info">Please give the collection a name.</span></b>"""
body = [output]
if callback:
return perform_index(colID, ln=ln, mtype="perform_addcollection", content=addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifydbquery(colID, ln, dbquery='', callback='yes', confirm=-1):
"""form to modify the dbquery of the collection.
dbquery - the dbquery of the collection."""
subtitle = ''
output = ""
col_dict = dict(get_def_name('', "collection"))
if colID and col_dict.has_key(int(colID)):
colID = int(colID)
subtitle = """<a name="1">1. Modify collection query for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.1">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
if confirm == -1:
res = run_sql("SELECT dbquery FROM collection WHERE id=%s" , (colID,))
dbquery = res[0][0]
if not dbquery:
dbquery = ''
reg_sons = len(get_col_tree(colID, 'r'))
vir_sons = len(get_col_tree(colID, 'v'))
if reg_sons > 1:
if dbquery:
output += "Warning: This collection got subcollections, and should because of this not have a collection query, for further explanation, check the WebSearch Guide<br />"
elif reg_sons <= 1:
if not dbquery:
output += "Warning: This collection does not have any subcollections, and should because of this have a collection query, for further explanation, check the WebSearch Guide<br />"
text = """
<span class="adminlabel">Query</span>
<input class="admin_w200" type="text" name="dbquery" value="%s" /><br />
""" % cgi.escape(dbquery, 1)
output += createhiddenform(action="modifydbquery",
text=text,
button="Modify",
colID=colID,
ln=ln,
confirm=1)
if confirm in ["1", 1]:
res = modify_dbquery(colID, dbquery)
if res:
if dbquery == "":
text = """<b><span class="info">Query removed for this collection.</span></b>"""
else:
text = """<b><span class="info">Query set for this collection.</span></b>"""
else:
text = """<b><span class="info">Sorry, could not change query.</span></b>"""
output += text
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_modifydbquery", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifycollectiontree(colID, ln, move_up='', move_down='', move_from='', move_to='', delete='', rtype='', callback='yes', confirm=0):
"""to modify the collection tree: move a collection up and down, delete a collection, or change the father of the collection.
colID - the main collection of the tree, the root
move_up - move this collection up (is not the collection id, but the place in the tree)
    move_down - move this collection down (is not the collection id, but the place in the tree)
    move_from - move this collection from the current position (is not the collection id, but the place in the tree)
move_to - move the move_from collection and set this as it's father. (is not the collection id, but the place in the tree)
delete - delete this collection from the tree (is not the collection id, but the place in the tree)
rtype - the type of the collection in the tree, regular or virtual"""
colID = int(colID)
tree = get_col_tree(colID, rtype)
col_dict = dict(get_def_name('', "collection"))
subtitle = """Modify collection tree: %s <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#2.3">?</a>] <a href="%s/admin/websearch/websearchadmin.py/showtree?colID=%s&ln=%s">Printer friendly version</a></small>""" % (col_dict[colID], CFG_SITE_URL, CFG_SITE_URL, colID, ln)
fin_output = ""
output = ""
try:
if move_up:
move_up = int(move_up)
switch = find_last(tree, move_up)
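            # find_last presumably returns the position of the preceding sibling; swapping the
            # tree scores of the two nodes moves this collection one step up in the display order.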
if switch and switch_col_treescore(tree[move_up], tree[switch]):
output += """<b><span class="info">Moved the %s collection '%s' up and '%s' down.</span></b><br /><br />
""" % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_up][0]], col_dict[tree[switch][0]])
else:
output += """<b><span class="info">Could not move the %s collection '%s' up and '%s' down.</span></b><br /><br />
""" % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_up][0]], col_dict[tree[switch][0]])
elif move_down:
move_down = int(move_down)
switch = find_next(tree, move_down)
if switch and switch_col_treescore(tree[move_down], tree[switch]):
output += """<b><span class="info">Moved the %s collection '%s' down and '%s' up.</span></b><br /><br />
""" % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_down][0]], col_dict[tree[switch][0]])
else:
output += """<b><span class="info">Could not move the %s collection '%s' up and '%s' down.</span></b><br /><br />
""" % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_up][0]],col_dict[tree[switch][0]])
elif delete:
delete = int(delete)
if confirm in [0, "0"]:
if col_dict[tree[delete][0]] != col_dict[tree[delete][3]]:
text = """<b>Do you want to remove the %s collection '%s' and its subcollections in the %s collection '%s'.</b>
""" % ((tree[delete][4]=="r" and 'regular' or 'virtual'), col_dict[tree[delete][0]], (rtype=="r" and 'regular' or 'virtual'), col_dict[tree[delete][3]])
else:
text = """<b>Do you want to remove all subcollections of the %s collection '%s'.</b>
""" % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[delete][3]])
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/modifycollectiontree#tree" % CFG_SITE_URL,
text=text,
button="Confirm",
colID=colID,
delete=delete,
rtype=rtype,
ln=ln,
confirm=1)
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/index?mtype=perform_modifycollectiontree#tree" % CFG_SITE_URL,
text="<b>To cancel</b>",
button="Cancel",
colID=colID,
ln=ln)
else:
if remove_col_subcol(tree[delete][0], tree[delete][3], rtype):
if col_dict[tree[delete][0]] != col_dict[tree[delete][3]]:
output += """<b><span class="info">Removed the %s collection '%s' and its subcollections in subdirectory '%s'.</span></b><br /><br />
""" % ((tree[delete][4]=="r" and 'regular' or 'virtual'), col_dict[tree[delete][0]], col_dict[tree[delete][3]])
else:
output += """<b><span class="info">Removed the subcollections of the %s collection '%s'.</span></b><br /><br />
""" % ((rtype=="r" and 'regular' or 'virtual'), col_dict[tree[delete][3]])
else:
output += """<b><span class="info">Could not remove the collection from the tree.</span></b><br /><br />
"""
delete = ''
elif move_from and not move_to:
move_from_rtype = move_from[0]
move_from_id = int(move_from[1:len(move_from)])
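            # move_from / move_to are encoded as '<rtype><tree position>', e.g. 'r3' (hypothetical)
            # for the regular node at position 3; the first character is the tree type.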
text = """<b>Select collection to place the %s collection '%s' under.</b><br /><br />
""" % ((move_from_rtype=="r" and 'regular' or 'virtual'), col_dict[tree[move_from_id][0]])
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/index?mtype=perform_modifycollectiontree#tree" % CFG_SITE_URL,
text=text,
button="Cancel",
colID=colID,
ln=ln)
elif move_from and move_to:
move_from_rtype = move_from[0]
move_from_id = int(move_from[1:len(move_from)])
move_to_rtype = move_to[0]
move_to_id = int(move_to[1:len(move_to)])
tree_from = get_col_tree(colID, move_from_rtype)
tree_to = get_col_tree(colID, move_to_rtype)
if confirm in [0, '0']:
if move_from_id == move_to_id and move_from_rtype == move_to_rtype:
output += """<b><span class="info">Cannot move to itself.</span></b><br /><br />
"""
elif tree_from[move_from_id][3] == tree_to[move_to_id][0] and move_from_rtype==move_to_rtype:
output += """<b><span class="info">The collection is already there.</span></b><br /><br />
"""
elif check_col(tree_to[move_to_id][0], tree_from[move_from_id][0]) or (tree_to[move_to_id][0] == 1 and tree_from[move_from_id][3] == tree_to[move_to_id][0] and move_from_rtype != move_to_rtype):
text = """<b>Move %s collection '%s' to the %s collection '%s'.</b>
""" % ((tree_from[move_from_id][4]=="r" and 'regular' or 'virtual'), col_dict[tree_from[move_from_id][0]], (tree_to[move_to_id][4]=="r" and 'regular' or 'virtual'), col_dict[tree_to[move_to_id][0]])
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/modifycollectiontree#tree" % CFG_SITE_URL,
text=text,
button="Confirm",
colID=colID,
move_from=move_from,
move_to=move_to,
ln=ln,
rtype=rtype,
confirm=1)
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/index?mtype=perform_modifycollectiontree#tree" % CFG_SITE_URL,
text="""<b>To cancel</b>""",
button="Cancel",
colID=colID,
ln=ln)
else:
output += """<b><span class="info">Cannot move the collection '%s' and set it as a subcollection of '%s' since it will create a loop.</span></b><br /><br />
""" % (col_dict[tree_from[move_from_id][0]], col_dict[tree_to[move_to_id][0]])
else:
if (move_to_id != 0 and move_col_tree(tree_from[move_from_id], tree_to[move_to_id])) or (move_to_id == 0 and move_col_tree(tree_from[move_from_id], tree_to[move_to_id], move_to_rtype)):
output += """<b><span class="info">Moved %s collection '%s' to the %s collection '%s'.</span></b><br /><br />
""" % ((move_from_rtype=="r" and 'regular' or 'virtual'), col_dict[tree_from[move_from_id][0]], (move_to_rtype=="r" and 'regular' or 'virtual'), col_dict[tree_to[move_to_id][0]])
else:
output += """<b><span class="info">Could not move %s collection '%s' to the %s collection '%s'.</span></b><br /><br />
""" % ((move_from_rtype=="r" and 'regular' or 'virtual'), col_dict[tree_from[move_from_id][0]], (move_to_rtype=="r" and 'regular' or 'virtual'), col_dict[tree_to[move_to_id][0]])
move_from = ''
move_to = ''
else:
output += """
"""
except StandardError, e:
register_exception()
return """<b><span class="info">An error occured.</span></b>
"""
output += """<table border ="0" width="100%">
<tr><td width="50%">
<b>Narrow by collection:</b>
</td><td width="50%">
<b>Focus on...:</b>
</td></tr><tr><td valign="top">
"""
tree = get_col_tree(colID, 'r')
output += create_colltree(tree, col_dict, colID, ln, move_from, move_to, 'r', "yes")
output += """</td><td valign="top">
"""
tree = get_col_tree(colID, 'v')
output += create_colltree(tree, col_dict, colID, ln, move_from, move_to, 'v', "yes")
output += """</td>
</tr>
</table>
"""
body = [output]
if callback:
return perform_index(colID, ln, mtype="perform_modifycollectiontree", content=addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_showtree(colID, ln):
"""create collection tree/hiarchy"""
col_dict = dict(get_def_name('', "collection"))
subtitle = "Collection tree: %s" % col_dict[int(colID)]
output = """<table border ="0" width="100%">
<tr><td width="50%">
<b>Narrow by collection:</b>
</td><td width="50%">
<b>Focus on...:</b>
</td></tr><tr><td valign="top">
"""
tree = get_col_tree(colID, 'r')
output += create_colltree(tree, col_dict, colID, ln, '', '', 'r', '')
output += """</td><td valign="top">
"""
tree = get_col_tree(colID, 'v')
output += create_colltree(tree, col_dict, colID, ln, '', '', 'v', '')
output += """</td>
</tr>
</table>
"""
body = [output]
return addadminbox(subtitle, body)
def perform_addportalbox(colID, ln, title='', body='', callback='yes', confirm=-1):
"""form to add a new portalbox
title - the title of the portalbox
body - the body of the portalbox"""
col_dict = dict(get_def_name('', "collection"))
colID = int(colID)
subtitle = """<a name="5.1"></a>Create new portalbox"""
text = """
<span class="adminlabel">Title</span>
<textarea cols="50" rows="1" class="admin_wvar" type="text" name="title">%s</textarea><br />
<span class="adminlabel">Body</span>
<textarea cols="50" rows="10" class="admin_wvar" type="text" name="body">%s</textarea><br />
""" % (cgi.escape(title), cgi.escape(body))
output = createhiddenform(action="addportalbox#5.1",
text=text,
button="Add",
colID=colID,
ln=ln,
confirm=1)
if body and confirm in [1, "1"]:
res = add_pbx(title, body)
output += write_outcome(res)
if res[1] == 1:
output += """<b><span class="info"><a href="addexistingportalbox?colID=%s&ln=%s&pbxID=%s#5">Add portalbox to collection</a></span></b>""" % (colID, ln, res[1])
elif confirm not in [-1, "-1"]:
output += """<b><span class="info">Body field must be filled.</span></b>
"""
body = [output]
return perform_showportalboxes(colID, ln, content=addadminbox(subtitle, body))
def perform_addexistingportalbox(colID, ln, pbxID=-1, score=0, position='', sel_ln='', callback='yes', confirm=-1):
"""form to add an existing portalbox to a collection.
colID - the collection to add the portalbox to
pbxID - the portalbox to add
score - the importance of the portalbox.
position - the position of the portalbox on the page
sel_ln - the language of the portalbox"""
subtitle = """<a name="5.2"></a>Add existing portalbox to collection"""
output = ""
colID = int(colID)
res = get_pbx()
pos = get_pbx_pos()
lang = dict(get_languages())
col_dict = dict(get_def_name('', "collection"))
pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
col_pbx = get_col_pbx(colID)
col_pbx = dict(map(lambda x: (x[0], x[5]), col_pbx))
if len(res) > 0:
text = """
<span class="adminlabel">Portalbox</span>
<select name="pbxID" class="admin_w200">
<option value="-1">- Select portalbox -</option>
"""
for (id, t_title, t_body) in res:
text += """<option value="%s" %s>%s - %s...</option>\n""" % \
(id, id == int(pbxID) and 'selected="selected"' or '',
t_title[:40], cgi.escape(t_body[0:40 - min(40, len(t_title))]))
text += """</select><br />
<span class="adminlabel">Language</span>
<select name="sel_ln" class="admin_w200">
<option value="">- Select language -</option>
"""
listlang = lang.items()
listlang.sort()
for (key, name) in listlang:
text += """<option value="%s" %s>%s</option>
""" % (key, key == sel_ln and 'selected="selected"' or '', name)
text += """</select><br />
<span class="adminlabel">Position</span>
<select name="position" class="admin_w200">
<option value="">- Select position -</option>
"""
listpos = pos.items()
listpos.sort()
for (key, name) in listpos:
text += """<option value="%s" %s>%s</option>""" % (key, key==position and 'selected="selected"' or '', name)
text += "</select>"
output += createhiddenform(action="addexistingportalbox#5.2",
text=text,
button="Add",
colID=colID,
ln=ln,
confirm=1)
else:
output = """No existing portalboxes to add, please create a new one.
"""
if pbxID > -1 and position and sel_ln and confirm in [1, "1"]:
pbxID = int(pbxID)
res = add_col_pbx(colID, pbxID, sel_ln, position, '')
output += write_outcome(res)
elif pbxID > -1 and confirm not in [-1, "-1"]:
output += """<b><span class="info">All fields must be filled.</span></b>
"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showportalboxes(colID, ln, content=output)
def perform_deleteportalbox(colID, ln, pbxID=-1, callback='yes', confirm=-1):
"""form to delete a portalbox which is not in use.
colID - the current collection.
pbxID - the id of the portalbox"""
subtitle = """<a name="5.3"></a>Delete an unused portalbox"""
output = ""
colID = int(colID)
if pbxID not in [-1, "-1"] and confirm in [1, "1"]:
ares = get_pbx()
pbx_dict = dict(map(lambda x: (x[0], x[1]), ares))
if pbx_dict.has_key(int(pbxID)):
pname = pbx_dict[int(pbxID)]
ares = delete_pbx(int(pbxID))
else:
return """<b><span class="info">This portalbox does not exist</span></b>"""
res = get_pbx()
col_dict = dict(get_def_name('', "collection"))
pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
col_pbx = get_col_pbx()
col_pbx = dict(map(lambda x: (x[0], x[5]), col_pbx))
if len(res) > 0:
text = """
<span class="adminlabel">Portalbox</span>
<select name="pbxID" class="admin_w200">
"""
text += """<option value="-1">- Select portalbox -"""
for (id, t_title, t_body) in res:
if not col_pbx.has_key(id):
text += """<option value="%s" %s>%s - %s...""" % (id, id == int(pbxID) and 'selected="selected"' or '', t_title, cgi.escape(t_body[0:10]))
text += "</option>"
text += """</select><br />"""
output += createhiddenform(action="deleteportalbox#5.3",
text=text,
button="Delete",
colID=colID,
ln=ln,
confirm=1)
if pbxID not in [-1, "-1"]:
pbxID = int(pbxID)
if confirm in [1, "1"]:
output += write_outcome(ares)
elif confirm not in [-1, "-1"]:
output += """<b><span class="info">Choose a portalbox to delete.</span></b>
"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showportalboxes(colID, ln, content=output)
def perform_modifyportalbox(colID, ln, pbxID=-1, score='', position='', sel_ln='', title='', body='', callback='yes', confirm=-1):
"""form to modify a portalbox in a collection, or change the portalbox itself.
colID - the id of the collection.
pbxID - the portalbox to change
score - the score of the portalbox connected to colID which should be changed.
position - the position of the portalbox in collection colID to change."""
subtitle = ""
output = ""
colID = int(colID)
res = get_pbx()
pos = get_pbx_pos()
lang = dict(get_languages())
col_dict = dict(get_def_name('', "collection"))
pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
col_pbx = get_col_pbx(colID)
col_pbx = dict(map(lambda x: (x[0], x[5]), col_pbx))
if pbxID not in [-1, "-1"]:
pbxID = int(pbxID)
subtitle = """<a name="5.4"></a>Modify portalbox '%s' for this collection""" % pbx_dict[pbxID]
col_pbx = get_col_pbx(colID)
if not (score and position) and not (body and title):
for (id_pbx, id_collection, tln, score, position, title, body) in col_pbx:
if id_pbx == pbxID:
break
output += """Collection (presentation) specific values (Changes implies only to this collection.)<br />"""
text = """
<span class="adminlabel">Position</span>
<select name="position" class="admin_w200">
"""
listpos = pos.items()
listpos.sort()
for (key, name) in listpos:
text += """<option value="%s" %s>%s""" % (key, key==position and 'selected="selected"' or '', name)
text += "</option>"
text += """</select><br />"""
output += createhiddenform(action="modifyportalbox#5.4",
text=text,
button="Modify",
colID=colID,
pbxID=pbxID,
score=score,
title=title,
body=cgi.escape(body, 1),
sel_ln=sel_ln,
ln=ln,
confirm=3)
if pbxID > -1 and score and position and confirm in [3, "3"]:
pbxID = int(pbxID)
res = modify_pbx(colID, pbxID, sel_ln, score, position, '', '')
res2 = get_pbx()
pbx_dict = dict(map(lambda x: (x[0], x[1]), res2))
output += write_outcome(res)
output += """<br />Portalbox (content) specific values (any changes appears everywhere the portalbox is used.)"""
text = """
<span class="adminlabel">Title</span>
<textarea cols="50" rows="1" class="admin_wvar" type="text" name="title">%s</textarea><br />
""" % cgi.escape(title)
text += """
<span class="adminlabel">Body</span>
<textarea cols="50" rows="10" class="admin_wvar" type="text" name="body">%s</textarea><br />
""" % cgi.escape(body)
output += createhiddenform(action="modifyportalbox#5.4",
text=text,
button="Modify",
colID=colID,
pbxID=pbxID,
sel_ln=sel_ln,
score=score,
position=position,
ln=ln,
confirm=4)
if pbxID > -1 and confirm in [4, "4"]:
pbxID = int(pbxID)
res = modify_pbx(colID, pbxID, sel_ln, '', '', title, body)
output += write_outcome(res)
else:
output = """No portalbox to modify."""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showportalboxes(colID, ln, content=output)
def perform_switchpbxscore(colID, id_1, id_2, sel_ln, ln):
"""Switch the score of id_1 and id_2 in collection_portalbox.
colID - the current collection
id_1/id_2 - the id's to change the score for.
sel_ln - the language of the portalbox"""
output = ""
res = get_pbx()
pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
res = switch_pbx_score(colID, id_1, id_2, sel_ln)
output += write_outcome(res)
return perform_showportalboxes(colID, ln, content=output)
def perform_showportalboxes(colID, ln, callback='yes', content='', confirm=-1):
"""show the portalboxes of this collection.
colID - the portalboxes to show the collection for."""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
subtitle = """<a name="5">5. Modify portalboxes for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.5">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = ""
pos = get_pbx_pos()
output = """<dl>
<dt>Portalbox actions (not related to this collection)</dt>
<dd><a href="addportalbox?colID=%s&ln=%s#5.1">Create new portalbox</a></dd>
<dd><a href="deleteportalbox?colID=%s&ln=%s#5.3">Delete an unused portalbox</a></dd>
<dt>Collection specific actions</dt>
<dd><a href="addexistingportalbox?colID=%s&ln=%s#5.2">Add existing portalbox to collection</a></dd>
</dl>
""" % (colID, ln, colID, ln, colID, ln)
header = ['Position', 'Language', '', 'Title', 'Actions']
actions = []
sitelangs = get_languages()
lang = dict(sitelangs)
pos_list = pos.items()
pos_list.sort()
if len(get_col_pbx(colID)) > 0:
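        # Portalboxes are listed grouped by language and page position; the up/down arrows built
        # below reorder boxes (by switching scores) only within the same language/position group.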
for (key, value) in sitelangs:
for (pos_key, pos_value) in pos_list:
res = get_col_pbx(colID, key, pos_key)
i = 0
for (pbxID, colID_pbx, tln, score, position, title, body) in res:
move = """<table cellspacing="1" cellpadding="0" border="0"><tr><td>"""
if i != 0:
move += """<a href="%s/admin/websearch/websearchadmin.py/switchpbxscore?colID=%s&ln=%s&id_1=%s&id_2=%s&sel_ln=%s&rand=%s#5"><img border="0" src="%s/img/smallup.gif" title="Move portalbox up" alt="up" /></a>""" % (CFG_SITE_URL, colID, ln, pbxID, res[i - 1][0], tln, random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
move += "</td><td>"
i += 1
if i != len(res):
move += """<a href="%s/admin/websearch/websearchadmin.py/switchpbxscore?colID=%s&ln=%s&id_1=%s&id_2=%s&sel_ln=%s&rand=%s#5"><img border="0" src="%s/img/smalldown.gif" title="Move portalbox down" alt="down" /></a>""" % (CFG_SITE_URL, colID, ln, pbxID, res[i][0], tln, random.randint(0, 1000), CFG_SITE_URL)
move += """</td></tr></table>"""
actions.append(["%s" % (i==1 and pos[position] or ''), "%s" % (i==1 and lang[tln] or ''), move, "%s" % title])
for col in [(('Modify', 'modifyportalbox'), ('Remove', 'removeportalbox'),)]:
actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&pbxID=%s&sel_ln=%s#5.4">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, pbxID, tln, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&pbxID=%s&sel_ln=%s#5.5">%s</a>' % (CFG_SITE_URL, function, colID, ln, pbxID, tln, str)
output += tupletotable(header=header, tuple=actions)
else:
output += """No portalboxes exists for this collection"""
output += content
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_showportalboxes", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_removeportalbox(colID, ln, pbxID='', sel_ln='', callback='yes', confirm=0):
"""form to remove a portalbox from a collection.
colID - the current collection, remove the portalbox from this collection.
sel_ln - remove the portalbox with this language
pbxID - remove the portalbox with this id"""
subtitle = """<a name="5.5"></a>Remove portalbox"""
output = ""
col_dict = dict(get_def_name('', "collection"))
res = get_pbx()
pbx_dict = dict(map(lambda x: (x[0], x[1]), res))
if colID and pbxID and sel_ln:
colID = int(colID)
pbxID = int(pbxID)
if confirm in ["0", 0]:
text = """Do you want to remove the portalbox '%s' from the collection '%s'.""" % (pbx_dict[pbxID], col_dict[colID])
output += createhiddenform(action="removeportalbox#5.5",
text=text,
button="Confirm",
colID=colID,
pbxID=pbxID,
sel_ln=sel_ln,
confirm=1)
elif confirm in ["1", 1]:
res = remove_pbx(colID, pbxID, sel_ln)
output += write_outcome(res)
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showportalboxes(colID, ln, content=output)
def perform_switchfmtscore(colID, type, id_1, id_2, ln):
"""Switch the score of id_1 and id_2 in the table type.
colID - the current collection
id_1/id_2 - the id's to change the score for.
type - like "format" """
fmt_dict = dict(get_def_name('', "format"))
res = switch_score(colID, id_1, id_2, type)
output = write_outcome(res)
return perform_showoutputformats(colID, ln, content=output)
def perform_switchfldscore(colID, id_1, id_2, fmeth, ln):
"""Switch the score of id_1 and id_2 in collection_field_fieldvalue.
colID - the current collection
id_1/id_2 - the id's to change the score for."""
fld_dict = dict(get_def_name('', "field"))
res = switch_fld_score(colID, id_1, id_2)
output = write_outcome(res)
if fmeth == "soo":
return perform_showsortoptions(colID, ln, content=output)
elif fmeth == "sew":
return perform_showsearchfields(colID, ln, content=output)
elif fmeth == "seo":
return perform_showsearchoptions(colID, ln, content=output)
def perform_switchfldvaluescore(colID, id_1, id_fldvalue_1, id_fldvalue_2, ln):
"""Switch the score of id_1 and id_2 in collection_field_fieldvalue.
colID - the current collection
id_1/id_2 - the id's to change the score for."""
name_1 = run_sql("SELECT name from fieldvalue where id=%s", (id_fldvalue_1, ))[0][0]
name_2 = run_sql("SELECT name from fieldvalue where id=%s", (id_fldvalue_2, ))[0][0]
res = switch_fld_value_score(colID, id_1, id_fldvalue_1, id_fldvalue_2)
output = write_outcome(res)
return perform_modifyfield(colID, fldID=id_1, ln=ln, content=output)
def perform_addnewfieldvalue(colID, fldID, ln, name='', value='', callback="yes", confirm=-1):
"""form to add a new fieldvalue.
name - the name of the new fieldvalue
value - the value of the new fieldvalue
"""
output = ""
subtitle = """<a name="7.4"></a>Add new value"""
text = """
<span class="adminlabel">Display name</span>
<input class="admin_w200" type="text" name="name" value="%s" /><br />
<span class="adminlabel">Search value</span>
<input class="admin_w200" type="text" name="value" value="%s" /><br />
""" % (name, value)
output = createhiddenform(action="%s/admin/websearch/websearchadmin.py/addnewfieldvalue" % CFG_SITE_URL,
text=text,
colID=colID,
fldID=fldID,
ln=ln,
button="Add",
confirm=1)
if name and value and confirm in ["1", 1]:
res = add_fldv(name, value)
output += write_outcome(res)
if res[0] == 1:
res = add_col_fld(colID, fldID, 'seo', res[1])
if res[0] == 0:
output += "<br />" + write_outcome(res)
elif confirm not in ["-1", -1]:
output += """<b><span class="info">Please fill in name and value.</span></b>
"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_modifyfield(colID, fldID=fldID, ln=ln, content=output)
def perform_modifyfieldvalue(colID, fldID, fldvID, ln, name='', value='', callback="yes", confirm=-1):
"""form to modify a fieldvalue.
name - the name of the fieldvalue
value - the value of the fieldvalue
"""
if confirm in [-1, "-1"]:
res = get_fld_value(fldvID)
(id, name, value) = res[0]
output = ""
subtitle = """<a name="7.4"></a>Modify existing value"""
output = """<dl>
<dt><b><span class="info">Warning: Modifications done below will also inflict on all places the modified data is used.</span></b></dt>
</dl>"""
text = """
<span class="adminlabel">Display name</span>
<input class="admin_w200" type="text" name="name" value="%s" /><br />
<span class="adminlabel">Search value</span>
<input class="admin_w200" type="text" name="value" value="%s" /><br />
""" % (name, value)
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/modifyfieldvalue" % CFG_SITE_URL,
text=text,
colID=colID,
fldID=fldID,
fldvID=fldvID,
ln=ln,
button="Update",
confirm=1)
output += createhiddenform(action="%s/admin/websearch/websearchadmin.py/modifyfieldvalue" % CFG_SITE_URL,
text="Delete value and all associations",
colID=colID,
fldID=fldID,
fldvID=fldvID,
ln=ln,
button="Delete",
confirm=2)
if name and value and confirm in ["1", 1]:
res = update_fldv(fldvID, name, value)
output += write_outcome(res)
#if res:
# output += """<b><span class="info">Operation successfully completed.</span></b>"""
#else:
# output += """<b><span class="info">Operation failed.</span></b>"""
elif confirm in ["2", 2]:
res = delete_fldv(fldvID)
output += write_outcome(res)
elif confirm not in ["-1", -1]:
output += """<b><span class="info">Please fill in name and value.</span></b>"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_modifyfield(colID, fldID=fldID, ln=ln, content=output)
def perform_removefield(colID, ln, fldID='', fldvID='', fmeth='', callback='yes', confirm=0):
"""form to remove a field from a collection.
colID - the current collection, remove the field from this collection.
sel_ln - remove the field with this language
fldID - remove the field with this id"""
if fmeth == "soo":
field = "sort option"
elif fmeth == "sew":
field = "search field"
elif fmeth == "seo":
field = "search option"
else:
field = "field"
subtitle = """<a name="6.4"><a name="7.4"><a name="8.4"></a>Remove %s""" % field
output = ""
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
res = get_fld_value()
fldv_dict = dict(map(lambda x: (x[0], x[1]), res))
if colID and fldID:
colID = int(colID)
fldID = int(fldID)
if fldvID and fldvID != "None":
fldvID = int(fldvID)
if confirm in ["0", 0]:
text = """Do you want to remove the %s '%s' %s from the collection '%s'.""" % (field, fld_dict[fldID], (fldvID not in["", "None"] and "with value '%s'" % fldv_dict[fldvID] or ''), col_dict[colID])
output += createhiddenform(action="removefield#6.5",
text=text,
button="Confirm",
colID=colID,
fldID=fldID,
fldvID=fldvID,
fmeth=fmeth,
confirm=1)
elif confirm in ["1", 1]:
res = remove_fld(colID, fldID, fldvID)
output += write_outcome(res)
body = [output]
output = "<br />" + addadminbox(subtitle, body)
if fmeth == "soo":
return perform_showsortoptions(colID, ln, content=output)
elif fmeth == "sew":
return perform_showsearchfields(colID, ln, content=output)
elif fmeth == "seo":
return perform_showsearchoptions(colID, ln, content=output)
def perform_removefieldvalue(colID, ln, fldID='', fldvID='', fmeth='', callback='yes', confirm=0):
"""form to remove a field from a collection.
colID - the current collection, remove the field from this collection.
sel_ln - remove the field with this language
fldID - remove the field with this id"""
subtitle = """<a name="7.4"></a>Remove value"""
output = ""
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
res = get_fld_value()
fldv_dict = dict(map(lambda x: (x[0], x[1]), res))
if colID and fldID:
colID = int(colID)
fldID = int(fldID)
if fldvID and fldvID != "None":
fldvID = int(fldvID)
if confirm in ["0", 0]:
text = """Do you want to remove the value '%s' from the search option '%s'.""" % (fldv_dict[fldvID], fld_dict[fldID])
output += createhiddenform(action="removefieldvalue#7.4",
text=text,
button="Confirm",
colID=colID,
fldID=fldID,
fldvID=fldvID,
fmeth=fmeth,
confirm=1)
elif confirm in ["1", 1]:
res = remove_fld(colID, fldID, fldvID)
output += write_outcome(res)
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_modifyfield(colID, fldID=fldID, ln=ln, content=output)
def perform_rearrangefieldvalue(colID, fldID, ln, callback='yes', confirm=-1):
"""rearrang the fieldvalues alphabetically
colID - the collection
fldID - the field to rearrange the fieldvalue for
"""
subtitle = "Order values alphabetically"
output = ""
col_fldv = get_col_fld(colID, 'seo', fldID)
col_fldv = dict(map(lambda x: (x[1], x[0]), col_fldv))
fldv_names = get_fld_value()
fldv_names = map(lambda x: (x[0], x[1]), fldv_names)
if not col_fldv.has_key(None):
vscore = len(col_fldv)
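        # Walk the values in (assumed) alphabetical order and assign descending scores, so that
        # a higher score corresponds to an earlier, alphabetically ordered position.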
for (fldvID, name) in fldv_names:
if col_fldv.has_key(fldvID):
run_sql("UPDATE collection_field_fieldvalue SET score_fieldvalue=%s WHERE id_collection=%s and id_field=%s and id_fieldvalue=%s", (vscore, colID, fldID, fldvID))
vscore -= 1
output += write_outcome((1, ""))
else:
output += write_outcome((0, (0, "No values to order")))
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_modifyfield(colID, fldID, ln, content=output)
def perform_rearrangefield(colID, ln, fmeth, callback='yes', confirm=-1):
"""rearrang the fields alphabetically
colID - the collection
"""
subtitle = "Order fields alphabetically"
output = ""
col_fld = dict(map(lambda x: (x[0], x[1]), get_col_fld(colID, fmeth)))
fld_names = get_def_name('', "field")
if len(col_fld) > 0:
score = len(col_fld)
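        # Same idea as for field values: assign descending scores following the alphabetical
        # field name order so the fields display alphabetically.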
for (fldID, name) in fld_names:
if col_fld.has_key(fldID):
run_sql("UPDATE collection_field_fieldvalue SET score=%s WHERE id_collection=%s and id_field=%s", (score, colID, fldID))
score -= 1
output += write_outcome((1, ""))
else:
output += write_outcome((0, (0, "No fields to order")))
body = [output]
output = "<br />" + addadminbox(subtitle, body)
if fmeth == "soo":
return perform_showsortoptions(colID, ln, content=output)
elif fmeth == "sew":
return perform_showsearchfields(colID, ln, content=output)
elif fmeth == "seo":
return perform_showsearchoptions(colID, ln, content=output)
def perform_addexistingfieldvalue(colID, fldID, fldvID=-1, ln=CFG_SITE_LANG, callback='yes', confirm=-1):
"""form to add an existing fieldvalue to a field.
colID - the collection
fldID - the field to add the fieldvalue to
fldvID - the fieldvalue to add"""
subtitle = """</a><a name="7.4"></a>Add existing value to search option"""
output = ""
if fldvID not in [-1, "-1"] and confirm in [1, "1"]:
fldvID = int(fldvID)
ares = add_col_fld(colID, fldID, 'seo', fldvID)
colID = int(colID)
fldID = int(fldID)
lang = dict(get_languages())
res = get_def_name('', "field")
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(res)
col_fld = dict(map(lambda x: (x[0], x[1]), get_col_fld(colID, 'seo')))
fld_value = get_fld_value()
fldv_dict = dict(map(lambda x: (x[0], x[1]), fld_value))
text = """
<span class="adminlabel">Value</span>
<select name="fldvID" class="admin_w200">
<option value="-1">- Select value -</option>
"""
res = run_sql("SELECT id,name,value FROM fieldvalue ORDER BY name")
for (id, name, value) in res:
text += """<option value="%s" %s>%s - %s</option>
""" % (id, id == int(fldvID) and 'selected="selected"' or '', name, value)
text += """</select><br />"""
output += createhiddenform(action="addexistingfieldvalue#7.4",
text=text,
button="Add",
colID=colID,
fldID=fldID,
ln=ln,
confirm=1)
if fldvID not in [-1, "-1"] and confirm in [1, "1"]:
output += write_outcome(ares)
elif confirm in [1, "1"]:
output += """<b><span class="info">Select a value to add and try again.</span></b>"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_modifyfield(colID, fldID, ln, content=output)
def perform_addexistingfield(colID, ln, fldID=-1, fldvID=-1, fmeth='', callback='yes', confirm=-1):
"""form to add an existing field to a collection.
colID - the collection to add the field to
fldID - the field to add
sel_ln - the language of the field"""
subtitle = """<a name="6.2"></a><a name="7.2"></a><a name="8.2"></a>Add existing field to collection"""
output = ""
if fldID not in [-1, "-1"] and confirm in [1, "1"]:
fldID = int(fldID)
ares = add_col_fld(colID, fldID, fmeth, fldvID)
colID = int(colID)
lang = dict(get_languages())
res = get_def_name('', "field")
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(res)
col_fld = dict(map(lambda x: (x[0], x[1]), get_col_fld(colID, fmeth)))
fld_value = get_fld_value()
fldv_dict = dict(map(lambda x: (x[0], x[1]), fld_value))
if fldvID:
fldvID = int(fldvID)
text = """
<span class="adminlabel">Field</span>
<select name="fldID" class="admin_w200">
<option value="-1">- Select field -</option>
"""
for (id, var) in res:
if fmeth == 'seo' or (fmeth != 'seo' and not col_fld.has_key(id)):
text += """<option value="%s" %s>%s</option>
""" % (id, '', fld_dict[id])
text += """</select><br />"""
output += createhiddenform(action="addexistingfield#6.2",
text=text,
button="Add",
colID=colID,
fmeth=fmeth,
ln=ln,
confirm=1)
if fldID not in [-1, "-1"] and confirm in [1, "1"]:
output += write_outcome(ares)
elif fldID in [-1, "-1"] and confirm not in [-1, "-1"]:
output += """<b><span class="info">Select a field.</span></b>
"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
if fmeth == "soo":
return perform_showsortoptions(colID, ln, content=output)
elif fmeth == "sew":
return perform_showsearchfields(colID, ln, content=output)
elif fmeth == "seo":
return perform_showsearchoptions(colID, ln, content=output)
def perform_showsortoptions(colID, ln, callback='yes', content='', confirm=-1):
"""show the sort fields of this collection.."""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
fld_type = get_sort_nametypes()
subtitle = """<a name="8">8. Modify sort options for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.8">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = """<dl>
<dt>Field actions (not related to this collection)</dt>
<dd>Go to the BibIndex interface to modify the available sort options</dd>
<dt>Collection specific actions
<dd><a href="addexistingfield?colID=%s&ln=%s&fmeth=soo#8.2">Add sort option to collection</a></dd>
<dd><a href="rearrangefield?colID=%s&ln=%s&fmeth=soo#8.2">Order sort options alphabetically</a></dd>
</dl>
""" % (colID, ln, colID, ln)
header = ['', 'Sort option', 'Actions']
actions = []
sitelangs = get_languages()
lang = dict(sitelangs)
fld_type_list = fld_type.items()
if len(get_col_fld(colID, 'soo')) > 0:
res = get_col_fld(colID, 'soo')
i = 0
for (fldID, fldvID, stype, score, score_fieldvalue) in res:
move = """<table cellspacing="1" cellpadding="0" border="0"><tr><td>"""
if i != 0:
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=soo&rand=%s#8"><img border="0" src="%s/img/smallup.gif" title="Move up"></a>""" % (CFG_SITE_URL, colID, ln, fldID, res[i - 1][0], random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
move += "</td><td>"
i += 1
if i != len(res):
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=soo&rand=%s#8"><img border="0" src="%s/img/smalldown.gif" title="Move down"></a>""" % (CFG_SITE_URL, colID, ln, fldID, res[i][0], random.randint(0, 1000), CFG_SITE_URL)
move += """</td></tr></table>"""
actions.append([move, fld_dict[int(fldID)]])
for col in [(('Remove sort option', 'removefield'),)]:
actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fmeth=soo#8.4">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, fldID, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fmeth=soo#8.5">%s</a>' % (CFG_SITE_URL, function, colID, ln, fldID, str)
output += tupletotable(header=header, tuple=actions)
else:
output += """No sort options exists for this collection"""
output += content
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_showsortoptions", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_showsearchfields(colID, ln, callback='yes', content='', confirm=-1):
"""show the search fields of this collection.."""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
fld_type = get_sort_nametypes()
subtitle = """<a name="6">6. Modify search fields for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.6">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = """<dl>
<dt>Field actions (not related to this collection)</dt>
<dd>Go to the BibIndex interface to modify the available search fields</dd>
<dt>Collection specific actions
<dd><a href="addexistingfield?colID=%s&ln=%s&fmeth=sew#6.2">Add search field to collection</a></dd>
<dd><a href="rearrangefield?colID=%s&ln=%s&fmeth=sew#6.2">Order search fields alphabetically</a></dd>
</dl>
""" % (colID, ln, colID, ln)
header = ['', 'Search field', 'Actions']
actions = []
sitelangs = get_languages()
lang = dict(sitelangs)
fld_type_list = fld_type.items()
if len(get_col_fld(colID, 'sew')) > 0:
res = get_col_fld(colID, 'sew')
i = 0
for (fldID, fldvID, stype, score, score_fieldvalue) in res:
move = """<table cellspacing="1" cellpadding="0" border="0"><tr><td>"""
if i != 0:
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=sew&rand=%s#6"><img border="0" src="%s/img/smallup.gif" title="Move up"></a>""" % (CFG_SITE_URL, colID, ln, fldID, res[i - 1][0], random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
move += "</td><td>"
i += 1
if i != len(res):
move += '<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=sew&rand=%s#6"><img border="0" src="%s/img/smalldown.gif" title="Move down"></a>' % (CFG_SITE_URL, colID, ln, fldID, res[i][0], random.randint(0, 1000), CFG_SITE_URL)
move += """</td></tr></table>"""
actions.append([move, fld_dict[int(fldID)]])
for col in [(('Remove search field', 'removefield'),)]:
actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fmeth=sew#6.4">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, fldID, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s#6.5">%s</a>' % (CFG_SITE_URL, function, colID, ln, fldID, str)
output += tupletotable(header=header, tuple=actions)
else:
output += """No search fields exists for this collection"""
output += content
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_showsearchfields", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_showsearchoptions(colID, ln, callback='yes', content='', confirm=-1):
"""show the sort and search options of this collection.."""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
fld_type = get_sort_nametypes()
subtitle = """<a name="7">7. Modify search options for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.7">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = """<dl>
<dt>Field actions (not related to this collection)</dt>
<dd>Go to the BibIndex interface to modify the available search options</dd>
<dt>Collection specific actions
<dd><a href="addexistingfield?colID=%s&ln=%s&fmeth=seo#7.2">Add search option to collection</a></dd>
<dd><a href="rearrangefield?colID=%s&ln=%s&fmeth=seo#7.2">Order search options alphabetically</a></dd>
</dl>
""" % (colID, ln, colID, ln)
header = ['', 'Search option', 'Actions']
actions = []
sitelangs = get_languages()
lang = dict(sitelangs)
fld_type_list = fld_type.items()
fld_distinct = run_sql("SELECT distinct(id_field) FROM collection_field_fieldvalue WHERE type='seo' AND id_collection=%s ORDER by score desc", (colID, ))
if len(fld_distinct) > 0:
i = 0
for (id) in fld_distinct:
fldID = id[0]
col_fld = get_col_fld(colID, 'seo', fldID)
move = ""
if i != 0:
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=seo&rand=%s#7"><img border="0" src="%s/img/smallup.gif" title="Move up"></a>""" % (CFG_SITE_URL, colID, ln, fldID, fld_distinct[i - 1][0], random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
i += 1
if i != len(fld_distinct):
move += '<a href="%s/admin/websearch/websearchadmin.py/switchfldscore?colID=%s&ln=%s&id_1=%s&id_2=%s&fmeth=seo&rand=%s#7"><img border="0" src="%s/img/smalldown.gif" title="Move down"></a>' % (CFG_SITE_URL, colID, ln, fldID, fld_distinct[i][0], random.randint(0, 1000), CFG_SITE_URL)
actions.append([move, "%s" % fld_dict[fldID]])
for col in [(('Modify values', 'modifyfield'), ('Remove search option', 'removefield'),)]:
actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s#7.3">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, fldID, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fmeth=seo#7.3">%s</a>' % (CFG_SITE_URL, function, colID, ln, fldID, str)
output += tupletotable(header=header, tuple=actions)
else:
output += """No search options exists for this collection"""
output += content
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_showsearchoptions", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyfield(colID, fldID, fldvID='', ln=CFG_SITE_LANG, content='', callback='yes', confirm=0):
"""Modify the fieldvalues for a field"""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
fld_dict = dict(get_def_name('', "field"))
fld_type = get_sort_nametypes()
fldID = int(fldID)
subtitle = """<a name="7.3">Modify values for field '%s'</a>""" % (fld_dict[fldID])
output = """<dl>
<dt>Value specific actions
<dd><a href="addexistingfieldvalue?colID=%s&ln=%s&fldID=%s#7.4">Add existing value to search option</a></dd>
<dd><a href="addnewfieldvalue?colID=%s&ln=%s&fldID=%s#7.4">Add new value to search option</a></dd>
<dd><a href="rearrangefieldvalue?colID=%s&ln=%s&fldID=%s#7.4">Order values alphabetically</a></dd>
</dl>
""" % (colID, ln, fldID, colID, ln, fldID, colID, ln, fldID)
header = ['', 'Value name', 'Actions']
actions = []
sitelangs = get_languages()
lang = dict(sitelangs)
fld_type_list = fld_type.items()
col_fld = list(get_col_fld(colID, 'seo', fldID))
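    # A single row whose fieldvalue id is None means the search option is attached to the
    # collection but has no values associated with it yet.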
if len(col_fld) == 1 and col_fld[0][1] is None:
output += """<b><span class="info">No values added for this search option yet</span></b>"""
else:
j = 0
for (fldID, fldvID, stype, score, score_fieldvalue) in col_fld:
fieldvalue = get_fld_value(fldvID)
move = ""
if j != 0:
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldvaluescore?colID=%s&ln=%s&id_1=%s&id_fldvalue_1=%s&id_fldvalue_2=%s&rand=%s#7.3"><img border="0" src="%s/img/smallup.gif" title="Move up"></a>""" % (CFG_SITE_URL, colID, ln, fldID, fldvID, col_fld[j - 1][1], random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
j += 1
if j != len(col_fld):
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfldvaluescore?colID=%s&ln=%s&id_1=%s&id_fldvalue_1=%s&id_fldvalue_2=%s&rand=%s#7.3"><img border="0" src="%s/img/smalldown.gif" title="Move down"></a>""" % (CFG_SITE_URL, colID, ln, fldID, fldvID, col_fld[j][1], random.randint(0, 1000), CFG_SITE_URL)
if fieldvalue[0][1] != fieldvalue[0][2] and fldvID is not None:
actions.append([move, "%s - %s" % (fieldvalue[0][1], fieldvalue[0][2])])
elif fldvID is not None:
actions.append([move, "%s" % fieldvalue[0][1]])
move = ''
for col in [(('Modify value', 'modifyfieldvalue'), ('Remove value', 'removefieldvalue'),)]:
actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fldvID=%s&fmeth=seo#7.4">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, fldID, fldvID, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fldID=%s&fldvID=%s#7.4">%s</a>' % (CFG_SITE_URL, function, colID, ln, fldID, fldvID, str)
output += tupletotable(header=header, tuple=actions)
output += content
body = [output]
output = "<br />" + addadminbox(subtitle, body)
if len(col_fld) == 0:
output = content
return perform_showsearchoptions(colID, ln, content=output)
def perform_showoutputformats(colID, ln, callback='yes', content='', confirm=-1):
"""shows the outputformats of the current collection
colID - the collection id."""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
subtitle = """<a name="10">10. Modify output formats for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.10">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = """
<dl>
<dt>Output format actions (not specific to the chosen collection)
<dd>Go to the BibFormat interface to modify</dd>
<dt>Collection specific actions
<dd><a href="addexistingoutputformat?colID=%s&ln=%s#10.2">Add existing output format to collection</a></dd>
</dl>
""" % (colID, ln)
header = ['', 'Code', 'Output format', 'Actions']
actions = []
col_fmt = get_col_fmt(colID)
fmt_dict = dict(get_def_name('', "format"))
i = 0
if len(col_fmt) > 0:
for (id_format, colID_fld, code, score) in col_fmt:
move = """<table cellspacing="1" cellpadding="0" border="0"><tr><td>"""
if i != 0:
move += """<a href="%s/admin/websearch/websearchadmin.py/switchfmtscore?colID=%s&ln=%s&type=format&id_1=%s&id_2=%s&rand=%s#10"><img border="0" src="%s/img/smallup.gif" title="Move format up"></a>""" % (CFG_SITE_URL, colID, ln, id_format, col_fmt[i - 1][0], random.randint(0, 1000), CFG_SITE_URL)
else:
move += " "
move += "</td><td>"
i += 1
if i != len(col_fmt):
move += '<a href="%s/admin/websearch/websearchadmin.py/switchfmtscore?colID=%s&ln=%s&type=format&id_1=%s&id_2=%s&rand=%s#10"><img border="0" src="%s/img/smalldown.gif" title="Move format down"></a>' % (CFG_SITE_URL, colID, ln, id_format, col_fmt[i][0], random.randint(0, 1000), CFG_SITE_URL)
move += """</td></tr></table>"""
actions.append([move, code, fmt_dict[int(id_format)]])
for col in [(('Remove', 'removeoutputformat'),)]:
actions[-1].append('<a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fmtID=%s#10">%s</a>' % (CFG_SITE_URL, col[0][1], colID, ln, id_format, col[0][0]))
for (str, function) in col[1:]:
actions[-1][-1] += ' / <a href="%s/admin/websearch/websearchadmin.py/%s?colID=%s&ln=%s&fmtID=%s#10">%s</a>' % (CFG_SITE_URL, function, colID, ln, id_format, str)
output += tupletotable(header=header, tuple=actions)
else:
output += """No output formats exists for this collection"""
output += content
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_showoutputformats", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def external_collections_build_select(colID, external_collection):
output = '<select name="state" class="admin_w200">'
if external_collection.parser:
max_state = 4
else:
max_state = 2
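    # Engines that provide a parser expose all CFG_EXTERNAL_COLLECTION_STATES_NAME states (4),
    # while engines without one only offer the first two.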
num_selected = external_collection_get_state(external_collection, colID)
for num in range(max_state):
state_name = CFG_EXTERNAL_COLLECTION_STATES_NAME[num]
if num == num_selected:
selected = ' selected'
else:
selected = ''
output += '<option value="%(num)d"%(selected)s>%(state_name)s</option>' % {'num': num, 'selected': selected, 'state_name': state_name}
output += '</select>\n'
return output
def perform_manage_external_collections(colID, ln, callback='yes', content='', confirm=-1):
"""Show the interface to configure external collections to the user."""
colID = int(colID)
subtitle = """<a name="11">11. Configuration of related external collections</a>
<small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.11">?</a>]</small>""" % CFG_SITE_URL
output = '<form action="update_external_collections" method="POST"><input type="hidden" name="colID" value="%(colID)d">' % {'colID': colID}
table_header = ['External collection', 'Mode', 'Apply also to daughter collections?']
table_content = []
external_collections = external_collection_sort_engine_by_name(external_collections_dictionary.values())
for external_collection in external_collections:
collection_name = external_collection.name
select = external_collections_build_select(colID, external_collection)
recurse = '<input type=checkbox name="recurse" value="%(collection_name)s">' % {'collection_name': collection_name}
table_content.append([collection_name, select, recurse])
output += tupletotable(header=table_header, tuple=table_content)
output += '<input class="adminbutton" type="submit" value="Modify"/>'
output += '</form>'
return addadminbox(subtitle, [output])
def perform_update_external_collections(colID, ln, state_list, recurse_list):
colID = int(colID)
changes = []
output = ""
if not state_list:
        return 'Warning: No state found.<br />' + perform_manage_external_collections(colID, ln)
external_collections = external_collection_sort_engine_by_name(external_collections_dictionary.values())
if len(external_collections) != len(state_list):
        return 'Warning: the size of state_list differs from external_collections!<br />' + perform_manage_external_collections(colID, ln)
for (external_collection, state) in zip(external_collections, state_list):
state = int(state)
collection_name = external_collection.name
recurse = recurse_list and collection_name in recurse_list
oldstate = external_collection_get_state(external_collection, colID)
if oldstate != state or recurse:
changes += external_collection_get_update_state_list(external_collection, colID, state, recurse)
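    # All state updates collected above are applied in a single batch.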
external_collection_apply_changes(changes)
return output + '<br /><br />' + perform_manage_external_collections(colID, ln)
def perform_showdetailedrecordoptions(colID, ln, callback='yes', content='', confirm=-1):
"""Show the interface to configure detailed record page to the user."""
colID = int(colID)
subtitle = """<a name="12">12. Configuration of detailed record page</a>
<small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.12">?</a>]</small>""" % CFG_SITE_URL
output = '''<form action="update_detailed_record_options" method="post">
<table><tr><td>
<input type="hidden" name="colID" value="%(colID)d">
<dl>
<dt><b>Show tabs:</b></dt>
<dd>
''' % {'colID': colID}
for (tab_id, tab_info) in get_detailed_page_tabs(colID).iteritems():
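        # The 'comments' tab is only offered when either comments or reviews are enabled site-wide.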
if tab_id == 'comments' and \
not CFG_WEBCOMMENT_ALLOW_REVIEWS and \
not CFG_WEBCOMMENT_ALLOW_COMMENTS:
continue
check = ''
output += '''<input type="checkbox" id="id%(tabid)s" name="tabs" value="%(tabid)s" %(check)s />
<label for="id%(tabid)s"> %(label)s</label><br />
''' % {'tabid':tab_id,
'check':((tab_info['visible'] and 'checked="checked"') or ''),
'label':tab_info['label']}
output += '</dd></dl></td><td>'
output += '</td></tr></table><input class="adminbutton" type="submit" value="Modify"/>'
output += '''<input type="checkbox" id="recurse" name="recurse" value="1" />
<label for="recurse"> Also apply to subcollections</label>'''
output += '</form>'
return addadminbox(subtitle, [output])
def perform_update_detailed_record_options(colID, ln, tabs, recurse):
"""Update the preferences for the tab to show/hide in the detailed record page."""
colID = int(colID)
changes = []
output = '<b><span class="info">Operation successfully completed.</span></b>'
if '' in tabs:
tabs.remove('')
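    # The 'metadata' tab is always enabled, regardless of the boxes ticked in the form.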
tabs.append('metadata')
def update_settings(colID, tabs, recurse):
run_sql("DELETE FROM collectiondetailedrecordpagetabs WHERE id_collection=%s", (colID, ))
run_sql("REPLACE INTO collectiondetailedrecordpagetabs" + \
" SET id_collection=%s, tabs=%s", (colID, ';'.join(tabs)))
## for enabled_tab in tabs:
## run_sql("REPLACE INTO collectiondetailedrecordpagetabs" + \
## " SET id_collection='%s', tabs='%s'" % (colID, ';'.join(tabs)))
if recurse:
for descendant_id in get_collection_descendants(colID):
update_settings(descendant_id, tabs, recurse)
update_settings(colID, tabs, recurse)
## for colID in colIDs:
## run_sql("DELETE FROM collectiondetailedrecordpagetabs WHERE id_collection='%s'" % colID) # kwalitee: disable=sql
## for enabled_tab in tabs:
## run_sql("REPLACE INTO collectiondetailedrecordpagetabs" + \
## " SET id_collection='%s', tabs='%s'" % (colID, ';'.join(tabs)))
#if callback:
return perform_editcollection(colID, ln, "perform_modifytranslations",
'<br /><br />' + output + '<br /><br />' + \
perform_showdetailedrecordoptions(colID, ln))
#else:
# return addadminbox(subtitle, body)
#return output + '<br /><br />' + perform_showdetailedrecordoptions(colID, ln)
def perform_addexistingoutputformat(colID, ln, fmtID=-1, callback='yes', confirm=-1):
"""form to add an existing output format to a collection.
colID - the collection the format should be added to
fmtID - the format to add."""
subtitle = """<a name="10.2"></a>Add existing output format to collection"""
output = ""
if fmtID not in [-1, "-1"] and confirm in [1, "1"]:
ares = add_col_fmt(colID, fmtID)
colID = int(colID)
res = get_def_name('', "format")
fmt_dict = dict(res)
col_dict = dict(get_def_name('', "collection"))
col_fmt = get_col_fmt(colID)
col_fmt = dict(map(lambda x: (x[0], x[2]), col_fmt))
if len(res) > 0:
text = """
<span class="adminlabel">Output format</span>
<select name="fmtID" class="admin_w200">
<option value="-1">- Select output format -</option>
"""
for (id, name) in res:
if not col_fmt.has_key(id):
text += """<option value="%s" %s>%s</option>
""" % (id, id == int(fmtID) and 'selected="selected"' or '', name)
text += """</select><br />
"""
output += createhiddenform(action="addexistingoutputformat#10.2",
text=text,
button="Add",
colID=colID,
ln=ln,
confirm=1)
else:
output = """No existing output formats to add, please create a new one."""
if fmtID not in [-1, "-1"] and confirm in [1, "1"]:
output += write_outcome(ares)
elif fmtID in [-1, "-1"] and confirm not in [-1, "-1"]:
output += """<b><span class="info">Please select output format.</span></b>"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showoutputformats(colID, ln, content=output)
def perform_deleteoutputformat(colID, ln, fmtID=-1, callback='yes', confirm=-1):
"""form to delete an output format not in use.
colID - the collection id of the current collection.
fmtID - the format id to delete."""
subtitle = """<a name="10.3"></a>Delete an unused output format"""
output = """
<dl>
    <dd>Deleting an output format will also delete the associated translations.</dd>
</dl>
"""
colID = int(colID)
if fmtID not in [-1, "-1"] and confirm in [1, "1"]:
fmt_dict = dict(get_def_name('', "format"))
old_colNAME = fmt_dict[int(fmtID)]
ares = delete_fmt(int(fmtID))
res = get_def_name('', "format")
fmt_dict = dict(res)
col_dict = dict(get_def_name('', "collection"))
col_fmt = get_col_fmt()
col_fmt = dict(map(lambda x: (x[0], x[2]), col_fmt))
if len(res) > 0:
text = """
<span class="adminlabel">Output format</span>
<select name="fmtID" class="admin_w200">
"""
text += """<option value="-1">- Select output format -"""
for (id, name) in res:
if not col_fmt.has_key(id):
text += """<option value="%s" %s>%s""" % (id, id == int(fmtID) and 'selected="selected"' or '', name)
text += "</option>"
text += """</select><br />"""
output += createhiddenform(action="deleteoutputformat#10.3",
text=text,
button="Delete",
colID=colID,
ln=ln,
confirm=0)
if fmtID not in [-1, "-1"]:
fmtID = int(fmtID)
if confirm in [0, "0"]:
text = """<b>Do you want to delete the output format '%s'.</b>
""" % fmt_dict[fmtID]
output += createhiddenform(action="deleteoutputformat#10.3",
text=text,
button="Confirm",
colID=colID,
fmtID=fmtID,
ln=ln,
confirm=1)
elif confirm in [1, "1"]:
output += write_outcome(ares)
elif confirm not in [-1, "-1"]:
output += """<b><span class="info">Choose a output format to delete.</span></b>
"""
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showoutputformats(colID, ln, content=output)
def perform_removeoutputformat(colID, ln, fmtID='', callback='yes', confirm=0):
"""form to remove an output format from a collection.
colID - the collection id of the current collection.
fmtID - the format id.
"""
subtitle = """<a name="10.5"></a>Remove output format"""
output = ""
col_dict = dict(get_def_name('', "collection"))
fmt_dict = dict(get_def_name('', "format"))
if colID and fmtID:
colID = int(colID)
fmtID = int(fmtID)
if confirm in ["0", 0]:
text = """Do you want to remove the output format '%s' from the collection '%s'.""" % (fmt_dict[fmtID], col_dict[colID])
output += createhiddenform(action="removeoutputformat#10.5",
text=text,
button="Confirm",
colID=colID,
fmtID=fmtID,
confirm=1)
elif confirm in ["1", 1]:
res = remove_fmt(colID, fmtID)
output += write_outcome(res)
body = [output]
output = "<br />" + addadminbox(subtitle, body)
return perform_showoutputformats(colID, ln, content=output)
def perform_index(colID=1, ln=CFG_SITE_LANG, mtype='', content='', confirm=0):
"""The index method, calling methods to show the collection tree, create new collections and add collections to tree.
"""
subtitle = "Overview"
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
output = ""
fin_output = ""
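    # Collection id 1 is the tree root; create it (named after the site) if it does not exist yet,
    # and keep its name in sync with CFG_SITE_NAME.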
if not col_dict.has_key(1):
res = add_col(CFG_SITE_NAME, '')
if res:
fin_output += """<b><span class="info">Created root collection.</span></b><br />"""
else:
return "Cannot create root collection, please check database."
if CFG_SITE_NAME != run_sql("SELECT name from collection WHERE id=1")[0][0]:
res = run_sql("update collection set name=%s where id=1", (CFG_SITE_NAME, ))
if res:
fin_output += """<b><span class="info">The name of the root collection has been modified to be the same as the %(sitename)s installation name given prior to installing %(sitename)s.</span><b><br />""" % {'sitename' : CFG_SITE_NAME}
else:
return "Error renaming root collection."
fin_output += """
<table>
<tr>
<td>0. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_showall">Show all</a></small></td>
<td>1. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_addcollection">Create new collection</a></small></td>
<td>2. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_addcollectiontotree">Attach collection to tree</a></small></td>
<td>3. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_modifycollectiontree">Modify collection tree</a></small></td>
<td>4. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_checkwebcollstatus">Webcoll Status</a></small></td>
</tr><tr>
<td>5. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_checkcollectionstatus">Collection Status</a></small></td>
<td>6. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_checkexternalcollections">Check external collections</a></small></td>
<td>7. <small><a href="%s/admin/websearch/websearchadmin.py?colID=%s&ln=%s&mtype=perform_checksearchservices">Search services</a></small></td>
<td>8. <small><a href="%s/help/admin/websearch-admin-guide?ln=%s">Guide</a></small></td>
</tr>
</table>
""" % (CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, colID, ln, CFG_SITE_URL, ln)
if mtype == "":
fin_output += """<br /><br /><b><span class="info">To manage the collections, select an item from the menu.</span><b><br />"""
if mtype == "perform_addcollection" and content:
fin_output += content
elif mtype == "perform_addcollection" or mtype == "perform_showall":
fin_output += perform_addcollection(colID=colID, ln=ln, callback='')
fin_output += "<br />"
if mtype == "perform_addcollectiontotree" and content:
fin_output += content
elif mtype == "perform_addcollectiontotree" or mtype == "perform_showall":
fin_output += perform_addcollectiontotree(colID=colID, ln=ln, callback='')
fin_output += "<br />"
if mtype == "perform_modifycollectiontree" and content:
fin_output += content
elif mtype == "perform_modifycollectiontree" or mtype == "perform_showall":
fin_output += perform_modifycollectiontree(colID=colID, ln=ln, callback='')
fin_output += "<br />"
if mtype == "perform_checkwebcollstatus" and content:
fin_output += content
elif mtype == "perform_checkwebcollstatus" or mtype == "perform_showall":
fin_output += perform_checkwebcollstatus(colID, ln, callback='')
if mtype == "perform_checkcollectionstatus" and content:
fin_output += content
elif mtype == "perform_checkcollectionstatus" or mtype == "perform_showall":
fin_output += perform_checkcollectionstatus(colID, ln, callback='')
if mtype == "perform_checkexternalcollections" and content:
fin_output += content
elif mtype == "perform_checkexternalcollections" or mtype == "perform_showall":
fin_output += perform_checkexternalcollections(colID, ln, callback='')
if mtype == "perform_checksearchservices" and content:
fin_output += content
elif mtype == "perform_checksearchservices" or mtype == "perform_showall":
fin_output += perform_checksearchservices(colID, ln, callback='')
    body = [fin_output]
return addadminbox('<b>Menu</b>', body)
def show_coll_not_in_tree(colID, ln, col_dict):
"""Returns collections not in tree"""
tree = get_col_tree(colID)
in_tree = {}
output = "These collections are not in the tree, and should be added:<br />"
for (id, up, down, dad, reltype) in tree:
in_tree[id] = 1
in_tree[dad] = 1
res = run_sql("SELECT id from collection")
if len(res) != len(in_tree):
for id in res:
if not in_tree.has_key(id[0]):
output += """<a href="%s/admin/websearch/websearchadmin.py/editcollection?colID=%s&ln=%s" title="Edit collection">%s</a> ,
""" % (CFG_SITE_URL, id[0], ln, col_dict[id[0]])
output += "<br /><br />"
else:
output = ""
return output
def create_colltree(tree, col_dict, colID, ln, move_from='', move_to='', rtype='', edit=''):
"""Creates the presentation of the collection tree, with the buttons for modifying it.
tree - the tree to present, from get_tree()
col_dict - the name of the collections in a dictionary
colID - the collection id to start with
move_from - if a collection to be moved has been chosen
move_to - the collection which should be set as father of move_from
rtype - the type of the tree, regular or virtual
edit - if the method should output the edit buttons."""
if move_from:
move_from_rtype = move_from[0]
move_from_id = int(move_from[1:len(move_from)])
tree_from = get_col_tree(colID, move_from_rtype)
tree_to = get_col_tree(colID, rtype)
tables = 0
tstack = []
i = 0
text = """
<table border ="0" cellspacing="0" cellpadding="0">"""
for i in range(0, len(tree)):
id_son = tree[i][0]
up = tree[i][1]
down = tree[i][2]
dad = tree[i][3]
reltype = tree[i][4]
tmove_from = ""
j = i
while j > 0:
j = j - 1
try:
if tstack[j][1] == dad:
table = tstack[j][2]
for k in range(0, tables - table):
tables = tables - 1
text += """</table></td></tr>
"""
break
except StandardError, e:
pass
text += """<tr><td>
"""
if i > 0 and tree[i][1] == 0:
tables = tables + 1
text += """</td><td></td><td></td><td></td><td><table border="0" cellspacing="0" cellpadding="0"><tr><td>
"""
if i == 0:
tstack.append((id_son, dad, 1))
else:
tstack.append((id_son, dad, tables))
if up == 1 and edit:
text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&move_up=%s&rtype=%s#%s"><img border="0" src="%s/img/smallup.gif" title="Move collection up"></a>""" % (CFG_SITE_URL, colID, ln, i, rtype, tree[i][0], CFG_SITE_URL)
else:
text += """ """
text += "</td><td>"
if down == 1 and edit:
text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&move_down=%s&rtype=%s#%s"><img border="0" src="%s/img/smalldown.gif" title="Move collection down"></a>""" % (CFG_SITE_URL, colID, ln, i, rtype, tree[i][0], CFG_SITE_URL)
else:
text += """ """
text += "</td><td>"
if edit:
if move_from and move_to:
tmove_from = move_from
move_from = ''
if not (move_from == "" and i == 0) and not (move_from != "" and int(move_from[1:len(move_from)]) == i and rtype == move_from[0]):
check = "true"
if move_from:
#if tree_from[move_from_id][0] == tree_to[i][0] or not check_col(tree_to[i][0], tree_from[move_from_id][0]):
# check = ''
#elif not check_col(tree_to[i][0], tree_from[move_from_id][0]):
# check = ''
#if not check and (tree_to[i][0] == 1 and tree_from[move_from_id][3] == tree_to[i][0] and move_from_rtype != rtype):
# check = "true"
if check:
text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&move_from=%s&move_to=%s%s&rtype=%s#tree"><img border="0" src="%s/img/move_to.gif" title="Move '%s' to '%s'"></a>
""" % (CFG_SITE_URL, colID, ln, move_from, rtype, i, rtype, CFG_SITE_URL, col_dict[tree_from[int(move_from[1:len(move_from)])][0]], col_dict[tree_to[i][0]])
else:
try:
text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&move_from=%s%s&rtype=%s#%s"><img border="0" src="%s/img/move_from.gif" title="Move '%s' from this location."></a>""" % (CFG_SITE_URL, colID, ln, rtype, i, rtype, tree[i][0], CFG_SITE_URL, col_dict[tree[i][0]])
except KeyError:
pass
else:
text += """<img border="0" src="%s/img/white_field.gif">
""" % CFG_SITE_URL
else:
text += """<img border="0" src="%s/img/white_field.gif">
""" % CFG_SITE_URL
text += """
</td>
<td>"""
if edit:
try:
text += """<a href="%s/admin/websearch/websearchadmin.py/modifycollectiontree?colID=%s&ln=%s&delete=%s&rtype=%s#%s"><img border="0" src="%s/img/iconcross.gif" title="Remove colletion from tree"></a>""" % (CFG_SITE_URL, colID, ln, i, rtype, tree[i][0], CFG_SITE_URL)
except KeyError:
pass
elif i != 0:
text += """<img border="0" src="%s/img/white_field.gif">
""" % CFG_SITE_URL
text += """</td><td>
"""
if tmove_from:
move_from = tmove_from
try:
text += """<a name="%s"></a>%s<a href="%s/admin/websearch/websearchadmin.py/editcollection?colID=%s&ln=%s" title="Edit collection">%s</a>%s%s%s""" % (tree[i][0], (reltype=="v" and '<i>' or ''), CFG_SITE_URL, tree[i][0], ln, col_dict[id_son], (move_to=="%s%s" %(rtype, i) and ' <img border="0" src="%s/img/move_to.gif">' % CFG_SITE_URL or ''), (move_from=="%s%s" % (rtype, i) and ' <img border="0" src="%s/img/move_from.gif">' % CFG_SITE_URL or ''), (reltype=="v" and '</i>' or ''))
except KeyError:
pass
text += """</td></tr>
"""
while tables > 0:
text += """</table></td></tr>
"""
tables = tables - 1
text += """</table>"""
return text
def perform_deletecollection(colID, ln, confirm=-1, callback='yes'):
"""form to delete a collection
colID - id of collection
"""
subtitle =''
output = """
<span class="warning">
<strong>
<dl>
<dt>WARNING:</dt>
 <dd>When deleting a collection, you also delete all data related to the collection, such as translations, relations to other collections and information about which rank methods to use.
<br />For more information, please go to the <a title="See guide" href="%s/help/admin/websearch-admin-guide">WebSearch guide</a> and read the section regarding deleting a collection.</dd>
</dl>
</strong>
</span>
""" % CFG_SITE_URL
col_dict = dict(get_def_name('', "collection"))
if colID != 1 and colID and col_dict.has_key(int(colID)):
colID = int(colID)
subtitle = """<a name="4">4. Delete collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.4">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
res = run_sql("SELECT id_dad,id_son,type,score from collection_collection WHERE id_dad=%s", (colID, ))
res2 = run_sql("SELECT id_dad,id_son,type,score from collection_collection WHERE id_son=%s", (colID, ))
if not res and not res2:
if confirm in ["-1", -1]:
text = """Do you want to delete this collection."""
output += createhiddenform(action="deletecollection#4",
text=text,
colID=colID,
button="Delete",
confirm=0)
elif confirm in ["0", 0]:
text = """Are you sure you want to delete this collection."""
output += createhiddenform(action="deletecollection#4",
text=text,
colID=colID,
button="Confirm",
confirm=1)
elif confirm in ["1", 1]:
result = delete_col(colID)
if not result:
raise Exception
else:
output = """<b><span class="info">Can not delete a collection that is a part of the collection tree, remove collection from the tree and try again.</span></b>"""
else:
subtitle = """4. Delete collection"""
output = """<b><span class="info">Not possible to delete the root collection</span></b>"""
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_deletecollection", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_editcollection(colID=1, ln=CFG_SITE_LANG, mtype='', content=''):
"""interface to modify a collection. this method is calling other methods which again is calling this and sending back the output of the method.
if callback, the method will call perform_editcollection, if not, it will just return its output.
colID - id of the collection
mtype - the method that called this method.
content - the output from that method."""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
if not col_dict.has_key(colID):
return """<b><span class="info">Collection deleted.</span></b>
"""
fin_output = """
<table>
<tr>
<td><b>Menu</b></td>
</tr>
<tr>
<td>0. <small><a href="editcollection?colID=%s&ln=%s">Show all</a></small></td>
<td>1. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_modifydbquery">Modify collection query</a></small></td>
<td>2. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_modifyrestricted">Modify access restrictions</a></small></td>
<td>3. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_modifytranslations">Modify translations</a></small></td>
<td>4. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_deletecollection">Delete collection</a></small></td>
</tr><tr>
<td>5. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showportalboxes">Modify portalboxes</a></small></td>
<td>6. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showsearchfields#6">Modify search fields</a></small></td>
<td>7. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showsearchoptions#7">Modify search options</a></small></td>
<td>8. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showsortoptions#8">Modify sort options</a></small></td>
<td>9. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_modifyrankmethods#9">Modify rank options</a></small></td>
</tr><tr>
<td>10. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showoutputformats#10">Modify output formats</a></small></td>
<td>11. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_manage_external_collections#11">Configuration of related external collections</a></small></td>
<td>12. <small><a href="editcollection?colID=%s&ln=%s&mtype=perform_showdetailedrecordoptions#12">Detailed record page options</a></small></td>
</tr>
</table>
""" % (colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln, colID, ln)
if mtype == "perform_modifydbquery" and content:
fin_output += content
elif mtype == "perform_modifydbquery" or not mtype:
fin_output += perform_modifydbquery(colID, ln, callback='')
if mtype == "perform_modifyrestricted" and content:
fin_output += content
elif mtype == "perform_modifyrestricted" or not mtype:
fin_output += perform_modifyrestricted(colID, ln, callback='')
if mtype == "perform_modifytranslations" and content:
fin_output += content
elif mtype == "perform_modifytranslations" or not mtype:
fin_output += perform_modifytranslations(colID, ln, callback='')
if mtype == "perform_deletecollection" and content:
fin_output += content
elif mtype == "perform_deletecollection" or not mtype:
fin_output += perform_deletecollection(colID, ln, callback='')
if mtype == "perform_showportalboxes" and content:
fin_output += content
elif mtype == "perform_showportalboxes" or not mtype:
fin_output += perform_showportalboxes(colID, ln, callback='')
if mtype == "perform_showsearchfields" and content:
fin_output += content
elif mtype == "perform_showsearchfields" or not mtype:
fin_output += perform_showsearchfields(colID, ln, callback='')
if mtype == "perform_showsearchoptions" and content:
fin_output += content
elif mtype == "perform_showsearchoptions" or not mtype:
fin_output += perform_showsearchoptions(colID, ln, callback='')
if mtype == "perform_showsortoptions" and content:
fin_output += content
elif mtype == "perform_showsortoptions" or not mtype:
fin_output += perform_showsortoptions(colID, ln, callback='')
if mtype == "perform_modifyrankmethods" and content:
fin_output += content
elif mtype == "perform_modifyrankmethods" or not mtype:
fin_output += perform_modifyrankmethods(colID, ln, callback='')
if mtype == "perform_showoutputformats" and content:
fin_output += content
elif mtype == "perform_showoutputformats" or not mtype:
fin_output += perform_showoutputformats(colID, ln, callback='')
if mtype == "perform_manage_external_collections" and content:
fin_output += content
elif mtype == "perform_manage_external_collections" or not mtype:
fin_output += perform_manage_external_collections(colID, ln, callback='')
if mtype == "perform_showdetailedrecordoptions" and content:
fin_output += content
elif mtype == "perform_showdetailedrecordoptions" or not mtype:
fin_output += perform_showdetailedrecordoptions(colID, ln, callback='')
return addadminbox("Overview of edit options for collection '%s'" % col_dict[colID], [fin_output])
def perform_checkwebcollstatus(colID, ln, confirm=0, callback='yes'):
"""Check status of the collection tables with respect to the webcoll cache."""
subtitle = """<a name="11"></a>Webcoll Status [<a href="%s/help/admin/websearch-admin-guide#5">?</a>]""" % CFG_SITE_URL
output = ""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
output += """<br /><b>Last updates:</b><br />"""
collection_table_update_time = ""
collection_web_update_time = ""
collection_table_update_time = get_table_update_time('collection')
output += "Collection table last updated: %s<br />" % collection_table_update_time
try:
file = open("%s/collections/last_updated" % CFG_CACHEDIR)
collection_web_update_time = file.readline().strip()
output += "Collection cache last updated: %s<br />" % collection_web_update_time
file.close()
except:
pass
# reformat collection_web_update_time to the format suitable for comparisons
try:
collection_web_update_time = strftime("%Y-%m-%d %H:%M:%S",
time.strptime(collection_web_update_time, "%d %b %Y %H:%M:%S"))
except ValueError, e:
pass
if collection_table_update_time > collection_web_update_time:
output += """<br /><b><span class="info">Warning: The collections have been modified since last time Webcoll was executed, to process the changes, Webcoll must be executed.</span></b><br />"""
header = ['ID', 'Name', 'Time', 'Status', 'Progress']
actions = []
output += """<br /><b>Last BibSched tasks:</b><br />"""
res = run_sql("select id, proc, host, user, runtime, sleeptime, arguments, status, progress from schTASK where proc='webcoll' and runtime< now() ORDER by runtime")
if len(res) > 0:
(id, proc, host, user, runtime, sleeptime, arguments, status, progress) = res[len(res) - 1]
webcoll__update_time = runtime
actions.append([id, proc, runtime, (status !="" and status or ''), (progress !="" and progress or '')])
else:
actions.append(['', 'webcoll', '', '', 'Not executed yet'])
res = run_sql("select id, proc, host, user, runtime, sleeptime, arguments, status, progress from schTASK where proc='bibindex' and runtime< now() ORDER by runtime")
if len(res) > 0:
(id, proc, host, user, runtime, sleeptime, arguments, status, progress) = res[len(res) - 1]
actions.append([id, proc, runtime, (status !="" and status or ''), (progress !="" and progress or '')])
else:
actions.append(['', 'bibindex', '', '', 'Not executed yet'])
output += tupletotable(header=header, tuple=actions)
output += """<br /><b>Next scheduled BibSched run:</b><br />"""
actions = []
res = run_sql("select id, proc, host, user, runtime, sleeptime, arguments, status, progress from schTASK where proc='webcoll' and runtime > now() ORDER by runtime")
webcoll_future = ""
if len(res) > 0:
(id, proc, host, user, runtime, sleeptime, arguments, status, progress) = res[0]
webcoll__update_time = runtime
actions.append([id, proc, runtime, (status !="" and status or ''), (progress !="" and progress or '')])
webcoll_future = "yes"
else:
actions.append(['', 'webcoll', '', '', 'Not scheduled'])
res = run_sql("select id, proc, host, user, runtime, sleeptime, arguments, status, progress from schTASK where proc='bibindex' and runtime > now() ORDER by runtime")
bibindex_future = ""
if len(res) > 0:
(id, proc, host, user, runtime, sleeptime, arguments, status, progress) = res[0]
actions.append([id, proc, runtime, (status !="" and status or ''), (progress !="" and progress or '')])
bibindex_future = "yes"
else:
actions.append(['', 'bibindex', '', '', 'Not scheduled'])
output += tupletotable(header=header, tuple=actions)
if webcoll_future == "":
output += """<br /><b><span class="info">Warning: Webcoll is not scheduled for a future run by bibsched, any updates to the collection will not be processed.</span></b><br />"""
if bibindex_future == "":
output += """<br /><b><span class="info">Warning: Bibindex is not scheduled for a future run by bibsched, any updates to the records will not be processed.</span></b><br />"""
body = [output]
if callback:
return perform_index(colID, ln, "perform_checkwebcollstatus", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_modifyrestricted(colID, ln, rest='', callback='yes', confirm=-1):
"""modify which apache group is allowed to access the collection.
rest - the groupname"""
subtitle = ''
output = ""
col_dict = dict(get_def_name('', "collection"))
action_id = acc_get_action_id(VIEWRESTRCOLL)
if colID and col_dict.has_key(int(colID)):
colID = int(colID)
subtitle = """<a name="2">2. Modify access restrictions for collection '%s'</a> <small>[<a title="See guide" href="%s/help/admin/websearch-admin-guide#3.2">?</a>]</small>""" % (col_dict[colID], CFG_SITE_URL)
output = """<p>Please note that Invenio versions greater than <em>0.92.1</em> manage collection restriction via the standard
<strong><a href="/admin/webaccess/webaccessadmin.py/showactiondetails?id_action=%i">WebAccess Admin Interface</a></strong> (action '%s').</p>
""" % (action_id, VIEWRESTRCOLL)
body = [output]
if callback:
return perform_editcollection(colID, ln, "perform_modifyrestricted", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_checkcollectionstatus(colID, ln, confirm=0, callback='yes'):
"""Check the configuration of the collections."""
from invenio.search_engine import collection_restricted_p, restricted_collection_cache
subtitle = """<a name="11"></a>Collection Status [<a href="%s/help/admin/websearch-admin-guide#6">?</a>]""" % CFG_SITE_URL
output = ""
colID = int(colID)
col_dict = dict(get_def_name('', "collection"))
collections = run_sql("SELECT id, name, dbquery, nbrecs FROM collection "
"ORDER BY id")
header = ['ID', 'Name','Query', 'Subcollections', 'Restricted', 'Hosted',
'I18N', 'Status', 'Number of records']
rnk_list = get_def_name('', "rnkMETHOD")
actions = []
restricted_collection_cache.recreate_cache_if_needed()
for (id, name, dbquery, nbrecs) in collections:
reg_sons = col_has_son(id, 'r')
vir_sons = col_has_son(id, 'v')
status = ""
hosted = ""
if str(dbquery).startswith("hostedcollection:"): hosted = """<b><span class="info">Yes</span></b>"""
else: hosted = """<b><span class="info">No</span></b>"""
langs = run_sql("SELECT ln from collectionname where id_collection=%s", (id, ))
i8n = ""
if len(langs) > 0:
for lang in langs:
i8n += "%s, " % lang
else:
i8n = """<b><span class="info">None</span></b>"""
if reg_sons and dbquery:
status = """<b><span class="warning">1:Conflict</span></b>"""
elif not dbquery and not reg_sons:
status = """<b><span class="warning">2:Empty</span></b>"""
if (reg_sons or vir_sons):
subs = """<b><span class="info">Yes</span></b>"""
else:
subs = """<b><span class="info">No</span></b>"""
if dbquery is None:
dbquery = """<b><span class="info">No</span></b>"""
restricted = collection_restricted_p(name, recreate_cache_if_needed=False)
if restricted:
restricted = """<b><span class="warning">Yes</span></b>"""
if status:
status += """<b><span class="warning">,3:Restricted</span></b>"""
else:
status += """<b><span class="warning">3:Restricted</span></b>"""
else:
restricted = """<b><span class="info">No</span></b>"""
if status == "":
status = """<b><span class="info">OK</span></b>"""
actions.append([id, """<a href="%s/admin/websearch/websearchadmin.py/editcollection?colID=%s&ln=%s">%s</a>""" % (CFG_SITE_URL, id, ln, name), dbquery, subs, restricted, hosted, i8n, status, nbrecs])
output += tupletotable(header=header, tuple=actions)
    body = [output]
if callback:
return perform_index(colID, ln, "perform_checkcollectionstatus", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_checkexternalcollections(colID, ln, icl=None, update="", confirm=0, callback='yes'):
"""Check the external collections for inconsistencies."""
subtitle = """<a name="7"></a>Check external collections [<a href="%s/help/admin/websearch-admin-guide#7">?</a>]""" % CFG_SITE_URL
output = ""
colID = int(colID)
if icl:
if update == "add":
# icl : the "inconsistent list" comes as a string, it has to be converted back into a list
icl = eval(icl)
#icl = icl[1:-1].split(',')
for collection in icl:
#collection = str(collection[1:-1])
query_select = "SELECT name FROM externalcollection WHERE name like '%(name)s';" % {'name': collection}
results_select = run_sql(query_select)
if not results_select:
query_insert = "INSERT INTO externalcollection (name) VALUES ('%(name)s');" % {'name': collection}
run_sql(query_insert)
output += """<br /><span class=info>New collection \"%s\" has been added to the database table \"externalcollection\".</span><br />""" % (collection)
else:
output += """<br /><span class=info>Collection \"%s\" has already been added to the database table \"externalcollection\" or was already there.</span><br />""" % (collection)
elif update == "del":
# icl : the "inconsistent list" comes as a string, it has to be converted back into a list
icl = eval(icl)
#icl = icl[1:-1].split(',')
for collection in icl:
#collection = str(collection[1:-1])
query_select = "SELECT id FROM externalcollection WHERE name like '%(name)s';" % {'name': collection}
results_select = run_sql(query_select)
if results_select:
query_delete = "DELETE FROM externalcollection WHERE id like '%(id)s';" % {'id': results_select[0][0]}
query_delete_states = "DELETE FROM collection_externalcollection WHERE id_externalcollection like '%(id)s';" % {'id': results_select[0][0]}
run_sql(query_delete)
run_sql(query_delete_states)
output += """<br /><span class=info>Collection \"%s\" has been deleted from the database table \"externalcollection\".</span><br />""" % (collection)
else:
output += """<br /><span class=info>Collection \"%s\" has already been delete from the database table \"externalcollection\" or was never there.</span><br />""" % (collection)
external_collections_file = []
external_collections_db = []
for coll in external_collections_dictionary.values():
external_collections_file.append(coll.name)
external_collections_file.sort()
query = """SELECT name from externalcollection"""
results = run_sql(query)
for result in results:
external_collections_db.append(result[0])
external_collections_db.sort()
number_file = len(external_collections_file)
number_db = len(external_collections_db)
if external_collections_file == external_collections_db:
output += """<br /><span class="info">External collections are consistent.</span><br /><br />
- database table \"externalcollection\" has %(number_db)s collections<br />
- configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections""" % {
"number_db" : number_db,
"number_file" : number_file}
elif len(external_collections_file) > len(external_collections_db):
external_collections_diff = list(set(external_collections_file) - set(external_collections_db))
external_collections_db.extend(external_collections_diff)
external_collections_db.sort()
if external_collections_file == external_collections_db:
output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
- database table \"externalcollection\" has %(number_db)s collections
(<span class="warning">missing: %(diff)s</span>)<br />
- configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections
<br /><br /><a href="%(site_url)s/admin/websearch/websearchadmin.py/checkexternalcollections?colID=%(colID)s&icl=%(diff)s&update=add&ln=%(ln)s">
Click here</a> to update your database adding the missing collections. If the problem persists please check your configuration manually.""" % {
"number_db" : number_db,
"number_file" : number_file,
"diff" : external_collections_diff,
"site_url" : CFG_SITE_URL,
"colID" : colID,
"ln" : ln}
else:
output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
- database table \"externalcollection\" has %(number_db)s collections<br />
- configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections
<br /><br /><span class="warning">The external collections do not match.</span>
<br />To fix the problem please check your configuration manually.""" % {
"number_db" : number_db,
"number_file" : number_file}
elif len(external_collections_file) < len(external_collections_db):
external_collections_diff = list(set(external_collections_db) - set(external_collections_file))
external_collections_file.extend(external_collections_diff)
external_collections_file.sort()
if external_collections_file == external_collections_db:
output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
- database table \"externalcollection\" has %(number_db)s collections
(<span class="warning">extra: %(diff)s</span>)<br />
- configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections
<br /><br /><a href="%(site_url)s/admin/websearch/websearchadmin.py/checkexternalcollections?colID=%(colID)s&icl=%(diff)s&update=del&ln=%(ln)s">
Click here</a> to force remove the extra collections from your database (warning: use with caution!). If the problem persists please check your configuration manually.""" % {
"number_db" : number_db,
"number_file" : number_file,
"diff" : external_collections_diff,
"site_url" : CFG_SITE_URL,
"colID" : colID,
"ln" : ln}
else:
output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
- database table \"externalcollection\" has %(number_db)s collections<br />
- configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections
<br /><br /><span class="warning">The external collections do not match.</span>
<br />To fix the problem please check your configuration manually.""" % {
"number_db" : number_db,
"number_file" : number_file}
else:
output += """<br /><span class="warning">There is an inconsistency:</span><br /><br />
- database table \"externalcollection\" has %(number_db)s collections<br />
- configuration file \"websearch_external_collections_config.py\" has %(number_file)s collections
<br /><br /><span class="warning">The number of external collections is the same but the collections do not match.</span>
<br />To fix the problem please check your configuration manually.""" % {
"number_db" : number_db,
"number_file" : number_file}
    body = [output]
if callback:
return perform_index(colID, ln, "perform_checkexternalcollections", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def perform_checksearchservices(colID, ln, icl=None, update="", confirm=0, callback='yes'):
"""Check the enabled search services, and possible errors"""
from invenio.pluginutils import PluginContainer
from invenio.websearch_services import CFG_SEARCH_SERVICES_PATH, \
__required_plugin_API_version__, \
SearchService
subtitle = """<a name="10"></a>Check search services [<a href="%s/help/admin/websearch-admin-guide#10">?</a>]""" % CFG_SITE_URL
output = ""
output += "<p>You can enable a search service by dropping the corresonpding plugin at <code>%s</code>.</p>" % \
cgi.escape(CFG_SEARCH_SERVICES_PATH)
search_service_plugins = PluginContainer(os.path.join(CFG_SEARCH_SERVICES_PATH, '*Service.py'),
api_version=__required_plugin_API_version__,
plugin_signature=SearchService)
output += "<br /><b>Enabled search services:</b><br />"
header = ['Service', 'Description', 'Status']
actions = []
for name, plugin in search_service_plugins.get_enabled_plugins().iteritems():
description = plugin().get_description()
        actions.append((name, description, '<span style="color:#080">OK</span>'))
if actions:
output += tupletotable(header=header, tuple=actions)
else:
output += '<em style="color:#f80;font-size:small">No search service enabled</em>'
output += "<br /><b>Search services with errors:</b><br />"
header = ['Service', 'Error']
actions = []
for name, error in search_service_plugins.get_broken_plugins().iteritems():
actions.append((name, '<pre style="color:#800">' + cgi.escape(repr(error[0]) + " " + repr(error[1]) + "\n" + "\n".join(traceback.format_tb(error[2]))) + '</pre>'))
if actions:
output += tupletotable(header=header, tuple=actions)
else:
output += '<em style="color:#080;font-size:small">No error found</em>'
body = [output]
if callback:
return perform_index(colID, ln, "perform_checksearchservices", addadminbox(subtitle, body))
else:
return addadminbox(subtitle, body)
def col_has_son(colID, rtype='r'):
"""Return True if the collection has at least one son."""
return run_sql("SELECT id_son FROM collection_collection WHERE id_dad=%s and type=%s LIMIT 1", (colID, rtype)) != ()
def get_col_tree(colID, rtype=''):
"""Returns a presentation of the tree as a list. TODO: Add loop detection
colID - startpoint for the tree
rtype - get regular or virtual part of the tree"""
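    # Each entry in the returned tree is a tuple (id, up, down, dad, reltype);
    # up/down flag whether a move-up/move-down arrow should be shown for the
    # collection in the tree editor (see create_colltree).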
try:
colID = int(colID)
stack = [colID]
ssize = 0
tree = [(colID, 0, 0, colID, 'r')]
while len(stack) > 0:
ccolID = stack.pop()
if ccolID == colID and rtype:
res = run_sql("SELECT id_son, score, type FROM collection_collection WHERE id_dad=%s AND type=%s ORDER BY score ASC,id_son", (ccolID, rtype))
else:
res = run_sql("SELECT id_son, score, type FROM collection_collection WHERE id_dad=%s ORDER BY score ASC,id_son", (ccolID, ))
ssize += 1
ntree = []
for i in range(0, len(res)):
id_son = res[i][0]
score = res[i][1]
rtype = res[i][2]
stack.append(id_son)
if i == (len(res) - 1):
up = 0
else:
up = 1
if i == 0:
down = 0
else:
down = 1
ntree.insert(0, (id_son, up, down, ccolID, rtype))
tree = tree[0:ssize] + ntree + tree[ssize:len(tree)]
return tree
except StandardError, e:
register_exception()
return ()
def add_col_dad_son(add_dad, add_son, rtype):
"""Add a son to a collection (dad)
add_dad - add to this collection id
add_son - add this collection id
rtype - either regular or virtual"""
try:
res = run_sql("SELECT score FROM collection_collection WHERE id_dad=%s ORDER BY score ASC", (add_dad, ))
highscore = 0
for score in res:
if int(score[0]) > highscore:
highscore = int(score[0])
highscore += 1
res = run_sql("INSERT INTO collection_collection(id_dad,id_son,score,type) values(%s,%s,%s,%s)", (add_dad, add_son, highscore, rtype))
return (1, highscore)
except StandardError, e:
register_exception()
return (0, e)
def compare_on_val(first, second):
"""Compare the two values"""
return cmp(first[1], second[1])
def get_col_fld(colID=-1, type = '', id_field=''):
"""Returns either all portalboxes associated with a collection, or based on either colID or language or both.
colID - collection id
ln - language id"""
sql = "SELECT id_field,id_fieldvalue,type,score,score_fieldvalue FROM collection_field_fieldvalue, field WHERE id_field=field.id"
params = []
if colID > -1:
sql += " AND id_collection=%s"
params.append(colID)
if id_field:
sql += " AND id_field=%s"
params.append(id_field)
if type:
sql += " AND type=%s"
params.append(type)
sql += " ORDER BY type, score desc, score_fieldvalue desc"
res = run_sql(sql, tuple(params))
return res
def get_col_pbx(colID=-1, ln='', position = ''):
"""Returns either all portalboxes associated with a collection, or based on either colID or language or both.
colID - collection id
ln - language id"""
sql = "SELECT id_portalbox, id_collection, ln, score, position, title, body FROM collection_portalbox, portalbox WHERE id_portalbox = portalbox.id"
params = []
if colID > -1:
sql += " AND id_collection=%s"
params.append(colID)
if ln:
sql += " AND ln=%s"
params.append(ln)
if position:
sql += " AND position=%s"
params.append(position)
sql += " ORDER BY position, ln, score desc"
res = run_sql(sql, tuple(params))
return res
def get_col_fmt(colID=-1):
"""Returns all formats currently associated with a collection, or for one specific collection
colID - the id of the collection"""
if colID not in [-1, "-1"]:
res = run_sql("SELECT id_format, id_collection, code, score FROM collection_format, format WHERE id_format = format.id AND id_collection=%s ORDER BY score desc", (colID, ))
else:
res = run_sql("SELECT id_format, id_collection, code, score FROM collection_format, format WHERE id_format = format.id ORDER BY score desc")
return res
def get_col_rnk(colID, ln):
""" Returns a list of the rank methods the given collection is attached to
colID - id from collection"""
try:
res1 = dict(run_sql("SELECT id_rnkMETHOD, '' FROM collection_rnkMETHOD WHERE id_collection=%s", (colID, )))
res2 = get_def_name('', "rnkMETHOD")
result = filter(lambda x: res1.has_key(x[0]), res2)
return result
except StandardError, e:
return ()
def get_pbx():
"""Returns all portalboxes"""
res = run_sql("SELECT id, title, body FROM portalbox ORDER by title,body")
return res
def get_fld_value(fldvID = ''):
"""Returns fieldvalue"""
sql = "SELECT id, name, value FROM fieldvalue"
params = []
if fldvID:
sql += " WHERE id=%s"
params.append(fldvID)
sql += " ORDER BY name"
res = run_sql(sql, tuple(params))
return res
def get_pbx_pos():
"""Returns a list of all the positions for a portalbox"""
position = {}
position["rt"] = "Right Top"
position["lt"] = "Left Top"
position["te"] = "Title Epilog"
position["tp"] = "Title Prolog"
position["ne"] = "Narrow by coll epilog"
position["np"] = "Narrow by coll prolog"
return position
def get_sort_nametypes():
"""Return a list of the various translationnames for the fields"""
type = {}
type['soo'] = 'Sort options'
type['seo'] = 'Search options'
type['sew'] = 'Search within'
return type
def get_fmt_nametypes():
"""Return a list of the various translationnames for the output formats"""
type = []
type.append(('ln', 'Long name'))
return type
def get_fld_nametypes():
"""Return a list of the various translationnames for the fields"""
type = []
type.append(('ln', 'Long name'))
return type
def get_col_nametypes():
"""Return a list of the various translationnames for the collections"""
type = []
type.append(('ln', 'Collection name'))
return type
def find_last(tree, start_son):
"""Find the previous collection in the tree with the same father as start_son"""
id_dad = tree[start_son][3]
while start_son > 0:
start_son -= 1
if tree[start_son][3] == id_dad:
return start_son
def find_next(tree, start_son):
"""Find the next collection in the tree with the same father as start_son"""
id_dad = tree[start_son][3]
    while start_son < len(tree) - 1:
start_son += 1
if tree[start_son][3] == id_dad:
return start_son
def remove_col_subcol(id_son, id_dad, type):
"""Remove a collection as a son of another collection in the tree, if collection isn't used elsewhere in the tree, remove all registered sons of the id_son.
id_son - collection id of son to remove
id_dad - the id of the dad"""
try:
if id_son != id_dad:
tree = get_col_tree(id_son)
run_sql("DELETE FROM collection_collection WHERE id_son=%s and id_dad=%s", (id_son, id_dad))
else:
tree = get_col_tree(id_son, type)
run_sql("DELETE FROM collection_collection WHERE id_son=%s and id_dad=%s and type=%s", (id_son, id_dad, type))
if not run_sql("SELECT id_dad,id_son,type,score from collection_collection WHERE id_son=%s and type=%s", (id_son, type)):
for (id, up, down, dad, rtype) in tree:
run_sql("DELETE FROM collection_collection WHERE id_son=%s and id_dad=%s", (id, dad))
return (1, "")
except StandardError, e:
return (0, e)
def check_col(add_dad, add_son):
"""Check if the collection can be placed as a son of the dad without causing loops.
add_dad - collection id
add_son - collection id"""
try:
stack = [add_dad]
res = run_sql("SELECT id_dad FROM collection_collection WHERE id_dad=%s AND id_son=%s", (add_dad, add_son))
if res:
raise StandardError
while len(stack) > 0:
colID = stack.pop()
res = run_sql("SELECT id_dad FROM collection_collection WHERE id_son=%s", (colID, ))
for id in res:
if int(id[0]) == int(add_son):
                    # raise StandardError  # this was the original behaviour, but it did not work
return(0)
else:
stack.append(id[0])
return (1, "")
except StandardError, e:
return (0, e)
def attach_rnk_col(colID, rnkID):
"""attach rank method to collection
rnkID - id from rnkMETHOD table
colID - id of collection, as in collection table """
try:
res = run_sql("INSERT INTO collection_rnkMETHOD(id_collection, id_rnkMETHOD) values (%s,%s)", (colID, rnkID))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def detach_rnk_col(colID, rnkID):
"""detach rank method from collection
rnkID - id from rnkMETHOD table
colID - id of collection, as in collection table """
try:
res = run_sql("DELETE FROM collection_rnkMETHOD WHERE id_collection=%s AND id_rnkMETHOD=%s", (colID, rnkID))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def switch_col_treescore(col_1, col_2):
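    """Swap the collection_collection scores of two collections in the tree.
    col_1/col_2 - tree tuples (id, up, down, dad, reltype), as returned by get_col_tree()"""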
try:
res1 = run_sql("SELECT score FROM collection_collection WHERE id_dad=%s and id_son=%s", (col_1[3], col_1[0]))
res2 = run_sql("SELECT score FROM collection_collection WHERE id_dad=%s and id_son=%s", (col_2[3], col_2[0]))
res = run_sql("UPDATE collection_collection SET score=%s WHERE id_dad=%s and id_son=%s", (res2[0][0], col_1[3], col_1[0]))
res = run_sql("UPDATE collection_collection SET score=%s WHERE id_dad=%s and id_son=%s", (res1[0][0], col_2[3], col_2[0]))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def move_col_tree(col_from, col_to, move_to_rtype=''):
"""Move a collection from one point in the tree to another. becomes a son of the endpoint.
col_from - move this collection from current point
col_to - and set it as a son of this collection.
move_to_rtype - either virtual or regular collection"""
try:
res = run_sql("SELECT score FROM collection_collection WHERE id_dad=%s ORDER BY score asc", (col_to[0], ))
highscore = 0
for score in res:
if int(score[0]) > highscore:
highscore = int(score[0])
highscore += 1
if not move_to_rtype:
move_to_rtype = col_from[4]
res = run_sql("DELETE FROM collection_collection WHERE id_son=%s and id_dad=%s", (col_from[0], col_from[3]))
res = run_sql("INSERT INTO collection_collection(id_dad,id_son,score,type) values(%s,%s,%s,%s)", (col_to[0], col_from[0], highscore, move_to_rtype))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def remove_pbx(colID, pbxID, ln):
"""Removes a portalbox from the collection given.
    colID - the collection the portalbox is connected to
pbxID - the portalbox which should be removed from the collection.
ln - the language of the portalbox to be removed"""
try:
res = run_sql("DELETE FROM collection_portalbox WHERE id_collection=%s AND id_portalbox=%s AND ln=%s", (colID, pbxID, ln))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def remove_fmt(colID, fmtID):
"""Removes a format from the collection given.
colID - the collection the format is connected to
fmtID - the format which should be removed from the collection."""
try:
res = run_sql("DELETE FROM collection_format WHERE id_collection=%s AND id_format=%s", (colID, fmtID))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def remove_fld(colID, fldID, fldvID=''):
"""Removes a field from the collection given.
    colID - the collection the field is connected to
fldID - the field which should be removed from the collection."""
try:
sql = "DELETE FROM collection_field_fieldvalue WHERE id_collection=%s AND id_field=%s"
params = [colID, fldID]
if fldvID:
if fldvID != "None":
sql += " AND id_fieldvalue=%s"
params.append(fldvID)
else:
sql += " AND id_fieldvalue is NULL"
res = run_sql(sql, tuple(params))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def delete_fldv(fldvID):
"""Deletes all data for the given fieldvalue
fldvID - delete all data in the tables associated with fieldvalue and this id"""
try:
res = run_sql("DELETE FROM collection_field_fieldvalue WHERE id_fieldvalue=%s", (fldvID, ))
res = run_sql("DELETE FROM fieldvalue WHERE id=%s", (fldvID, ))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def delete_pbx(pbxID):
"""Deletes all data for the given portalbox
pbxID - delete all data in the tables associated with portalbox and this id """
try:
res = run_sql("DELETE FROM collection_portalbox WHERE id_portalbox=%s", (pbxID, ))
res = run_sql("DELETE FROM portalbox WHERE id=%s", (pbxID, ))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def delete_fmt(fmtID):
"""Deletes all data for the given format
fmtID - delete all data in the tables associated with format and this id """
try:
res = run_sql("DELETE FROM format WHERE id=%s", (fmtID, ))
res = run_sql("DELETE FROM collection_format WHERE id_format=%s", (fmtID, ))
res = run_sql("DELETE FROM formatname WHERE id_format=%s", (fmtID, ))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def delete_col(colID):
"""Deletes all data for the given collection
colID - delete all data in the tables associated with collection and this id """
try:
res = run_sql("DELETE FROM collection WHERE id=%s", (colID, ))
res = run_sql("DELETE FROM collectionname WHERE id_collection=%s", (colID, ))
res = run_sql("DELETE FROM collection_rnkMETHOD WHERE id_collection=%s", (colID, ))
res = run_sql("DELETE FROM collection_collection WHERE id_dad=%s", (colID, ))
res = run_sql("DELETE FROM collection_collection WHERE id_son=%s", (colID, ))
res = run_sql("DELETE FROM collection_portalbox WHERE id_collection=%s", (colID, ))
res = run_sql("DELETE FROM collection_format WHERE id_collection=%s", (colID, ))
res = run_sql("DELETE FROM collection_field_fieldvalue WHERE id_collection=%s", (colID, ))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def add_fmt(code, name, rtype):
"""Add a new output format. Returns the id of the format.
code - the code for the format, max 6 chars.
name - the default name for the default language of the format.
rtype - the default nametype"""
try:
res = run_sql("INSERT INTO format (code, name) values (%s,%s)", (code, name))
fmtID = run_sql("SELECT id FROM format WHERE code=%s", (code,))
res = run_sql("INSERT INTO formatname(id_format, type, ln, value) VALUES (%s,%s,%s,%s)",
(fmtID[0][0], rtype, CFG_SITE_LANG, name))
return (1, fmtID)
except StandardError, e:
register_exception()
return (0, e)
def update_fldv(fldvID, name, value):
"""Modify existing fieldvalue
fldvID - id of fieldvalue to modify
value - the value of the fieldvalue
name - the name of the fieldvalue."""
try:
res = run_sql("UPDATE fieldvalue set name=%s where id=%s", (name, fldvID))
res = run_sql("UPDATE fieldvalue set value=%s where id=%s", (value, fldvID))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def add_fldv(name, value):
"""Add a new fieldvalue, returns id of fieldvalue
value - the value of the fieldvalue
name - the name of the fieldvalue."""
try:
res = run_sql("SELECT id FROM fieldvalue WHERE name=%s and value=%s", (name, value))
if not res:
res = run_sql("INSERT INTO fieldvalue (name, value) values (%s,%s)", (name, value))
res = run_sql("SELECT id FROM fieldvalue WHERE name=%s and value=%s", (name, value))
if res:
return (1, res[0][0])
else:
raise StandardError
except StandardError, e:
register_exception()
return (0, e)
def add_pbx(title, body):
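    """Add a new portalbox, returns the id of the portalbox
    title - the title of the portalbox
    body - the content of the portalbox"""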
try:
res = run_sql("INSERT INTO portalbox (title, body) values (%s,%s)", (title, body))
res = run_sql("SELECT id FROM portalbox WHERE title=%s AND body=%s", (title, body))
if res:
return (1, res[0][0])
else:
raise StandardError
except StandardError, e:
register_exception()
return (0, e)
def add_col(colNAME, dbquery=None):
"""Adds a new collection to collection table
colNAME - the default name for the collection, saved to collection and collectionname
dbquery - query related to the collection"""
    # BTW, sometimes '' is passed instead of None, so change it to None
if not dbquery:
dbquery = None
try:
rtype = get_col_nametypes()[0][0]
colID = run_sql("SELECT id FROM collection WHERE id=1")
if colID:
res = run_sql("INSERT INTO collection (name,dbquery) VALUES (%s,%s)",
(colNAME,dbquery))
else:
res = run_sql("INSERT INTO collection (id,name,dbquery) VALUES (1,%s,%s)",
(colNAME,dbquery))
colID = run_sql("SELECT id FROM collection WHERE name=%s", (colNAME,))
res = run_sql("INSERT INTO collectionname(id_collection, type, ln, value) VALUES (%s,%s,%s,%s)",
(colID[0][0], rtype, CFG_SITE_LANG, colNAME))
if colID:
return (1, colID[0][0])
else:
raise StandardError
except StandardError, e:
register_exception()
return (0, e)
def add_col_pbx(colID, pbxID, ln, position, score=''):
"""add a portalbox to the collection.
colID - the id of the collection involved
pbxID - the portalbox to add
ln - which language the portalbox is for
score - decides which portalbox is the most important
position - position on page the portalbox should appear."""
try:
if score:
res = run_sql("INSERT INTO collection_portalbox(id_portalbox, id_collection, ln, score, position) values (%s,%s,'%s',%s,%s)", (real_escape_string(pbxID), real_escape_string(colID), real_escape_string(ln), real_escape_string(score), real_escape_string(position))) # kwalitee: disable=sql
else:
res = run_sql("SELECT score FROM collection_portalbox WHERE id_collection=%s and ln=%s and position=%s ORDER BY score desc, ln, position", (colID, ln, position))
if res:
score = int(res[0][0])
else:
score = 0
res = run_sql("INSERT INTO collection_portalbox(id_portalbox, id_collection, ln, score, position) values (%s,%s,%s,%s,%s)", (pbxID, colID, ln, (score + 1), position))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def add_col_fmt(colID, fmtID, score=''):
"""Add a output format to the collection.
colID - the id of the collection involved
fmtID - the id of the format.
score - the score of the format, decides sorting, if not given, place the format on top"""
try:
if score:
res = run_sql("INSERT INTO collection_format(id_format, id_collection, score) values (%s,%s,%s)", (fmtID, colID, score))
else:
res = run_sql("SELECT score FROM collection_format WHERE id_collection=%s ORDER BY score desc", (colID, ))
if res:
score = int(res[0][0])
else:
score = 0
res = run_sql("INSERT INTO collection_format(id_format, id_collection, score) values (%s,%s,%s)", (fmtID, colID, (score + 1)))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def add_col_fld(colID, fldID, type, fldvID=''):
"""Add a sort/search/field to the collection.
colID - the id of the collection involved
fldID - the id of the field.
fldvID - the id of the fieldvalue.
type - which type, seo, sew...
score - the score of the format, decides sorting, if not given, place the format on top"""
try:
if fldvID and fldvID not in [-1, "-1"]:
run_sql("DELETE FROM collection_field_fieldvalue WHERE id_collection=%s AND id_field=%s and type=%s and id_fieldvalue is NULL", (colID, fldID, type))
res = run_sql("SELECT score FROM collection_field_fieldvalue WHERE id_collection=%s AND id_field=%s and type=%s ORDER BY score desc", (colID, fldID, type))
if res:
score = int(res[0][0])
res = run_sql("SELECT score_fieldvalue FROM collection_field_fieldvalue WHERE id_collection=%s AND id_field=%s and type=%s ORDER BY score_fieldvalue desc", (colID, fldID, type))
else:
res = run_sql("SELECT score FROM collection_field_fieldvalue WHERE id_collection=%s and type=%s ORDER BY score desc", (colID, type))
if res:
score = int(res[0][0]) + 1
else:
score = 1
res = run_sql("SELECT id_collection,id_field,id_fieldvalue,type,score,score_fieldvalue FROM collection_field_fieldvalue where id_field=%s and id_collection=%s and type=%s and id_fieldvalue=%s", (fldID, colID, type, fldvID))
if not res:
run_sql("UPDATE collection_field_fieldvalue SET score_fieldvalue=score_fieldvalue+1 WHERE id_field=%s AND id_collection=%s and type=%s", (fldID, colID, type))
res = run_sql("INSERT INTO collection_field_fieldvalue(id_field, id_fieldvalue, id_collection, type, score, score_fieldvalue) values (%s,%s,%s,%s,%s,%s)", (fldID, fldvID, colID, type, score, 1))
else:
return (0, (1, "Already exists"))
else:
res = run_sql("SELECT id_collection,id_field,id_fieldvalue,type,score,score_fieldvalue FROM collection_field_fieldvalue WHERE id_collection=%s AND type=%s and id_field=%s and id_fieldvalue is NULL", (colID, type, fldID))
if res:
return (0, (1, "Already exists"))
else:
run_sql("UPDATE collection_field_fieldvalue SET score=score+1")
res = run_sql("INSERT INTO collection_field_fieldvalue(id_field, id_collection, type, score,score_fieldvalue) values (%s,%s,%s,%s, 0)", (fldID, colID, type, 1))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def modify_dbquery(colID, dbquery=None):
"""Modify the dbquery of an collection.
colID - the id of the collection involved
dbquery - the new dbquery"""
# BTW, sometimes '' is passed instead of None, so change it to None
if not dbquery:
dbquery = None
try:
res = run_sql("UPDATE collection SET dbquery=%s WHERE id=%s", (dbquery, colID))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def modify_pbx(colID, pbxID, sel_ln, score='', position='', title='', body=''):
"""Modify a portalbox
colID - the id of the collection involved
pbxID - the id of the portalbox that should be modified
sel_ln - the language of the portalbox that should be modified
title - the title
body - the content
score - if several portalboxes in one position, who should appear on top.
position - position on page"""
try:
if title:
res = run_sql("UPDATE portalbox SET title=%s WHERE id=%s", (title, pbxID))
if body:
res = run_sql("UPDATE portalbox SET body=%s WHERE id=%s", (body, pbxID))
if score:
res = run_sql("UPDATE collection_portalbox SET score=%s WHERE id_collection=%s and id_portalbox=%s and ln=%s", (score, colID, pbxID, sel_ln))
if position:
res = run_sql("UPDATE collection_portalbox SET position=%s WHERE id_collection=%s and id_portalbox=%s and ln=%s", (position, colID, pbxID, sel_ln))
return (1, "")
except Exception, e:
register_exception()
return (0, e)
def switch_fld_score(colID, id_1, id_2):
"""Switch the scores of id_1 and id_2 in collection_field_fieldvalue
colID - collection the id_1 or id_2 is connected to
id_1/id_2 - id field from tables like format..portalbox...
table - name of the table"""
try:
res1 = run_sql("SELECT score FROM collection_field_fieldvalue WHERE id_collection=%s and id_field=%s", (colID, id_1))
res2 = run_sql("SELECT score FROM collection_field_fieldvalue WHERE id_collection=%s and id_field=%s", (colID, id_2))
if res1[0][0] == res2[0][0]:
return (0, (1, "Cannot rearrange the selected fields, either rearrange by name or use the mySQL client to fix the problem."))
else:
res = run_sql("UPDATE collection_field_fieldvalue SET score=%s WHERE id_collection=%s and id_field=%s", (res2[0][0], colID, id_1))
res = run_sql("UPDATE collection_field_fieldvalue SET score=%s WHERE id_collection=%s and id_field=%s", (res1[0][0], colID, id_2))
return (1, "")
except StandardError, e:
register_exception()
return (0, e)
def switch_fld_value_score(colID, id_1, fldvID_1, fldvID_2):
"""Switch the scores of two field_value
colID - collection the id_1 or id_2 is connected to
id_1/id_2 - id field from tables like format..portalbox...
table - name of the table"""
try:
res1 = run_sql("SELECT score_fieldvalue FROM collection_field_fieldvalue WHERE id_collection=%s and id_field=%s and id_fieldvalue=%s", (colID, id_1, fldvID_1))
res2 = run_sql("SELECT score_fieldvalue FROM collection_field_fieldvalue WHERE id_collection=%s and id_field=%s and id_fieldvalue=%s", (colID, id_1, fldvID_2))
if res1[0][0] == res2[0][0]:
return (0, (1, "Cannot rearrange the selected fields, either rearrange by name or use the mySQL client to fix the problem."))
else:
res = run_sql("UPDATE collection_field_fieldvalue SET score_fieldvalue=%s WHERE id_collection=%s and id_field=%s and id_fieldvalue=%s", (res2[0][0], colID, id_1, fldvID_1))
res = run_sql("UPDATE collection_field_fieldvalue SET score_fieldvalue=%s WHERE id_collection=%s and id_field=%s and id_fieldvalue=%s", (res1[0][0], colID, id_1, fldvID_2))
return (1, "")
except Exception, e:
register_exception()
return (0, e)
def switch_pbx_score(colID, id_1, id_2, sel_ln):
"""Switch the scores of id_1 and id_2 in the table given by the argument.
colID - collection the id_1 or id_2 is connected to
id_1/id_2 - id field from tables like format..portalbox...
table - name of the table"""
try:
res1 = run_sql("SELECT score FROM collection_portalbox WHERE id_collection=%s and id_portalbox=%s and ln=%s", (colID, id_1, sel_ln))
res2 = run_sql("SELECT score FROM collection_portalbox WHERE id_collection=%s and id_portalbox=%s and ln=%s", (colID, id_2, sel_ln))
if res1[0][0] == res2[0][0]:
return (0, (1, "Cannot rearrange the selected fields, either rearrange by name or use the mySQL client to fix the problem."))
res = run_sql("UPDATE collection_portalbox SET score=%s WHERE id_collection=%s and id_portalbox=%s and ln=%s", (res2[0][0], colID, id_1, sel_ln))
res = run_sql("UPDATE collection_portalbox SET score=%s WHERE id_collection=%s and id_portalbox=%s and ln=%s", (res1[0][0], colID, id_2, sel_ln))
return (1, "")
except Exception, e:
register_exception()
return (0, e)
def switch_score(colID, id_1, id_2, table):
"""Switch the scores of id_1 and id_2 in the table given by the argument.
colID - collection the id_1 or id_2 is connected to
id_1/id_2 - id field from tables like format..portalbox...
table - name of the table"""
try:
res1 = run_sql("SELECT score FROM collection_%s WHERE id_collection=%%s and id_%s=%%s"% (wash_table_column_name(table), wash_table_column_name(table)), (colID, id_1))
res2 = run_sql("SELECT score FROM collection_%s WHERE id_collection=%%s and id_%s=%%s"% (wash_table_column_name(table), wash_table_column_name(table)), (colID, id_2))
if res1[0][0] == res2[0][0]:
return (0, (1, "Cannot rearrange the selected fields, either rearrange by name or use the mySQL client to fix the problem."))
res = run_sql("UPDATE collection_%s SET score=%%s WHERE id_collection=%%s and id_%s=%%s"% (wash_table_column_name(table), wash_table_column_name(table)), (res2[0][0], colID, id_1))
res = run_sql("UPDATE collection_%s SET score=%%s WHERE id_collection=%%s and id_%s=%%s"% (wash_table_column_name(table), wash_table_column_name(table)), (res1[0][0], colID, id_2))
return (1, "")
except Exception, e:
register_exception()
return (0, e)
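# Editor's note: a minimal, hedged usage sketch (not part of the original
# module). The switch_* helpers above share one convention: they return
# (1, "") on success and (0, error) on failure. The collection id, row ids and
# table name below are purely hypothetical:
#
#     (ok, error) = switch_score(colID=2, id_1=5, id_2=7, table="format")
#     if not ok:
#         pass  # report 'error' back to the admin interface as appropriate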
def get_detailed_page_tabs(colID=None, recID=None, ln=CFG_SITE_LANG):
"""
Returns the complete list of tabs to be displayed in the
detailed record pages.
Returned structured is a dict with
- key : last component of the url that leads to detailed record tab: http:www.../CFG_SITE_RECORD/74/key
- values: a dictionary with the following keys:
- label: *string* label to be printed as tab (Not localized here)
- visible: *boolean* if False, tab should not be shown
                 - enabled: *boolean* if False, tab should be shown as disabled (greyed out)
- order: *int* position of the tab in the list of tabs
- ln: language of the tab labels
returns dict
"""
_ = gettext_set_language(ln)
tabs = {'metadata' : {'label': _('Information'), 'visible': False, 'enabled': True, 'order': 1},
'references': {'label': _('References'), 'visible': False, 'enabled': True, 'order': 2},
'citations' : {'label': _('Citations'), 'visible': False, 'enabled': True, 'order': 3},
'keywords' : {'label': _('Keywords'), 'visible': False, 'enabled': True, 'order': 4},
'comments' : {'label': _('Discussion'), 'visible': False, 'enabled': True, 'order': 5},
'usage' : {'label': _('Usage statistics'), 'visible': False, 'enabled': True, 'order': 6},
'files' : {'label': _('Files'), 'visible': False, 'enabled': True, 'order': 7},
'plots' : {'label': _('Plots'), 'visible': False, 'enabled': True, 'order': 8},
'holdings' : {'label': _('Holdings'), 'visible': False, 'enabled': True, 'order': 9},
'linkbacks' : {'label': _('Linkbacks'), 'visible': False, 'enabled': True, 'order': 10},
'hepdata' : {'label': _('HepData'), 'visible': False, 'enabled': True, 'order': 11}
}
res = run_sql("SELECT tabs FROM collectiondetailedrecordpagetabs " + \
"WHERE id_collection=%s", (colID, ))
if len(res) > 0:
tabs_state = res[0][0].split(';')
for tab_state in tabs_state:
if tabs.has_key(tab_state):
tabs[tab_state]['visible'] = True;
else:
# no preference set for this collection.
# assume all tabs are displayed
for key in tabs.keys():
tabs[key]['visible'] = True
if not CFG_WEBCOMMENT_ALLOW_COMMENTS and \
not CFG_WEBCOMMENT_ALLOW_REVIEWS:
tabs['comments']['visible'] = False
tabs['comments']['enabled'] = False
if recID is not None:
# Disable references if no references found
#bfo = BibFormatObject(recID)
#if bfe_references.format_element(bfo, '', '') == '':
# tabs['references']['enabled'] = False
## FIXME: the above was commented out because bfe_references
## may be too slow. And we do not really need this anyway
## because we can disable tabs in WebSearch Admin on a
## collection-by-collection basis. If we need this, then we
## should probably call bfo.fields('999') here that should be
## much faster than calling bfe_references.
# Disable citations if not citations found
#if len(get_cited_by(recID)) == 0:
# tabs['citations']['enabled'] = False
## FIXME: the above was commented out because get_cited_by()
## may be too slow. And we do not really need this anyway
## because we can disable tags in WebSearch Admin on a
## collection-by-collection basis.
# Disable Files tab if no file found except for Plots:
disable_files_tab_p = True
for abibdoc in BibRecDocs(recID).list_bibdocs():
abibdoc_type = abibdoc.get_type()
if abibdoc_type == 'Plot':
continue # ignore attached plots
else:
if CFG_INSPIRE_SITE and not \
abibdoc_type in ('', 'INSPIRE-PUBLIC', 'Supplementary Material'):
# ignore non-empty, non-INSPIRE-PUBLIC, non-suppl doctypes for INSPIRE
continue
# okay, we found at least one non-Plot file:
disable_files_tab_p = False
break
if disable_files_tab_p:
tabs['files']['enabled'] = False
#Disable holdings tab if collection != Books
collection = run_sql("""select name from collection where id=%s""", (colID, ))
if collection[0][0] != 'Books':
tabs['holdings']['enabled'] = False
# Disable Plots tab if no docfile of doctype Plot found
brd = BibRecDocs(recID)
if len(brd.list_bibdocs('Plot')) == 0:
tabs['plots']['enabled'] = False
if CFG_CERN_SITE:
from invenio.search_engine import get_collection_reclist
if recID in get_collection_reclist("Books & Proceedings"):
tabs['holdings']['visible'] = True
tabs['holdings']['enabled'] = True
# now treating the HEP data -> we have to check if there is HepData
# associated with the record and if so, make the tab visible and enabled
has_hepdata = record_has_hepdata_attached(recID)
tabs['hepdata']['visible'] = has_hepdata
tabs['hepdata']['enabled'] = has_hepdata
tabs[''] = tabs['metadata']
del tabs['metadata']
return tabs
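# Editor's note: hedged illustration (not part of the original source) of how
# the dict returned by get_detailed_page_tabs() can be consumed; the colID and
# recID values below are hypothetical.
#
#     tabs = get_detailed_page_tabs(colID=1, recID=42, ln='en')
#     visible_keys = [key for (key, tab) in tabs.iteritems()
#                     if tab['visible'] and tab['enabled']]
#     visible_keys.sort(key=lambda k: tabs[k]['order'])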
def record_has_hepdata_attached(recID):
"""returns True or False depending if there is HepData attached or not"""
from invenio.search_engine import search_pattern
return len(search_pattern(p="786__w:%s" % (str(recID)))) > 0
def get_detailed_page_tabs_counts(recID):
"""
Returns the number of citations, references and comments/reviews
that have to be shown on the corresponding tabs in the
detailed record pages
@param recID: record id
@return: dictionary with following keys
'Citations': number of citations to be shown in the "Citations" tab
'References': number of references to be shown in the "References" tab
'Discussions': number of comments and reviews to be shown in the "Discussion" tab
"""
num_comments = 0 #num of comments
num_reviews = 0 #num of reviews
tabs_counts = {'Citations' : 0,
'References' : -1,
'Discussions' : 0
}
from invenio.search_engine import get_field_tags, get_record
if CFG_BIBRANK_SHOW_CITATION_LINKS:
if CFG_INSPIRE_SITE:
from invenio.search_engine import search_unit
citers_recids = intbitset(get_cited_by(recID))
citeable_recids = search_unit(p='citeable', f='collection')
tabs_counts['Citations'] = len(citers_recids & citeable_recids)
else:
tabs_counts['Citations'] = get_cited_by_count(recID)
if not CFG_CERN_SITE:#FIXME:should be replaced by something like CFG_SHOW_REFERENCES
reftag = ""
reftags = get_field_tags("reference")
if reftags:
reftag = reftags[0]
tmprec = get_record(recID)
if reftag and len(reftag) > 4:
tabs_counts['References'] = len(record_get_field_instances(tmprec, reftag[0:3], reftag[3], reftag[4]))
# obtain number of comments/reviews
from invenio.webcommentadminlib import get_nb_reviews, get_nb_comments
if CFG_WEBCOMMENT_ALLOW_COMMENTS and CFG_WEBSEARCH_SHOW_COMMENT_COUNT:
num_comments = get_nb_comments(recID, count_deleted=False)
if CFG_WEBCOMMENT_ALLOW_REVIEWS and CFG_WEBSEARCH_SHOW_REVIEW_COUNT:
num_reviews = get_nb_reviews(recID, count_deleted=False)
if num_comments or num_reviews:
tabs_counts['Discussions'] = num_comments + num_reviews
return tabs_counts
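# Editor's note: small hedged example (not part of the original module); the
# record id is hypothetical.
#
#     counts = get_detailed_page_tabs_counts(recID=42)
#     # -> e.g. {'Citations': 12, 'References': 34, 'Discussions': 2}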
| gpl-2.0 | 325,945,467,182,998,340 | 45.626352 | 507 | 0.570987 | false |
coinchon/crc-dabmod | src/crc-dwap.py | 1 | 21796 | #!/usr/bin/env python
# Copyright (C) 2006, 2007, 2008, 2009-2010 Her Majesty the Queen in
# Right of Canada (Communications Research Center Canada)
# This file is part of CRC-DADMOD.
#
# CRC-DADMOD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# CRC-DADMOD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CRC-DADMOD. If not, see <http://www.gnu.org/licenses/>.
from wxPython.wx import *
from optparse import OptionParser
from gnuradio import gr
from gnuradio import usrp
from gnuradio.wxgui import fftsink, scopesink
from gnuradio.eng_notation import num_to_str
from gnuradio.eng_option import *
ID_ABOUT = wxNewId()
ID_EXIT = wxNewId()
ID_GAIN_SLIDER0 = wxNewId()
ID_FREQ_SLIDER0 = wxNewId()
ID_GAIN_SLIDER1 = wxNewId()
ID_FREQ_SLIDER1 = wxNewId()
ID_START = wxNewId()
ID_STOP = wxNewId()
def gcd(a, b) :
if b == 0 :
return a
return gcd(b, a % b)
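# Editor's note (hedged, not part of the original file): gcd() is used further
# below to reduce the resampling ratio, e.g.
#     factor = gcd(dac_freq / 512, options.samplingrate)
# with the hypothetical values gcd(250e3, 3.2e6) -> 50e3.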
def appendFrequency(option, opt, value, parser):
if parser.values.frequency is None :
parser.values.frequency = [ value ]
else :
parser.values.frequency.append(value)
def listUsrp(option, opt, value, parser):
id = 0
    while (True) :
try:
version = usrp._look_for_usrp(id)
print "USRP #%i" % id
print " Rev: %i" % version
dst = usrp.sink_c(id)
src = usrp.source_c(id)
print " Tx"
for db in dst.db:
if (db[0].dbid() != -1):
print " %s" % db[0].side_and_name()
(min, max, offset) = db[0].freq_range()
print " Frequency"
print " Min: %sHz" % num_to_str(min)
print " Max: %sHz" % num_to_str(max)
print " Offset: %sHz" % num_to_str(offset)
(min, max, offset) = db[0].gain_range()
print " Gain"
print " Min: %sdB" % num_to_str(min)
print " Max: %sdB" % num_to_str(max)
print " Offset: %sdB" % num_to_str(offset)
print " Rx"
for db in src.db:
if (db[0].dbid() != -1):
print " %s" % db[0].side_and_name()
(min, max, offset) = db[0].freq_range()
print " Frequency"
print " Min: %sHz" % num_to_str(min)
print " Max: %sHz" % num_to_str(max)
print " Offset: %sHz" % num_to_str(offset)
(min, max, offset) = db[0].gain_range()
print " Gain"
print " Min: %sdB" % num_to_str(min)
print " Max: %sdB" % num_to_str(max)
print " Offset: %sdB" % num_to_str(offset)
except RuntimeError:
break
id += 1
raise SystemExit
class MyFrame(wxFrame):
def __init__(self, parent, ID, title):
wxFrame.__init__(self, parent, ID, title,
wxDefaultPosition)
self.pga = 0
self.pgaMin = -20
self.pgaMax = 0
self.pgaStep = 0.25
# Parsing options
parser = OptionParser(option_class=eng_option,
usage="usage: %prog [options] filename1" \
" [-f frequency2 filename2 [...]]")
parser.add_option("-a", "--agc", action="store_true",
help="enable agc")
parser.add_option("-c", "--clockrate", type="eng_float", default=128e6,
help="set USRP clock rate (128e6)")
parser.add_option("--copy", action="store_true",
help="enable real to imag data copy when in real mode")
parser.add_option("-e", "--encoding", type="choice", choices=["s", "f"],
default="f", help="choose data encoding: [s]igned or [f]loat.")
parser.add_option("-f", "--frequency", type="eng_float",
action="callback", callback=appendFrequency,
help="set output frequency (222.064e6)")
parser.add_option("-g", "--gain", type="float",
help="set output pga gain")
parser.add_option("-l", "--list", action="callback", callback=listUsrp,
help="list USRPs and daugtherboards")
parser.add_option("-m", "--mode", type="eng_float", default=2,
help="mode: 1: real, 2: complex (2)")
parser.add_option("-o", "--osc", action="store_true",
help="enable oscilloscope")
parser.add_option("-r", "--samplingrate", type="eng_float",
default=3.2e6,
help="set input sampling rate (3200000)")
parser.add_option("-s", "--spectrum", action="store_true",
help="enable spectrum analyzer")
# parser.add_option("-t", "--tx", type="choice", choices=["A", "B"],
# default="A", help="choose USRP tx A|B output (A)")
parser.add_option("-u", "--usrp", action="store_true",
help="enable USRP output")
(options, args) = parser.parse_args()
if len(args) == 0 :
options.filename = [ "/dev/stdin" ]
else :
options.filename = args
# Setting default frequency
if options.frequency is None :
options.frequency = [ 222.064e6 ]
if len(options.filename) != len(options.frequency) :
parser.error("Nb input file != nb frequency!")
# Status bar
# self.CreateStatusBar(3, 0)
# msg = "PGA: %.2f dB" % (self.pga * self.pgaStep)
# self.SetStatusText(msg, 1)
# msg = "Freq: %.3f mHz" % (options.frequency[0] / 1000000.0)
# self.SetStatusText(msg, 2)
# Menu bar
menu = wxMenu()
menu.Append(ID_ABOUT, "&About",
"More information about this program")
menu.AppendSeparator()
menu.Append(ID_EXIT, "E&xit", "Terminate the program")
menuBar = wxMenuBar()
menuBar.Append(menu, "&File")
self.SetMenuBar(menuBar)
# Main windows
mainSizer = wxFlexGridSizer(0, 1)
sliderSizer = wxFlexGridSizer(0, 2)
buttonSizer = wxBoxSizer(wxHORIZONTAL)
if options.usrp :
# TX d'board 0
gainLabel = wxStaticText(self, -1, "PGA 0")
gainSlider = wxSlider(self, ID_GAIN_SLIDER0, self.pga,
self.pgaMin / self.pgaStep, self.pgaMax / self.pgaStep,
style = wxSL_HORIZONTAL | wxSL_AUTOTICKS)
gainSlider.SetSize((400, -1))
sliderSizer.Add(gainLabel, 0,
wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0)
sliderSizer.Add(gainSlider, 0,
wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0)
freqLabel = wxStaticText(self, -1, "Frequency 0")
freqSlider = wxSlider(self, ID_FREQ_SLIDER0,
options.frequency[0] / 16000, 0, 20e3,
style = wxSL_HORIZONTAL | wxSL_AUTOTICKS)
freqSlider.SetSize((400, -1))
sliderSizer.Add(freqLabel, 0,
wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0)
sliderSizer.Add(freqSlider, 0,
wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0)
if len(options.frequency) > 1 :
# TX d'board 1
gainLabel = wxStaticText(self, -1, "PGA 1")
gainSlider = wxSlider(self, ID_GAIN_SLIDER1, self.pga,
self.pgaMin / self.pgaStep, self.pgaMax / self.pgaStep,
style = wxSL_HORIZONTAL | wxSL_AUTOTICKS)
gainSlider.SetSize((400, -1))
sliderSizer.Add(gainLabel, 0,
wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0)
sliderSizer.Add(gainSlider, 0,
wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0)
freqLabel = wxStaticText(self, -1, "Frequency 1")
freqSlider = wxSlider(self, ID_FREQ_SLIDER1,
options.frequency[1] / 16000, 0, 20e3,
style = wxSL_HORIZONTAL | wxSL_AUTOTICKS)
freqSlider.SetSize((400, -1))
sliderSizer.Add(freqLabel, 0,
wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0)
sliderSizer.Add(freqSlider, 0,
wxALIGN_CENTER_VERTICAL | wxFIXED_MINSIZE, 0)
mainSizer.Add(sliderSizer, 1, wxEXPAND, 0)
start = wxButton(self, ID_START, "Start")
stop = wxButton(self, ID_STOP, "Stop")
buttonSizer.Add(start, 1, wxALIGN_CENTER, 0)
buttonSizer.Add(stop, 1, wxALIGN_CENTER, 0)
mainSizer.Add(buttonSizer, 1, wxEXPAND, 0)
# GnuRadio
self.fg = gr.flow_graph()
if options.mode == 1 :
print "Source: real"
if (options.encoding == "s") :
print "Source encoding: short"
src = gr.file_source(gr.sizeof_short, options.filename[0], 1)
if (options.copy) :
print "Imag: copy"
imag = src
else :
print "Imag: null"
imag = gr.null_source(gr.sizeof_short)
interleaver = gr.interleave(gr.sizeof_short)
self.fg.connect(src, (interleaver, 0))
self.fg.connect(imag, (interleaver, 1))
tail = interleaver
elif (options.encoding == "f") :
print "Source encoding: float"
src = gr.file_source(gr.sizeof_gr_complex,
options.filename[0], 1)
tail = src
elif (options.mode == 2) :
print "Source: complex"
if len(options.frequency) == 1 :
if (options.encoding == "s") :
print "Source encoding: short"
src = gr.file_source(gr.sizeof_short,
options.filename[0], 1)
elif (options.encoding == "f") :
print "Source encoding: float"
src = gr.file_source(gr.sizeof_gr_complex,
options.filename[0], 1)
else :
parser.error("Invalid encoding type for complex data!")
tail = src
elif (len(options.frequency) == 2) :
src0 = gr.file_source(gr.sizeof_gr_complex,
options.filename[0], 1)
src1 = gr.file_source(gr.sizeof_gr_complex,
options.filename[1], 1)
interleaver = gr.interleave(gr.sizeof_gr_complex)
self.fg.connect(src0, (interleaver, 0))
self.fg.connect(src1, (interleaver, 1))
tail = interleaver
else :
parser.error(
"Invalid number of source (> 2) with complex input!")
else :
parser.error("Invalid mode!")
# Interpolation
dac_freq = options.clockrate
interp = int(dac_freq / options.samplingrate)
if interp == 0 :
parser.error("Invalid sampling rate!")
if options.mode == 2 :
print "Input sampling rate: %s complex samples/s" % \
num_to_str(options.samplingrate)
else :
print "Input sampling rate: %s samples/s" % \
num_to_str(options.samplingrate)
print "Interpolation rate: int(%s / %s) = %sx" % \
(num_to_str(dac_freq), num_to_str(options.samplingrate), interp)
if interp > 512 :
factor = gcd(dac_freq / 512, options.samplingrate)
num = int((dac_freq / 512) / factor)
den = int(options.samplingrate / factor)
print "Resampling by %i / %i" % (num, den)
resampler = blks.rational_resampler_ccc(self.fg, num, den)
self.fg.connect(tail, resampler)
tail = resampler
interp = 512
options.samplingrate = dac_freq / 512
# AGC
if options.agc :
agc = gr.agc_cc()
self.fg.connect(tail, agc)
tail = agc
# USRP
if options.usrp :
nchan = len(options.frequency)
if len(options.frequency) == 1 :
if options.mode == 1 :
mux = 0x00000098
elif options.mode == 2 :
mux = 0x00000098
else :
parser.error("Unsupported mode for USRP mux!")
elif len(options.frequency) == 2 :
if options.mode == 1 :
mux = 0x0000ba98
elif options.mode == 2 :
mux = 0x0000ba98
else :
parser.error("Unsupported mode for USRP mux!")
else :
parser.error("Invalid number of frequency [0..2]!")
# if options.tx == "A" :
# mux = 0x00000098
# else :
# mux = 0x00009800
print "Nb channels: ", nchan
print "Mux: 0x%x" % mux
if options.encoding == 's' :
dst = usrp.sink_s(0, interp, nchan, mux)
elif options.encoding == 'f' :
dst = usrp.sink_c(0, interp, nchan, mux)
else :
parser.error("Unsupported data encoding for USRP!")
dst.set_verbose(1)
for i in range(len(options.frequency)) :
if options.gain is None :
print "Setting gain to %f" % dst.pga_max()
dst.set_pga(i << 1, dst.pga_max())
else :
print "Setting gain to %f" % options.gain
dst.set_pga(i << 1, options.gain)
                tune = False
for dboard in dst.db:
if (dboard[0].dbid() != -1):
device = dboard[0]
print "Tuning TX d'board %s to %sHz" % \
(device.side_and_name(),
num_to_str(options.frequency[i]))
device.lo_offset = 38e6
(min, max, offset) = device.freq_range()
print " Frequency"
print " Min: %sHz" % num_to_str(min)
print " Max: %sHz" % num_to_str(max)
print " Offset: %sHz" % num_to_str(offset)
#device.set_gain(device.gain_range()[1])
device.set_enable(True)
tune = \
dst.tune(device._which, device,
options.frequency[i] * 128e6 / dac_freq)
if tune:
print " Baseband frequency: %sHz" % \
num_to_str(tune.baseband_freq)
print " DXC frequency: %sHz" % \
num_to_str(tune.dxc_freq)
print " Residual Freqency: %sHz" % \
num_to_str(tune.residual_freq)
print " Inverted: ", \
tune.inverted
mux = usrp.determine_tx_mux_value(dst,
(device._which, 0))
dst.set_mux(mux)
break
else:
print " Failed!"
if not tune:
print " Failed!"
raise SystemExit
# int nunderruns ()
print "USRP"
print " Rx halfband: ", dst.has_rx_halfband()
print " Tx halfband: ", dst.has_tx_halfband()
print " Nb DDC: ", dst.nddc()
print " Nb DUC: ", dst.nduc()
#dst._write_9862(0, 14, 224)
print " DAC frequency: %s samples/s" % num_to_str(dst.dac_freq())
print " Fpga decimation rate: %s -> %s samples/s" % \
(num_to_str(dst.interp_rate()),
num_to_str(dac_freq / dst.interp_rate()))
print " Nb channels:",
if hasattr(dst, "nchannels()") :
print dst.nchannels()
else:
print "N/A"
print " Mux:",
if hasattr(dst, "mux()") :
print "0x%x" % dst.mux()
else :
print "N/A"
print " FPGA master clock frequency:",
if hasattr(dst, "fpga_master_clock_freq()") :
print "%sHz" % num_to_str(dst.fpga_master_clock_freq())
else :
print "N/A"
print " Converter rate:",
if hasattr(dst, "converter_rate()") :
print "%s" % num_to_str(dst.converter_rate())
else :
print "N/A"
print " DAC rate:",
if hasattr(dst, "dac_rate()") :
print "%s sample/s" % num_to_str(dst.dac_rate())
else :
print "N/A"
print " Interp rate: %sx" % num_to_str(dst.interp_rate())
print " DUC frequency 0: %sHz" % num_to_str(dst.tx_freq(0))
print " DUC frequency 1: %sHz" % num_to_str(dst.tx_freq(1))
print " Programmable Gain Amplifier 0: %s dB" % \
num_to_str(dst.pga(0))
print " Programmable Gain Amplifier 1: %s dB" % \
num_to_str(dst.pga(2))
else :
dst = gr.null_sink(gr.sizeof_gr_complex)
# AGC
if options.agc :
agc = gr.agc_cc()
self.fg.connect(tail, agc)
tail = agc
self.fg.connect(tail, dst)
# oscilloscope
if options.osc :
oscPanel = wxPanel(self, -1)
if (options.encoding == "s") :
converter = gr.interleaved_short_to_complex()
self.fg.connect(tail, converter)
signal = converter
elif (options.encoding == "f") :
signal = tail
else :
parser.error("Unsupported data encoding for oscilloscope!")
#block = scope_sink_f(fg, parent, title=label, sample_rate=input_rate)
#return (block, block.win)
oscWin = scopesink.scope_sink_c(self.fg, oscPanel, "Signal",
options.samplingrate)
self.fg.connect(signal, oscWin)
mainSizer.Add(oscPanel, 1, wxEXPAND)
# spectrometer
if options.spectrum :
ymin = 0
ymax = 160
fftPanel = wxPanel(self, -1)
if (options.encoding == "s") :
converter = gr.interleaved_short_to_complex()
self.fg.connect(tail, converter)
signal = converter
elif (options.encoding == "f") :
signal = tail
else :
parser.error("Unsupported data encoding for oscilloscope!")
fftWin = fftsink.fft_sink_c(self.fg, fftPanel,
title="Spectrum",
fft_size=2048,
sample_rate=options.samplingrate,
y_per_div=(ymax - ymin) / 8,
ref_level=ymax,
fft_rate=50,
average=True
)
self.fg.connect(signal, fftWin)
mainSizer.Add(fftPanel, 1, wxEXPAND)
# Events
EVT_MENU(self, ID_ABOUT, self.OnAbout)
EVT_MENU(self, ID_EXIT, self.TimeToQuit)
EVT_SLIDER(self, ID_GAIN_SLIDER0, self.slideEvent)
EVT_SLIDER(self, ID_FREQ_SLIDER0, self.slideEvent)
EVT_SLIDER(self, ID_GAIN_SLIDER1, self.slideEvent)
EVT_SLIDER(self, ID_FREQ_SLIDER1, self.slideEvent)
EVT_BUTTON(self, ID_START, self.onClick)
EVT_BUTTON(self, ID_STOP, self.onClick)
#Layout sizers
self.SetSizer(mainSizer)
self.SetAutoLayout(1)
mainSizer.Fit(self)
self.fg.start()
def OnAbout(self, event):
dlg = wxMessageDialog(self, "This sample program shows off\n"
"frames, menus, statusbars, and this\n"
"message dialog.",
"About Me", wxOK | wxICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def TimeToQuit(self, event):
        self.Close(True)
def slideEvent(self, evt):
value = evt.GetInt()
id = evt.GetId()
        if id in (ID_GAIN_SLIDER0, ID_GAIN_SLIDER1):
msg = "PGA: %.2f dB" % (value * self.pgaStep)
self.SetStatusText(msg, 1)
        elif id in (ID_FREQ_SLIDER0, ID_FREQ_SLIDER1):
msg = "Freq: %.3f mHz" % (value * 16.0 / 1000)
self.SetStatusText(msg, 2)
else:
print "Slider event not yet coded!"
self.Close(True)
def onClick(self, event):
id = event.GetId()
if id == ID_START:
self.fg.start()
elif id == ID_STOP:
self.fg.stop()
else:
print "Click event not yet coded!"
self.Close(True)
class MyApp(wxApp):
def OnInit(self):
        frame = MyFrame(None, -1, "Digital WAve Player")
        frame.Show(True)
        self.SetTopWindow(frame)
        return True
app = MyApp(0)
app.MainLoop()
| gpl-3.0 | -4,136,697,583,841,737,000 | 38.846435 | 80 | 0.49376 | false |
madmax983/h2o-3 | h2o-py/tests/testdir_algos/deeplearning/pyunit_weights_and_distributions_deeplearning.py | 1 | 1284 | import sys, os
sys.path.insert(1, os.path.join("..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
def weights_and_distributions():
htable = h2o.upload_file(pyunit_utils.locate("smalldata/gbm_test/moppe.csv"))
htable["premiekl"] = htable["premiekl"].asfactor()
htable["moptva"] = htable["moptva"].asfactor()
htable["zon"] = htable["zon"]
# gamma
dl = H2ODeepLearningEstimator(distribution="gamma")
dl.train(x=range(3),y="medskad",training_frame=htable, weights_column="antskad")
predictions = dl.predict(htable)
# gaussian
dl = H2ODeepLearningEstimator(distribution="gaussian")
dl.train(x=range(3),y="medskad",training_frame=htable, weights_column="antskad")
predictions = dl.predict(htable)
# poisson
dl = H2ODeepLearningEstimator(distribution="poisson")
dl.train(x=range(3),y="medskad",training_frame=htable, weights_column="antskad")
predictions = dl.predict(htable)
# tweedie
dl = H2ODeepLearningEstimator(distribution="tweedie")
dl.train(x=range(3),y="medskad",training_frame=htable, weights_column="antskad")
predictions = dl.predict(htable)
if __name__ == "__main__":
pyunit_utils.standalone_test(weights_and_distributions)
else:
weights_and_distributions() | apache-2.0 | -2,314,508,829,679,752,700 | 32.815789 | 82 | 0.732087 | false |
uschille/FabSim | python/lib/DataAnalysis.py | 1 | 3679 | import numpy as np
from scipy.optimize import leastsq
def derivatives(x, y):
    num_x = len(x)
deriv = np.zeros((len(x)))
    # If there are only two input points, use a straight line as the derivative.
if num_x == 2:
deriv[0] = (y[1] - y[0]) / (x[1] - x[0])
deriv[1] = deriv[0]
return deriv
# Calculate the derivatives for the interior points. This loop uses
# a total of 6 points to calculate the derivative at any one
# point. And when the loop moves along in increasing array
# position, the same data point is used three times. So instead of
# reading the correct value from the array three times, just shift
# the values down by copying them from one variable to the next.
xi = 2*x[0]-x[1] # 0.0
xj = x[0]
xk = x[1]
yi = 2*y[0]-y[1] # 0.0
yj = y[0]
yk = y[1]
for i in xrange(1, num_x-1):
xi = xj
xj = xk
xk = x[i+1]
yi = yj
yj = yk
yk = y[i+1]
r1 = (xk - xj)*(xk - xj) + (yk - yj)*(yk - yj)
r2 = (xj - xi)*(xj - xi) + (yj - yi)*(yj - yi)
deriv[i] = ( (yj - yi)*r1 + (yk - yj)*r2 ) / ( (xj - xi)*r1 + (xk - xj)*r2 )
# Calculate the derivative at the first point, (x(0),y(0)).
slope = (y[1] - y[0]) / (x[1] - x[0])
if ((slope >= 0) and (slope >= deriv[1])) or ((slope <= 0) and (slope <= deriv[1])):
deriv[0] = 2 * slope - deriv[1]
else:
deriv[0] = slope + (abs(slope) * (slope - deriv[1])) / (abs(slope) + abs(slope - deriv[1]))
# Calculate the derivative at the last point.
slope = (y[num_x-1] - y[num_x-2]) / (x[num_x-1] - x[num_x-2])
if ((slope >= 0) and (slope >= deriv[num_x-2])) or ((slope <= 0) and (slope <= deriv[num_x-2])):
deriv[num_x-1] = 2 * slope - deriv[num_x-2]
else:
deriv[num_x-1] = slope + (abs(slope) * (slope - deriv[num_x-2])) / (abs(slope) + abs(slope - deriv[num_x-2]) )
return deriv
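# Editor's note: a hedged usage sketch, not part of the original module. The
# function returns one slope estimate per sample point:
#
#     x = [0.0, 1.0, 2.0, 3.0, 4.0]
#     y = [0.0, 1.0, 4.0, 9.0, 16.0]
#     deriv = derivatives(x, y)   # len(deriv) == len(x)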
def get_centre_of_mass(molecule_particles, bounds):
# calculate centre of mass of a sheet in a periodic box.
# Becomes incorrect if any structure extends beyond 0.5 of the box size.
cm_rel = np.array(([0.0, 0.0, 0.0 ]))
rp = molecule_particles[0] #reference particle
for p in molecule_particles:
for i in xrange(0,3):
a = p[i] - rp[i]
if a > 0.5 * bounds[i]:
a = p[i] - rp[i] - bounds[i]
elif a < -0.5 * bounds[i]:
a = p[i] - rp[i] + bounds[i]
cm_rel[i] += a
cm_rel = cm_rel / len(molecule_particles)
cm = rp + cm_rel
cm[0] = cm[0] %bounds[0]
cm[1] = cm[1] %bounds[1]
cm[2] = cm[2] %bounds[2]
#print cm
#import sys
#sys.exit()
return cm
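# Editor's note (hedged): get_centre_of_mass() relies on the assumption stated
# above that no structure spans more than half the periodic box. For example,
# in a 100-unit-wide box, particles at x=99 and x=1 are treated as 2 units
# apart, so their wrapped centre of mass ends up near x=0 rather than x=50.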
def f_min(X,p):
plane_xyz = p[0:3]
distance = (plane_xyz*X.T).sum(axis=1) + p[3]
return distance / np.linalg.norm(plane_xyz)
def residuals(params, signal, X):
return f_min(X, params)
def get_fitting_plane(points):
# returns a,b,c,d in ax+by+cz+d=0. a,b,c are also the normal.
pointsT = points.transpose()
# Inital guess of the plane
diff = points[0] - points[-1]
p0 = np.array(([diff[0], diff[1], diff[2], 1.]))
sol = leastsq(residuals, p0, args=(None, pointsT))[0]
#print "Solution: ", sol
#print "Old Error: ", (f_min(pointsT, p0)**2).sum()
#print "New Error: ", (f_min(pointsT, sol)**2).sum()
return sol
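# Editor's note: hedged usage sketch (not part of the original module); the
# points below are illustrative. The result packs the fitted plane as
# (a, b, c, d) with a*x + b*y + c*z + d = 0, where (a, b, c) is also the normal:
#
#     pts = np.array([[0.1, 0.2, 1.0], [2.0, 0.1, 1.1], [0.3, 2.2, 0.9]])
#     a, b, c, d = get_fitting_plane(pts)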
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
    # Returns the angle in radians between vectors 'v1' and 'v2'.
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
angle = np.arccos(np.dot(v1_u, v2_u))
if np.isnan(angle):
if (v1_u == v2_u).all():
return 0.0
else:
return np.pi
return angle | lgpl-3.0 | 584,378,390,467,305,900 | 28.44 | 114 | 0.568905 | false |
gentimouton/swarch | server-master/server.py | 1 | 4410 | """
Server master:
The server is almighty.
Every frame, it receives player inputs from clients,
executes these inputs to update the game state,
and sends the whole game state to all the clients for display.
"""
from __future__ import division  # so that division is float instead of int
from network import Listener, Handler, poll_for
from random import randint
import time
import uuid
##################### game logic #############
TICK_DURATION = 0.05 # seconds
# game state
borders = [[0, 0, 2, 300], [0, 0, 400, 2], [398, 0, 2, 300], [0, 298, 400, 2]]
pellets = [[randint(10, 390), randint(10, 290), 5, 5] for _ in range(4)]
players = {} # map a client handler to a player object
# map inputs received from clients to directions
input_dir = {'up': [0, -1], 'down': [0, 1],
'left': [-1, 0], 'right': [1, 0]}
class Player:
def __init__(self):
self.name = str(uuid.uuid4())
self.revive()
def revive(self):
self.box = [randint(10, 380), randint(10, 280), 10, 10]
self.dir = input_dir['down'] # original direction: downwards
self.speed = 2
def change_dir(self, inputt):
self.dir = input_dir[inputt]
def move(self):
self.box[0] += self.dir[0] * self.speed
self.box[1] += self.dir[1] * self.speed
def grow_and_slow(self, qty=2):
self.box[2] += qty
self.box[3] += qty
self.speed -= self.speed/6
def collide_borders(self):
[self.revive() for border in borders if collide_boxes(self.box, border)]
def collide_other_players(self):
for p in players.values():
# only the player with lowest id of the pair detects the collision
if self.name < p.name and collide_boxes(self.box, p.box):
playerw, pw = self.box[2], p.box[2] # widths
if playerw > pw:
self.grow_and_slow(pw)
p.revive()
elif playerw < pw:
p.grow_and_slow(playerw)
self.revive()
else: # they have same width: kill both
p.revive()
self.revive()
def collide_pellets(self):
for index, pellet in enumerate(pellets):
if collide_boxes(self.box, pellet):
self.grow_and_slow()
pellets[index] = [randint(10, 390), randint(10, 290), 5, 5]
def update(self):
self.move()
self.collide_borders()
self.collide_other_players()
self.collide_pellets()
def collide_boxes(box1, box2):
x1, y1, w1, h1 = box1
x2, y2, w2, h2 = box2
return x1 < x2 + w2 and y1 < y2 + h2 and x2 < x1 + w1 and y2 < y1 + h1
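# Editor's note: hedged illustration, not part of the original file. Boxes are
# [x, y, width, height]; two boxes overlap when each one starts before the
# other ends on both axes:
#     collide_boxes([0, 0, 10, 10], [5, 5, 10, 10])   # True  (overlapping)
#     collide_boxes([0, 0, 10, 10], [20, 0, 5, 5])    # False (disjoint in x)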
################### network ##############
event_queue = [] # list of ('event', handler)
# 'event' can be 'quit', 'join', 'up', 'down', 'left', 'right'
class MyHandler(Handler):
def on_open(self):
event_queue.append(('join', self))
def on_close(self):
event_queue.append(('quit', self))
def on_msg(self, data):
event_queue.append((data['input'], self))
server = Listener(8888, MyHandler)
######################### loop #######################
def apply_events():
# apply events onto game state
global event_queue
for event, handler in event_queue:
if event == 'quit':
del players[handler]
elif event == 'join':
players[handler] = Player()
else: # movement input
players[handler].change_dir(event)
event_queue = []
def update_simulation():
[player.update() for player in players.values()]
def broadcast_state():
# Send to all players 1) the whole game state, and 2) their own name,
# so players can draw themselves differently from the other players.
serialized_players = {p.name: p.box for p in players.values()}
for handler, player in players.items():
msg = {'borders': borders,
'pellets': pellets,
'myname': player.name,
'players': serialized_players}
handler.do_send(msg)
while 1:
loop_start = time.time()
apply_events()
update_simulation()
broadcast_state()
poll_for(TICK_DURATION - (time.time() - loop_start)) # poll until tick is over
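# Editor's note (hedged): each loop iteration above is one authoritative server
# tick of TICK_DURATION seconds -- queued inputs are applied, the simulation is
# advanced once, the full state is broadcast, and poll_for() waits out the
# remainder of the tick.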
| mit | 2,149,018,377,298,858,800 | 30.726619 | 82 | 0.547846 | false |