{
"source": "jonbleiberg88/mayacal",
"score": 3
}
#### File: mayacal/utils/haab.py
```python
__all__ = ["Haab", "HAAB_MONTHS", "HAAB_IDX_TO_MONTH", "HAAB_MONTH_TO_IDX"]
# Module level constants
HAAB_MONTHS = [
"Pop",
"Wo",
"Sip",
"Sotz",
"Sek",
"Xul",
"Yaxkin",
"Mol",
"Chen",
"Yax",
"Sak",
"Keh",
"Mak",
"Kankin",
"Muwan",
"Pax",
"Kayab",
"Kumku",
"Wayeb",
]
HAAB_IDX_TO_MONTH = {idx: month for idx, month in enumerate(HAAB_MONTHS)}
HAAB_MONTH_TO_IDX = {month: idx for idx, month in HAAB_IDX_TO_MONTH.items()}
class Haab:
"""Represents a month number, month name combination in the 365 day count
Attributes:
day_number (int): The day number associated with the Haab date
day_name (str): The day name associated with the Haab date
"""
def __init__(self, month_number=None, month_name=None):
"""Creates a new Haab object
Can either be constructed from a day name, day number combination or
from the position in the tzolkin count counting from 1 Imix.
Args:
month_number (int): Integer from 0-19 (or 0-4 for Wayeb)
representing the month number
month_name (str): month name
"""
if month_name not in HAAB_MONTHS and month_name is not None:
raise ValueError(f"Invalid Haab month name {month_name}")
self.month_name = month_name
if month_name == "Wayeb":
if month_number not in list(range(5)):
raise ValueError(
"Invalid Haab month number, Wayeb number must be between 0 and 4"
)
elif month_number not in list(range(20)) and month_number is not None:
raise ValueError(
"Invalid Haab month number, must be an integer between 0 and 19 or NoneType"
)
self.month_number = month_number
if not self.has_missing():
self.haab_num = 20 * HAAB_MONTH_TO_IDX[month_name] + month_number
else:
self.haab_num = None
def has_missing(self):
"""Checks whether the month number or name is missing
Returns:
(bool): True if either the month number or month name is None, False
otherwise
"""
if self.month_name is None or self.month_number is None:
return True
return False
def reset_by_haab_num(self, new_num):
"""Set the Haab object to a new position by its 365 day count number
Note:
0 Pop is used as the reference 'Day 0' of the cycle
Args:
            new_num (int): Integer from 0-364 representing the new position in the
365 day count.
"""
self.month_name = HAAB_IDX_TO_MONTH[new_num // 20]
self.month_number = new_num % 20
return self
def add_days(self, num_days, in_place=False):
"""Adds days to the current Haab object
Args:
num_days (int): Number of days to add to the Haab object
in_place (bool): Whether to modify the existing object or return a
new object. Defaults to False.
        Returns:
            (Haab): The Haab object num_days ahead of the current one; the same
                object if in_place is True, otherwise a new object.
        """
new_num = (self.haab_num + num_days) % 365
if in_place:
self.haab_num = new_num
self.reset_by_haab_num(self.haab_num)
return self
else:
return Haab().reset_by_haab_num(new_num)
def match(self, date):
"""Checks for a potential match with another Haab object
A value of None is treated as matching any value, consistent with the use
of None to mark values for later inference.
Args:
date (Haab): The Haab object to check for a match with
Returns:
(bool): True if the month name and number match, with None as an
automatic match. False otherwise.
"""
name_same = self.__fuzzy_eq(self.month_name, date.month_name)
num_same = self.__fuzzy_eq(self.month_number, date.month_number)
if name_same and num_same:
return True
else:
return False
def to_dict(self):
"""Returns a JSON style dictionary representation
Returns:
(dict): Dictionary representation of the object ready for conversion
to JSON
"""
return {"month_number": self.month_number, "month_name": self.month_name}
def __fuzzy_eq(self, v1, v2):
"""Helper function for NoneType matching"""
if v1 == v2 or v1 is None or v2 is None:
return True
return False
def __eq__(self, date):
name_same = self.month_name == date.month_name
num_same = self.month_number == date.month_number
if name_same and num_same:
return True
else:
return False
def __sub__(self, date):
return abs(self.haab_num - date.haab_num)
def __repr__(self):
return f"{self.month_number} {self.month_name}"
```
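A minimal usage sketch of the Haab class above; the import path is inferred from the file header, and the values are purely illustrative:
```python
# Illustrative only: import path inferred from the file header above.
from mayacal.utils.haab import Haab

h = Haab(month_number=4, month_name="Pop")
print(h)                      # 4 Pop

# adding days wraps around the 365 day count (20 days per month)
h2 = h.add_days(20)
print(h2)                     # 4 Wo

# None components act as wildcards when matching partially known dates
partial = Haab(month_number=None, month_name="Wo")
print(partial.match(h2))      # True
```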
#### File: mayacal/utils/mayadate.py
```python
from .calendar_round import CalendarRound
from .long_count import LongCount
from .tzolkin import Tzolkin
from .haab import Haab
from .utils import *
import logging
__all__ = ["Mayadate", "from_dict"]
class Mayadate:
"""Umbrella class to handle Maya calendar dates, conversions, and inference
Attributes:
long_count (LongCount): The Long Count representation of the date
calendar_round (CalendarRound): The Calendar Round position of the date
glyph_g (str): The Glyph G associated with the date
"""
def __init__(self, long_count=None, calendar_round=None, glyph_g=None):
"""Creates a new Mayadate object
Args:
long_count (LongCount): The Long Count representation of the date
calendar_round (CalendarRound): The Calendar Round position of the
date
glyph_g (str): The Glyph G associated with the date, e.g. "G3"
"""
if long_count is None:
self.long_count = LongCount(None, None, None, None, None)
self.glyph_g = glyph_g
else:
self.long_count = long_count
if long_count.winal is not None and long_count.kin is not None:
g = self.long_count.get_glyph_g()
if g != glyph_g and glyph_g is not None:
raise ValueError(
"Provided Glyph G does not match the Long Count date"
)
self.glyph_g = g
else:
self.glyph_g = glyph_g
if calendar_round is None:
if long_count is not None:
if not self.long_count.has_missing():
self.calendar_round = self.long_count.get_calendar_round()
else:
self.calendar_round = CalendarRound(None, None)
else:
self.calendar_round = calendar_round
def has_missing(self):
"""Checks whether the Mayadate object has missing values in any position
Returns:
(bool): True if any of the Long Count components (baktun, katun, ...)
or Calendar Round components are None. Otherwise returns False.
"""
return self.long_count.has_missing() or self.calendar_round.has_missing()
def add_days(self, num_days, in_place=False):
"""Adds num_days days (kin) to the current Mayadate object
Args:
num_days (int): The number of days to add
in_place (bool): If True, modify the existing Mayadate object, else
return a new Mayadate object. Defaults to False.
Returns:
(Mayadate): The Mayadate object num_days ahead of the current Mayadate
object's date.
"""
if in_place:
self.long_count = self.long_count.add_days(num_days)
self.calendar_round = self.calendar_round.add_days(num_days)
return self
else:
return Mayadate(
self.long_count.add_days(num_days),
self.calendar_round.add_days(num_days),
)
def infer_long_count_dates(self):
"""Finds Long Count dates that match the supplied information
Returns:
(list) A list of potential Long Count dates that match the supplied
portions of the Long Count and Calendar Round Dates
"""
if not self.long_count.has_missing():
return [self.long_count]
if not self.calendar_round.has_missing():
min_lc, max_lc = LongCount(0, 0, 0, 0, 0), LongCount(13, 19, 19, 17, 19)
poss_lc = self.calendar_round.get_long_count_possibilities(min_lc, max_lc)
poss_lc = [lc for lc in poss_lc if self.match(lc.get_mayadate())]
else:
poss_lc = self.__infer_lc_recursive(self.long_count.to_list(), [])
if poss_lc == []:
logging.info("No matching dates found - check the inputted values")
return poss_lc
def infer_mayadates(self):
"""Finds Maya calendar dates that match the supplied information
Returns:
(list) A list of potential Mayadate objects that match the supplied
portions of the Long Count and Calendar Round Dates
"""
if not self.long_count.has_missing():
return [self.long_count.get_mayadate()]
lcs = self.infer_long_count_dates()
if lcs == []:
logging.info("No matching dates found - check the inputted values")
return [lc.get_mayadate() for lc in lcs]
def __infer_lc_recursive(self, lc, poss_dates):
"""Helper function to recursively check for possible dates"""
if None not in lc:
lc_obj = LongCount(*lc)
if self.calendar_round.match(lc_obj.get_calendar_round()):
if self.glyph_g is not None:
if lc_obj.get_glyph_g() == self.glyph_g:
return lc_obj
else:
return lc_obj
return
max_vals = [14, 20, 20, 18, 20]
for idx, v in enumerate(zip(lc, max_vals)):
val, max = v
if val is None:
for i in range(max):
lc_test = lc[:]
lc_test[idx] = i
res = self.__infer_lc_recursive(lc_test, poss_dates)
if type(res) is LongCount:
poss_dates.append(res)
break
return poss_dates
def to_julian_day(self, correlation=584283):
"""Converts the Mayan calendar date to its corresponding Julian Day number
By default uses the correlation constant 584,283 proposed by Thompson.
Args:
correlation (int): The correlation constant to use in the conversion.
Defaults to 584283.
Returns:
(int): The Julian Day number associated with the Mayan calendar date
"""
return self.long_count.to_julian_day(correlation)
def to_julian(self, correlation=584283):
"""Converts the Mayan calendar date to its corresponding Julian calendar date
By default uses the correlation constant 584,283 proposed by Thompson.
Args:
correlation (int): The correlation constant to use in the conversion.
Defaults to 584283.
Returns:
(JulianDate): The Julian calendar date associated with the Mayan
calendar date
"""
return self.long_count.to_julian(correlation)
def to_gregorian(self, correlation=584283):
"""Converts the Mayan calendar date to its corresponding Gregorian calendar date
By default uses the correlation constant 584,283 proposed by Thompson.
Args:
correlation (int): The correlation constant to use in the conversion.
Defaults to 584283.
Returns:
            (GregorianDate): The Gregorian calendar date associated with the Mayan
calendar date
"""
return self.long_count.to_gregorian(correlation)
def get_total_kin(self):
"""Returns the total number of kin since the initial date 0.0.0.0.0
Returns:
(int): The number of kin since the initial date of the Mayan calendar.
"""
return self.long_count.get_total_kin()
def get_glyph_g(self):
"""Calculates the number of the Glyph G associated with the Long Count date
Returns:
(str): The Glyph G associated with the given date e.g. "G6"
"""
return self.long_count.get_glyph_g()
def to_dict(self):
"""Returns a JSON style dictionary representation
Output dictionary will be in format:
{
'long_count' : {
'baktun' : 9,
'katun' : 0,
'tun' : 0,
'winal' : 0,
'kin' : 0
},
'calendar_round' : {
'tzolkin' : {
'day_number' : 8,
'day_name' : "Ajaw"
},
'haab' : {
'month_number' : 13,
'month_name' : "Keh"
}
},
'glyph_g' : 'G9'
}
Missing values will be replaced with None.
Returns:
(dict): Dictionary representation of the object ready for conversion
to JSON
"""
date_dict = {
"long_count": self.long_count.to_dict(),
"calendar_round": self.calendar_round.to_dict(),
"glyph_g": self.glyph_g,
}
return date_dict
def match(self, date):
"""Checks for a potential match with another Mayadate object
A value of None is treated as matching any value, consistent with the use
of None to mark values for later inference.
Args:
date (Mayadate): The Mayadate object to check for a match with
Returns:
            (bool): True if all entries match, with None as an
automatic match. False otherwise.
"""
lc_match = self.long_count.match(date.long_count)
cr_match = self.calendar_round.match(date.calendar_round)
g_match = self.__fuzzy_eq(self.glyph_g, date.glyph_g)
if lc_match and cr_match and g_match:
return True
return False
def __fuzzy_eq(self, v1, v2):
"""Helper function for NoneType matching"""
if v1 == v2 or v1 is None or v2 is None:
return True
return False
def __add__(self, dist):
lc = self.long_count + dist.long_count
return lc.get_mayadate()
def __sub__(self, dist):
dist = self.long_count - dist.long_count
return dist
def __eq__(self, date):
if self.get_total_kin() == date.get_total_kin():
return True
else:
return False
def __gt__(self, date):
if self.get_total_kin() > date.get_total_kin():
return True
else:
return False
def __ge__(self, date):
if self.get_total_kin() >= date.get_total_kin():
return True
else:
return False
def __lt__(self, date):
if self.get_total_kin() < date.get_total_kin():
return True
else:
return False
def __le__(self, date):
if self.get_total_kin() <= date.get_total_kin():
return True
else:
return False
def __repr__(self):
return f"{self.long_count.__repr__()} {self.calendar_round.__repr__()}"
def from_dict(dict_obj):
"""Converts dictionary to Mayadate object
Mainly intended for use with JSON objects.
Dictionary must be in format:
{
'long_count' : {
'baktun' : 9,
'katun' : 0,
'tun' : 0,
'winal' : 0,
'kin' : 0
},
'calendar_round' : {
'tzolkin' : {
'day_number' : 8,
'day_name' : "Ajaw"
},
'haab' : {
'month_number' : 13,
'month_name' : "Keh"
}
},
'glyph_g' : 'G9'
}
Missing values should be replaced with None.
Args:
dict_obj (dict): Dictionary in above format to convert
Returns:
(Mayadate): the corresponding Mayadate object
"""
lc_dict = _none_to_dict(dict_obj.get("long_count", {}))
cr_dict = _none_to_dict(dict_obj.get("calendar_round", {}))
tz_dict = _none_to_dict(cr_dict.get("tzolkin", {}))
hb_dict = _none_to_dict(cr_dict.get("haab", {}))
glyph_g = dict_obj.get("glyph_g")
return Mayadate(
long_count=LongCount(**lc_dict),
calendar_round=CalendarRound(Tzolkin(**tz_dict), Haab(**hb_dict)),
glyph_g=glyph_g,
)
def _none_to_dict(obj):
if type(obj) is dict:
return obj
elif obj is None:
return {}
else:
raise ValueError("Dictionary not properly formatted - see documentation")
def main():
pass
if __name__ == "__main__":
main()
```
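A short sketch of building a partially known date from a dictionary and letting the inference routine above fill in the gap; the import path follows the file header, and the example values mirror the to_dict docstring:
```python
from mayacal.utils import mayadate

# Long Count with a missing katun, plus a fully known Calendar Round
partial = {
    "long_count": {"baktun": 9, "katun": None, "tun": 0, "winal": 0, "kin": 0},
    "calendar_round": {
        "tzolkin": {"day_number": 8, "day_name": "Ajaw"},
        "haab": {"month_number": 13, "month_name": "Keh"},
    },
    "glyph_g": None,
}

date = mayadate.from_dict(partial)
for candidate in date.infer_mayadates():
    print(candidate)          # matching Long Count + Calendar Round dates
```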
#### File: mayacal/utils/utils.py
```python
import math
import datetime
__all__ = [
"JulianDate",
"GregorianDate",
"julian_day_to_julian",
"julian_day_to_gregorian",
"datetime_to_gregorian",
"datetime_to_julian",
"datetime_to_julian_day",
"datetime_to_mayadate",
]
class JulianDate:
"""Basic class to handle (proleptic) Julian calendar dates and conversions
Note that this class uses the astronomical year convention for years before 1 CE,
i.e. 1 BCE = 0, 2 BCE = -1, etc.
Attributes:
day (int): The day of the Julian calendar date
month (int): The month number of the Julian calendar date
year (int): The (astronomical) year number of the Julian calendar date
"""
def __init__(self, day, month, year):
"""Creates a new JulianDate object
Note that this class uses the astronomical year convention for years before 1 CE,
i.e. 1 BCE = 0, 2 BCE = -1, etc.
Args:
day (int): The day of the Julian calendar date
month (int): The month number of the Julian calendar date
year (int): The (astronomical) year number of the Julian calendar date
"""
if day > 31 or day < 1:
raise ValueError("Invalid day, must be integer between 1 and 31")
self.day = day
if month > 12 or month < 1:
raise ValueError("Invalid month, must be integer between 1 and 12")
self.month = month
self.year = year
self.__check_month_days()
def to_julian_day(self):
"""Converts the Julian Calendar date to its corresponding Julian Day number
Note that the algorithm is only valid for Julian Day numbers greater than or
equal to zero, i.e. Julian calendar years after -4712 (4713 BCE). Earlier
calendar years will raise a ValueError.
Adapted from: https://www.researchgate.net/publication/316558298_Date_Algorithms#pf5
        Returns:
            (float): The Julian Day number corresponding to the Julian calendar date
"""
if self.year < -4712:
raise ValueError(
"Algorithm only valid for Julian year greater than or equal to -4712"
)
if self.month < 3:
M = self.month + 12
Y = self.year - 1
else:
M = self.month
Y = self.year
D = self.day
return D + (153 * M - 457) // 5 + 365 * Y + math.floor(Y / 4) + 1721116.5
def to_gregorian(self, as_datetime=False):
"""Converts the Julian calendar date to its Gregorian calendar equivalent
Returns:
(GregorianDate): The Gregorian calendar date corresponding to the
Julian calendar date.
"""
return julian_day_to_gregorian(math.ceil(self.to_julian_day()))
def to_mayadate(self, correlation=584283):
"""Converts the Julian calendar date to its Mayan calendar equivalent
Returns:
(Mayadate): The Mayan calendar date corresponding to the Gregorian
calendar date.
"""
from .long_count import LongCount, kin_to_long_count
from .mayadate import Mayadate
num_kin = math.ceil(self.to_julian_day()) - correlation
long_count = kin_to_long_count(num_kin)
return Mayadate(long_count, None)
def is_leap_year(self):
"""Determines whether the year of the JulianDate object is a leap year
Returns:
(bool): True if the year is a leap year, False otherwise
"""
if self.year % 4 == 0:
return True
return False
def __check_month_days(self):
"""Raises error if the current configuration of month, day, year is invalid"""
max_days = {
1: 31,
2: 28,
3: 31,
4: 30,
5: 31,
6: 30,
7: 31,
8: 31,
9: 30,
10: 31,
11: 30,
12: 31,
}
if self.is_leap_year():
max_days[2] = 29
if max_days[self.month] < self.day:
raise ValueError(f"Invalid day, month combination {self.month}/{self.day}")
def __eq__(self, other):
return (
self.day == other.day
and self.month == other.month
and self.year == other.year
)
def __repr__(self):
return f"({self.day}, {self.month}, {self.year})"
def __str__(self):
if self.year > 0:
return f"{_num_to_month(self.month)} {self.day}, {self.year} CE"
elif self.year <= 0:
return f"{_num_to_month(self.month)} {self.day}, {abs(self.year) + 1} BCE"
class GregorianDate:
"""Basic class to handle (proleptic) Gregorian calendar dates and conversions
Note that this class uses the astronomical year convention for years before 1 CE,
i.e. 1 BCE = 0, 2 BCE = -1, etc.
Attributes:
day (int): The day of the Gregorian calendar date
month (int): The month number of the Gregorian calendar date
year (int): The (astronomical) year number of the Gregorian calendar date
"""
def __init__(self, day, month, year):
"""Creates a new GregorianDate object
Note that this class uses the astronomical year convention for years before 1 CE,
i.e. 1 BCE = 0, 2 BCE = -1, etc.
Args:
day (int): The day of the Gregorian calendar date
month (int): The month number of the Gregorian calendar date
year (int): The (astronomical) year number of the Gregorian calendar date
"""
if day > 31 or day < 1:
raise ValueError("Invalid day, must be integer between 1 and 31")
self.day = day
if month > 12 or month < 1:
raise ValueError("Invalid month, must be integer between 1 and 12")
self.month = month
self.year = year
self.__check_month_days()
def to_julian_day(self):
"""Converts the Gregorian calendar date to its Julian Day number equivalent
Adapted from: https://www.researchgate.net/publication/316558298_Date_Algorithms#pf5
Returns:
(float): The Julian day number corresponding to the Gregorian calendar
date.
"""
if self.month < 3:
M = self.month + 12
Y = self.year - 1
else:
M = self.month
Y = self.year
D = self.day
return (
D
+ (153 * M - 457) // 5
+ 365 * Y
+ math.floor(Y / 4)
- math.floor(Y / 100)
+ math.floor(Y / 400)
+ 1721118.5
)
def to_julian(self):
"""Converts the Gregorian calendar date to its Julian calendar equivalent
Returns:
(JulianDate): The Julian calendar date corresponding to the Gregorian
calendar date.
"""
return julian_day_to_julian(math.ceil(self.to_julian_day()))
def to_mayadate(self, correlation=584283):
"""Converts the Gregorian calendar date to its Mayan calendar equivalent
Returns:
(Mayadate): The Mayan calendar date corresponding to the Gregorian
calendar date.
"""
from .long_count import LongCount, kin_to_long_count
from .mayadate import Mayadate
num_kin = math.ceil(self.to_julian_day()) - correlation
long_count = kin_to_long_count(num_kin)
return Mayadate(long_count, None)
def to_datetime(self):
"""Converts the GregorianDate object to a datetime.date object
Note that datetime.date objects do not support years before 1 CE. Attempting
to convert GregorianDate objects with year before 1 CE will raise a ValueError.
Returns:
(datetime.date): The datetime.date object corresponding to the Gregorian
calendar date.
"""
if self.year < 1:
raise ValueError("datetime.date objects do not support years before 1 CE")
return datetime.date(self.year, self.month, self.day)
def is_leap_year(self):
"""Determines whether the year of the GregorianDate object is a leap year
Returns:
(bool): True if the year is a leap year, False otherwise
"""
if self.year % 4 == 0:
if self.year % 100 == 0 and self.year % 400 != 0:
return False
else:
return True
return False
def __check_month_days(self):
"""Raises error if the current configuration of month, day, year is invalid"""
max_days = {
1: 31,
2: 28,
3: 31,
4: 30,
5: 31,
6: 30,
7: 31,
8: 31,
9: 30,
10: 31,
11: 30,
12: 31,
}
if self.is_leap_year():
max_days[2] = 29
if max_days[self.month] < self.day:
raise ValueError(f"Invalid day, month combination {self.month}/{self.day}")
def __eq__(self, other):
return (
self.day == other.day
and self.month == other.month
and self.year == other.year
)
def __repr__(self):
return f"({self.day}, {self.month}, {self.year})"
def __str__(self):
if self.year > 0:
return f"{_num_to_month(self.month)} {self.day}, {self.year} CE"
elif self.year <= 0:
return f"{_num_to_month(self.month)} {self.day}, {abs(self.year) + 1} BCE"
def _convert_julian_day(julian_day, mode="julian"):
"""Converts a Julian Day number to its (proleptic) Julian or Gregorian calendar equivalent
Adapted from: https://en.wikipedia.org/wiki/Julian_day#Julian_or_Gregorian_calendar_from_Julian_day_number
Note that the algorithm is only valid for Julian Day numbers greater than or
equal to zero. Negative arguments for julian_day will raise a ValueError.
Args:
julian_day (int): Julian Day number to convert, must be greater than or
equal to 0
mode (str): The target calendar to convert to, either 'julian' or
'gregorian'. Defaults to 'julian'.
Returns:
A (day, month, year) tuple representing the day, month, and year in the
target calendar.
"""
if julian_day < 0:
raise ValueError(
"Algorithm only valid for Julian Day greater than or equal to zero"
)
julian_day = math.ceil(julian_day)
# algorithm parameters
y = 4716
j = 1401
m = 2
n = 12
r = 4
p = 1461
v = 3
u = 5
s = 153
w = 2
B = 274277
C = -38
# intermediate calculations
if mode == "julian":
f = julian_day + j
elif mode == "gregorian":
f = julian_day + j + (((4 * julian_day + B) // 146097) * 3) // 4 + C
else:
raise ValueError("Unrecognized mode - supports 'julian' or 'gregorian'")
e = r * f + v
g = (e % p) // r
h = u * g + w
day = (h % s) // u + 1 # day in target calendar
month = ((h // s + m) % n) + 1 # month in target calendar
year = (e // p) - y + ((n + m - month) // n) # year in target calendar
return day, month, year
def julian_day_to_julian(julian_day):
"""Converts a Julian Day number to its (proleptic) Julian calendar equivalent
Adapted from: https://en.wikipedia.org/wiki/Julian_day#Julian_or_Gregorian_calendar_from_Julian_day_number
Note that the algorithm is only valid for Julian Day numbers greater than or
equal to zero. Negative arguments for julian_day will raise a ValueError.
Args:
julian_day (int): Julian Day number to convert, must be greater than or
equal to 0
    Returns:
        (JulianDate): The corresponding date in the (proleptic) Julian calendar.
"""
day, month, year = _convert_julian_day(julian_day, mode="julian")
return JulianDate(day, month, year)
def julian_day_to_gregorian(julian_day):
"""Converts a Julian Day number to its (proleptic) Gregorian calendar equivalent
Adapted from: https://en.wikipedia.org/wiki/Julian_day#Julian_or_Gregorian_calendar_from_Julian_day_number
Note that the algorithm is only valid for Julian Day numbers greater than or
equal to zero. Negative arguments for julian_day will raise a ValueError.
Args:
julian_day (int): Julian Day number to convert, must be greater than or
equal to 0
    Returns:
        (GregorianDate): The corresponding date in the (proleptic) Gregorian calendar.
"""
day, month, year = _convert_julian_day(julian_day, mode="gregorian")
return GregorianDate(day, month, year)
def datetime_to_gregorian(date):
"""Converts a datetime.date object to a GregorianDate object
Args:
date (datetime.date): The datetime.date object to convert
Returns:
(GregorianDate): The corresponding GregorianDate object
"""
return GregorianDate(date.day, date.month, date.year)
def datetime_to_julian(date):
"""Converts a datetime.date object to the corresponding Julian calendar date
Args:
date (datetime.date): The datetime.date object to convert
Returns:
(JulianDate): The corresponding Julian calendar date
"""
g = GregorianDate(date.day, date.month, date.year)
return g.to_julian()
def datetime_to_julian_day(date):
"""Converts a datetime.date object to the corresponding Julian Day number
Args:
date (datetime.date): The datetime.date object to convert
Returns:
(float): The corresponding Julian Day number
"""
g = GregorianDate(date.day, date.month, date.year)
return g.to_julian_day()
def datetime_to_mayadate(date):
"""Converts a datetime.date object to the corresponding Maya calendar date
Args:
date (datetime.date): The datetime.date object to convert
Returns:
(Mayadate): The corresponding Mayan calendar date.
"""
g = GregorianDate(date.day, date.month, date.year)
return g.to_mayadate()
def _num_to_month(num):
"""Helper function to convert month number to short name
Args:
num (int): the month number to convert
Returns:
(str): The three letter short name of the corresponding month
"""
return {
1: "Jan",
2: "Feb",
3: "Mar",
4: "Apr",
5: "May",
6: "Jun",
7: "Jul",
8: "Aug",
9: "Sep",
10: "Oct",
11: "Nov",
12: "Dec",
}[num]
```
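A brief sketch of the round-trip conversions provided by the utilities above; the import path is inferred from the file header and the values are chosen only for illustration:
```python
import datetime

# import path inferred from the file header above
from mayacal.utils.utils import (GregorianDate, datetime_to_julian_day,
                                 julian_day_to_julian)

g = GregorianDate(1, 1, 2000)
jd = g.to_julian_day()
print(jd)                                                   # 2451544.5

# the same day via the datetime.date convenience wrapper
print(datetime_to_julian_day(datetime.date(2000, 1, 1)))   # 2451544.5

# Julian Day number back to a (proleptic) Julian calendar date
print(julian_day_to_julian(int(jd) + 1))                    # Dec 19, 1999 CE
```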
{
"source": "jonbleiberg88/mayacal_web",
"score": 3
}
#### File: jonbleiberg88/mayacal_web/app.py
```python
import sys
from flask import Flask, request, jsonify, render_template, session
from flask_session import Session
import mayacal as mc
app = Flask(__name__)
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
api_route = '/api/v1/'
@app.route('/')
def index():
session.clear()
return render_template("index.html")
@app.route('/initial_series', methods=['GET', 'POST', 'PUT'])
def initial_series():
if request.method == "POST":
req_json = request.get_json()
# try:
        # TODO: fix server response apostrophes
initial_series = json_to_mayadate(req_json)
session["initial_series"] = initial_series
response_dict = {
'success' : True,
'message' : "Initial series successfully posted"}
return jsonify(response_dict)
# except:
# response_dict = {
# 'success' : False,
# 'message' : "Could not parse given initial series"}
# return jsonify(response_dict)
return
@app.route('/distance_number', methods=['GET', 'POST', 'PUT'])
def distance_number():
if request.method != "GET":
req_json = request.get_json()
row = req_json["row"]
initial_series = session.get("initial_series")
if session.get("distance_numbers") is None:
session["distance_numbers"] = []
dn = json_to_distance_number(req_json["distance_number"])
if request.method == 'PUT':
if row - 1 < len(session["distance_numbers"]):
session["distance_numbers"][row-1] = dn
else:
session["distance_numbers"].append(dn)
elif request.method == 'POST':
session["distance_numbers"].insert(row-1, dn)
else:
return jsonify({
'success' : False,
'message' : "Unrecognized request type"
})
dns = session["distance_numbers"]
response_dict = {
'success' : True,
'resulting_dates' : []
}
current_lc = initial_series.long_count
for idx, dn in enumerate(dns):
current_lc = current_lc + dn
response_dict['resulting_dates'].append({
'row' : idx + 1,
'date' : process_date_dict(current_lc.get_mayadate().to_dict())
})
return jsonify(response_dict)
@app.route(f'{api_route}infer', methods=['POST'])
def infer():
req_json = request.get_json()
print(req_json, file=sys.stderr)
# try:
date = json_to_mayadate(req_json)
poss_dates = date.infer_mayadates()
poss_dates = [process_date_dict(d.to_dict()) for d in poss_dates]
response_dict = {
'success' : True,
'data' : {'poss_dates' : poss_dates}}
#
# except:
# response_dict = {
# 'success' : False,
# 'message' : "Unable to infer date components"
# }
return jsonify(response_dict)
@app.route(f'{api_route}convert/from_maya', methods=['POST'])
def convert_from_maya():
req_json = request.get_json()
print(req_json, file=sys.stderr)
try:
date_dict = req_json.get('date')
correlation = req_json.get('correlation')
mode = req_json.get('mode')
date = mc.mayadate.from_dict(date_dict)
response_dict = {
'success' : True,
'correlation' : correlation
}
if mode == 'julian':
jd = date.to_julian(correlation=correlation)
response_dict['date'] = {
'day' : jd.day,
'month' : jd.month,
'year': jd.year
}
elif mode == 'gregorian':
gd = date.to_gregorian(correlation=correlation)
response_dict['date'] = {
'day' : gd.day,
'month' : gd.month,
'year': gd.year
}
elif mode == 'julian_day':
jdn = date.to_julian_day(correlation=correlation)
response_dict['date'] = {
'day_number' : jdn
}
else:
response_dict['success'] = False
response_dict['message'] = f"Invalid mode {mode} - must be one of 'julian', 'gregorian', or 'julian_day'"
except:
response_dict = {
'success' : False,
'message' : 'Could not convert the given date'
}
return jsonify(response_dict)
@app.route(f'{api_route}convert/batch/from_maya', methods=['POST'])
def batch_convert_from_maya():
req_json = request.get_json()
print(req_json, file=sys.stderr)
# try:
correlation = req_json.get('correlation')
mode = req_json.get('mode')
response_dict = {
'success' : True,
'correlation' : correlation,
'mode' : mode,
'dates' : []
}
date_dicts = req_json.get('dates')
for date_dict in date_dicts:
date = mc.mayadate.from_dict(remove_apostrophes(date_dict))
if mode == 'julian':
jd = date.to_julian(correlation=correlation)
response_dict['dates'].append({
'day' : jd.day,
'month' : jd.month,
'year': jd.year
})
elif mode == 'gregorian':
gd = date.to_gregorian(correlation=correlation)
response_dict['dates'].append({
'day' : gd.day,
'month' : gd.month,
'year': gd.year
})
elif mode == 'julian_day':
jdn = date.to_julian_day(correlation=correlation)
response_dict['dates'].append({
'day_number' : jdn
})
else:
response_dict['success'] = False
response_dict['message'] = f"Invalid mode {mode} - must be one of 'julian', 'gregorian', or 'julian_day'"
# except:
# response_dict = {
# 'success' : False,
# 'message' : 'Could not convert the given date'
# }
return jsonify(response_dict)
@app.route(f'{api_route}convert/to_maya', methods=['POST'])
def convert_to_maya():
return
def json_to_mayadate(json_obj):
    # TODO: fix server response apostrophes
day_name = json_obj['calendar_round']['tzolkin']['day_name']
if day_name is not None:
json_obj['calendar_round']['tzolkin']['day_name'] = day_name.replace("'","")
month_name = json_obj['calendar_round']['haab']['month_name']
if month_name is not None:
json_obj['calendar_round']['haab']['month_name'] = month_name.replace("'","")
date = mc.mayadate.from_dict(json_obj)
return date
def json_to_distance_number(json_obj):
sign = json_obj['sign']
baktun = json_obj['baktun']
katun = json_obj['katun']
tun = json_obj['tun']
winal = json_obj['winal']
kin = json_obj['kin']
lc = mc.LongCount(baktun, katun, tun, winal, kin)
return mc.DistanceNumber(lc, sign)
def process_date_dict(date_dict):
date_dict["calendar_round"]["tzolkin"]["day_name"] = tz_add_apostrophes(date_dict["calendar_round"]["tzolkin"]["day_name"])
date_dict["calendar_round"]["haab"]["month_name"] = hb_add_apostrophes(date_dict["calendar_round"]["haab"]["month_name"])
return date_dict
def tz_add_apostrophes(day_name):
apost_dict = {k:k for k in mc.TZOLKIN_DAYS}
apost_dict["Ik"] = "Ik'"
apost_dict["Akbal"] = "Ak'bal"
apost_dict["Kan"] = "K'an"
apost_dict["Manik"] = "Manik'"
apost_dict["Etznab"] = "Etz'nab"
return apost_dict[day_name]
def hb_add_apostrophes(month_name):
apost_dict = {k:k for k in mc.HAAB_MONTHS}
apost_dict["Sotz"] = "Sotz'"
apost_dict["Yaxkin"] = "Yaxk'in"
apost_dict["Chen"] = "Ch'en"
apost_dict["Kankin"] = "Kank'in"
apost_dict["Kayab"] = "K'ayab"
apost_dict["Kumku"] = "Kumk'u"
return apost_dict[month_name]
def remove_apostrophes(date_dict):
date_dict["calendar_round"]["tzolkin"]["day_name"] = date_dict["calendar_round"]["tzolkin"]["day_name"].replace("'","")
date_dict["calendar_round"]["haab"]["month_name"] = date_dict["calendar_round"]["haab"]["month_name"].replace("'","")
return date_dict
```
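For reference, a hedged sketch of exercising the `/api/v1/convert/from_maya` route above with Flask's test client. The payload keys ('date', 'correlation', 'mode') come from the handler, the example date mirrors the mayacal docstrings, and the module is assumed to be saved as app.py:
```python
# Assumes the application module above is importable as app.
from app import app

payload = {
    "date": {
        "long_count": {"baktun": 9, "katun": 0, "tun": 0, "winal": 0, "kin": 0},
        "calendar_round": {
            "tzolkin": {"day_number": 8, "day_name": "Ajaw"},
            "haab": {"month_number": 13, "month_name": "Keh"},
        },
        "glyph_g": "G9",
    },
    "correlation": 584283,
    "mode": "gregorian",
}

with app.test_client() as client:
    resp = client.post("/api/v1/convert/from_maya", json=payload)
    print(resp.get_json())   # {'success': True, 'correlation': 584283, 'date': {...}}
```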
{
"source": "jonblower/python-viz-intro",
"score": 4
}
#### File: python-viz-intro/Lesson 1. Introduction to Python/example.py
```python
temperature = 5.0 # a floating-point (decimal) number
numberOfLegs = 2 # an integer (whole number)
name = "Jon" # a string
# Conditionals
if temperature < 0.0:
    print("It's freezing!")
elif temperature > 30.0:
    print("It's hot! (If you're British)")
else:
    print("Not too hot, not too cold")
# Note: there is no "end if"
# Loops
print("Here are the numbers from 0 to 9:")
for i in range(10):
    print(i)
print("Here's another (longer) way to print the same thing")
i = 0
while i < 10:
    print(i)
    i = i + 1
# Define a function that constrains a value of longitude (in degrees) to be in the range
# [-180:180]
def lon180(lon):
lon = lon % 360 # The % symbol means "modulo"
if lon > 180:
return lon - 360
else:
return lon
# Here's a function that tests the above routine. It calls lon180 and checks
# the answer is as expected
def testLon180(lon, expected):
    actual = lon180(lon)
    # str(number) converts a number to a string
    print("lon180(" + str(lon) + ") = " + str(actual) + ". Expected = " + str(expected))
    # Here's another way to print the same information, using something like
    # C's printf statement
    # print("lon180(%f) = %f. Expected = %f" % (lon, actual, expected))
# Now test the function. You can probably think of lots more tests
testLon180(-180, 180)
testLon180(360, 0)
testLon180(-190, 170)
```
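The same three checks written as assertions rather than prints; a small sketch that assumes lon180 is defined as above, not part of the original lesson:
```python
# assumes lon180 is defined as above
for lon, expected in [(-180, 180), (360, 0), (-190, 170)]:
    assert lon180(lon) == expected, "lon180(%s) != %s" % (lon, expected)
print("all longitude wrap checks passed")
```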
{
"source": "jonbmartin/sigpy_rf",
"score": 3
}
#### File: mri/rf/b1sel.py
```python
import numpy as np
from sigpy.mri.rf import slr as slr
from sigpy.mri.rf.util import dinf, b12wbs, calc_kbs
from scipy.interpolate import interp1d
__all__ = ['dz_bssel_rf', 'bssel_bs', 'dz_b1_rf', 'bssel_ex_slr',
'dz_b1_gslider_rf', 'dz_b1_hadamard_rf']
def dz_bssel_rf(dt=2e-6, tb=4, short_rat=1, ndes=128, ptype='ex', flip=np.pi/4,
pbw=0.25, pbc=[1], d1e=0.01, d2e=0.01,
rampfilt=True, bs_offset=20000,
fa_correct=True,):
"""Design a math:`B_1^{+}`-selective pulse following J Martin's
Bloch Siegert method.
Args:
dt (float): hardware sampling dwell time in s.
tb (int): time-bandwidth product.
short_rat (float): ratio of duration of desired pulse to duration
required by nyquist. Can shorten pulse at expense of profile.
ndes (int): number of taps in filter design.
ptype (string): pulse type, 'st' (small-tip excitation), 'ex' (pi/2
excitation pulse), 'se' (spin-echo pulse), 'inv' (inversion), or
'sat' (pi/2 saturation pulse).
flip (float): flip angle, in radians. Only required for ptype 'st',
implied for other ptypes.
pbw (float): width of passband in Gauss.
pbc (list of floats): center of passband(s) in Gauss.
d1e (float): passband ripple level in :math:`M_0^{-1}`.
d2e (float): stopband ripple level in :math:`M_0^{-1}`.
rampfilt (bool): option to directly design the modulated filter, to
compensate b1 variation across a slice profile.
bs_offset (float): (Hz) constant offset during pulse.
fa_correct (bool): option to apply empirical flip angle correction.
Returns:
3-element tuple containing
- **bsrf** (*array*): complex bloch-siegert gradient waveform.
- **rfp** (*array*): complex slice-selecting waveform.
- **rw** (*array*): complex bloch-siegert rewinder
References:
<NAME>., <NAME>., <NAME>., & <NAME>. (2021).
Bloch-Siegert |B 1+ |-Selective Excitation Pulses.
Proc. Intl. Soc. Magn. Reson. Med.
"""
beta = 0.5 # AM waveform parameter for fermi sweeps # JBM was 0.5
nsw = np.round(1250e-6 / dt) # number of time points in sweeps
kappa = np.arctan(2) # FM waveform parameter
# calculate bandwidth and pulse duration using lowest PBC of bands. Lower
# PBC's require a longer pulse, so lowest constrains our pulse length
upper_b1 = min(pbc) + pbw / 2
lower_b1 = min(pbc) - pbw / 2
# using Ramsey's BS shift equation pre- w_rf >> gam*b1 approximation
B = b12wbs(bs_offset, upper_b1) - b12wbs(bs_offset,lower_b1)
Tex = (tb / B) * short_rat # seconds, the entire pulse duration
# perform the design of the BS far off resonant pulse
bsrf, rw, phi_bs = bssel_bs(Tex, dt, bs_offset)
# design pulse for number of bands desired
if len(pbc) == 1:
rfp, phi_ex = bssel_ex_slr(Tex, dt, tb, ndes, ptype, flip, pbw, pbc[0],
d1e, d2e, rampfilt, bs_offset, fa_correct)
# repeat design for multiple bands of excitation
else:
rfp = np.zeros((1, np.int(np.ceil(Tex / dt / 2) * 2)), dtype=complex)
for ii in range(0, len(pbc)):
upper_b1 = pbc[ii] + pbw / 2
lower_b1 = pbc[ii] - pbw / 2
B_i = bs_offset * ((1 + (4258 * upper_b1) ** 2 / bs_offset ** 2) ** (
1 / 2) - 1) - \
bs_offset * ((1 + (4258 * lower_b1) ** 2 / bs_offset ** 2) ** (
1 / 2) - 1)
T_i = tb / B_i # seconds, the entire pulse duration
ex_subpulse = bssel_ex_slr(T_i, dt, tb, ndes, ptype, flip, pbw,
pbc[ii], d1e, d2e, rampfilt,
bs_offset)
# zero pad to match the length of the longest pulse
if ii > 0:
zpad = np.zeros((1, np.size(rfp)-np.size(ex_subpulse)))
zp1 = zpad[:, :np.size(zpad)//2]
zp2 = zpad[:, (np.size(zpad))//2:]
ex_subpulse = np.concatenate([zp1, ex_subpulse, zp2], axis=1)
rfp += ex_subpulse
# zero-pad it to the same length as bs
nsw = int(np.ceil((np.size(bsrf) - np.size(rfp))/2))
rfp = np.concatenate([np.zeros((1, np.int(nsw))), rfp], axis=1)
rfp = np.concatenate([rfp,np.zeros((1,np.size(bsrf)-np.size(rfp)))], axis=1)
# return the subpulses. User should superimpose bsrf and rfp if desired
return bsrf, rfp, rw
def bssel_bs(T, dt, bs_offset):
"""Design the Bloch-Siegert shift inducing component pulse for a
    :math:`B_1^{+}`-selective pulse following J Martin's Bloch Siegert method.
Args:
T (float): total pulse duration (s).
dt (float): hardware sampling dwell time (s).
bs_offset (float): constant offset during pulse (Hz).
    Returns:
        3-element tuple containing
        - **bsrf** (*array*): complex BS pulse.
        - **bsrf_rew** (*array*): complex BS rewinder pulse.
        - **phi_bs** (*array*): accrued Bloch-Siegert phase.
References:
<NAME>., <NAME>., <NAME>., & <NAME>. (2021).
Bloch-Siegert |B 1+ |-Selective Excitation Pulses.
Proc. Intl. Soc. Magn. Reson. Med.
"""
a = 0.00006
Bth = 0.95
t0 = T/2 - a*np.log((1-Bth)/Bth)
T_full = 2*t0 +13.81 * a
t = np.arange(-T_full/2, T_full/2, dt)
bs_am = 1 / (1 + np.exp((np.abs(t)-t0)/a))
if np.mod(np.size(bs_am), 2) != 0:
bs_am = bs_am[:-1]
A_half = bs_am[0:int(np.size(bs_am)/2)]
gam = 4258
k = 0.2
t_v = np.arange(dt, T_full/2*dt+dt, dt)
om = (gam*A_half)/np.sqrt((1-(gam*A_half*abs(t_v))/k)**(-2)-1)
om -= np.max(abs(om))
om = np.expand_dims(om*1,0)
bs_fm = np.concatenate([-om, np.fliplr(-om)],axis=1) + bs_offset
kbs_bs = calc_kbs(bs_am, bs_fm, T)
bsrf = bs_am * np.exp(1j * dt * 2 * np.pi * np.cumsum(bs_fm))
bsrf = np.expand_dims(bsrf,0)
phi_bs = np.cumsum((4258*bs_am)**2/(2*bs_fm))
# Build an RF rewinder, same amplitude but shorter duration to produce -0.5
# the Kbs. Pull middle samples until duration matched
bs_am_rew = np.ndarray.tolist(np.squeeze(bs_am))
bs_fm_rew = np.ndarray.tolist(np.squeeze(-bs_fm))
kbs_rw = -kbs_bs
while abs(kbs_rw) > 0.5 * abs(kbs_bs):
mid = len(bs_am_rew)//2
bs_am_rew = bs_am_rew[:mid] + bs_am_rew[mid+1:]
bs_fm_rew = bs_fm_rew[:mid] + bs_fm_rew[mid+1:]
kbs_rw = calc_kbs(bs_am_rew, bs_fm_rew, len(bs_am_rew)*dt)
# adjust amplitude to precisely give correct Kbs
bs_am_rew = np.array(bs_am_rew) * np.sqrt(abs(kbs_bs/(2*kbs_rw)))
kbs_rw = calc_kbs(bs_am_rew, bs_fm_rew, len(bs_am_rew) * dt)
bsrf_rew = np.array(bs_am_rew) * np.exp(1j * dt * 2 * np.pi * np.cumsum(np.array(bs_fm_rew)))
print('RW kbs = {}'.format(kbs_rw))
return bsrf, bsrf_rew, phi_bs
def bssel_ex_slr(T, dt=2e-6, tb=4, ndes=128, ptype='ex', flip=np.pi/2,
pbw=0.25, pbc=1, d1e=0.01, d2e=0.01, rampfilt=True,
bs_offset=20000, fa_correct=True):
    """Design the SLR excitation subpulse for a Bloch-Siegert B1+-selective
    pulse; returns the complex slice-selective waveform and its accrued
    Bloch-Siegert phase. Arguments mirror those of dz_bssel_rf.
    """
    n = np.int(np.ceil(T / dt / 2) * 2)  # samples in final pulse, force even
if not rampfilt:
# straightforward SLR design, no ramp
rfp = slr.dzrf(ndes, tb, ptype, 'ls', d1e, d2e)
rfp = np.expand_dims(rfp, 0)
else:
# perform a filtered design that compensates the b1 variation across
# the slice. Here, calc parameter relations
bsf, d1, d2 = slr.calc_ripples(ptype, d1e, d2e)
# create a beta that corresponds to a ramp
b = slr.dz_ramp_beta(ndes, T, ptype, pbc, pbw, bs_offset, tb, d1, d2, dt)
if ptype == 'st':
rfp = b
else:
# inverse SLR transform to get the pulse
b = bsf * b
rfp = slr.b2rf(np.squeeze(b))
rfp = np.expand_dims(rfp, 0)
# interpolate to target dwell time
rfinterp = interp1d(np.linspace(-T / 2, T / 2, ndes), rfp, kind='cubic')
trf = np.linspace(-T / 2, T / 2, n)
rfp = rfinterp(trf)
rfp = rfp * ndes / n
# scale for desired flip if ptype 'st'
if ptype == 'st':
rfp = rfp / np.sum(rfp) * flip / (2 * np.pi * 4258 * dt) # gauss
else: # rf is already in radians in other cases
rfp = rfp / (2 * np.pi * 4258 * dt)
# slice select modulation is middle of upper and lower b1
upper_b1 = pbc + pbw / 2
lower_b1 = pbc - pbw / 2
rfp_modulation = 0.5*(b12wbs(bs_offset, upper_b1) + b12wbs(bs_offset, lower_b1))
print(f'SS modulation = {rfp_modulation} Hz')
# empirical correction factor for scaling
if fa_correct:
scalefact = pbc*(0.3323*np.exp(-0.9655*(rfp_modulation/bs_offset))
+ 0.6821*np.exp(-0.02331*(rfp_modulation/bs_offset)))
rfp = rfp / scalefact
else:
rfp = rfp / pbc
# modulate RF to be centered at the passband. complex modulation => 1 band!
t = np.linspace(- np.int(T / dt / 2), np.int(T / dt / 2), np.size(rfp))
rfp = rfp * np.exp(-1j * 2 * np.pi * rfp_modulation * t * dt)
phi_bs = np.cumsum((4258*np.real(rfp))**2/(2*rfp_modulation))
return rfp, phi_bs
def dz_b1_rf(dt=2e-6, tb=4, ptype='st', flip=np.pi / 6, pbw=0.3,
pbc=2, d1=0.01, d2=0.01, os=8, split_and_reflect=True):
"""Design a :math:`B_1^{+}`-selective excitation pulse following Grissom \
JMR 2014
Args:
dt (float): hardware sampling dwell time in s.
tb (int): time-bandwidth product.
ptype (string): pulse type, 'st' (small-tip excitation), 'ex' (pi/2
excitation pulse), 'se' (spin-echo pulse), 'inv' (inversion), or
'sat' (pi/2 saturation pulse).
flip (float): flip angle, in radians.
pbw (float): width of passband in Gauss.
pbc (float): center of passband in Gauss.
d1 (float): passband ripple level in :math:`M_0^{-1}`.
d2 (float): stopband ripple level in :math:`M_0^{-1}`.
os (int): matrix scaling factor.
split_and_reflect (bool): option to split and reflect designed pulse.
Split-and-reflect preserves pulse selectivity when scaled to excite large
tip-angles.
Returns:
2-element tuple containing
- **om1** (*array*): AM waveform.
- **dom** (*array*): FM waveform (radians/s).
References:
<NAME>., <NAME>., & <NAME>. (2014).
:math:`B_1^{+}`-selective excitation pulse design using the Shinnar-Le
Roux algorithm. Journal of Magnetic Resonance, 242, 189-196.
"""
# calculate beta filter ripple
[_, d1, d2] = slr.calc_ripples(ptype, d1, d2)
# calculate pulse duration
b = 4257 * pbw
pulse_len = tb / b
# calculate number of samples in pulse
n = np.int(np.ceil(pulse_len / dt / 2) * 2)
if pbc == 0:
# we want passband as close to zero as possible.
# do my own dual-band filter design to minimize interaction
# between the left and right bands
# build system matrix
A = np.exp(1j * 2 * np.pi *
np.outer(np.arange(-n * os / 2, n * os / 2),
np.arange(-n / 2, n / 2)) / (n * os))
# build target pattern
ii = np.arange(-n * os / 2, n * os / 2) / (n * os) * 2
w = dinf(d1, d2) / tb
f = np.asarray([0, (1 - w) * (tb / 2),
(1 + w) * (tb / 2),
n / 2]) / (n / 2)
d = np.double(np.abs(ii) < f[1])
ds = np.double(np.abs(ii) > f[2])
# shift the target pattern to minimum center position
pbc = np.int(np.ceil((f[2] - f[1]) * n * os / 2 + f[1] * n * os / 2))
dl = np.roll(d, pbc)
dr = np.roll(d, -pbc)
dsl = np.roll(ds, pbc)
dsr = np.roll(ds, -pbc)
# build error weight vector
w = dl + dr + d1 / d2 * np.multiply(dsl, dsr)
# solve for the dual-band filter
AtA = A.conj().T @ np.multiply(np.reshape(w, (np.size(w), 1)), A)
Atd = A.conj().T @ np.multiply(w, dr - dl)
h = np.imag(np.linalg.pinv(AtA) @ Atd)
else: # normal design
# design filter
h = slr.dzls(n, tb, d1, d2)
# dual-band-modulate the filter
om = 2 * np.pi * 4257 * pbc # modulation frequency
t = np.arange(0, n) * pulse_len / n - pulse_len / 2
h = 2 * h * np.sin(om * t)
if split_and_reflect:
# split and flip fm waveform to improve large-tip accuracy
dom = np.concatenate((h[n // 2::-1], h, h[n:n // 2:-1])) / 2
else:
dom = np.concatenate((0 * h[n // 2::-1], h, 0 * h[n:n // 2:-1]))
# scale to target flip, convert to Hz
dom = dom * flip / (2 * np.pi * dt)
# build am waveform
om1 = np.concatenate((-np.ones(n // 2), np.ones(n), -np.ones(n // 2)))
return om1, dom
def dz_b1_gslider_rf(dt=2e-6, g=5, tb=12, ptype='st', flip=np.pi / 6,
pbw=0.5, pbc=2, d1=0.01, d2=0.01, split_and_reflect=True):
"""Design a :math:`B_1^{+}`-selective excitation gSlider pulse following
Grissom JMR 2014.
Args:
dt (float): hardware sampling dwell time in s.
g (int): number of slabs to be acquired.
tb (int): time-bandwidth product.
ptype (string): pulse type, 'st' (small-tip excitation), 'ex' (pi/2
excitation pulse), 'se' (spin-echo pulse), 'inv' (inversion), or
'sat' (pi/2 saturation pulse).
flip (float): flip angle, in radians.
pbw (float): width of passband in Gauss.
pbc (float): center of passband in Gauss.
d1 (float): passband ripple level in :math:`M_0^{-1}`.
d2 (float): stopband ripple level in :math:`M_0^{-1}`.
split_and_reflect (bool): option to split and reflect designed pulse.
Split-and-reflect preserves pulse selectivity when scaled to excite large
tip-angles.
Returns:
2-element tuple containing
- **om1** (*array*): AM waveform.
- **dom** (*array*): FM waveform (radians/s).
References:
<NAME>., <NAME>., & <NAME>. (2014).
:math:`B_1^{+}`-selective excitation pulse design using the Shinnar-Le
Roux algorithm. Journal of Magnetic Resonance, 242, 189-196.
"""
# calculate beta filter ripple
[_, d1, d2] = slr.calc_ripples(ptype, d1, d2)
# if ptype == 'st':
bsf = flip
# calculate pulse duration
b = 4257 * pbw
pulse_len = tb / b
# calculate number of samples in pulse
n = np.int(np.ceil(pulse_len / dt / 2) * 2)
om = 2 * np.pi * 4257 * pbc # modulation freq to center profile at pbc
t = np.arange(0, n) * pulse_len / n - pulse_len / 2
om1 = np.zeros((2 * n, g))
dom = np.zeros((2 * n, g))
for gind in range(1, g + 1):
# design filter
h = bsf*slr.dz_gslider_b(n, g, gind, tb, d1, d2, np.pi, n // 4)
# modulate filter to center and add it to a time-reversed and modulated
# copy, then take the imaginary part to get an odd filter
h = np.imag(h * np.exp(1j * om * t) - h[n::-1] * np.exp(1j * -om * t))
if split_and_reflect:
# split and flip fm waveform to improve large-tip accuracy
dom[:, gind - 1] = np.concatenate((h[n // 2::-1],
h, h[n:n // 2:-1])) / 2
else:
dom[:, gind - 1] = np.concatenate((0 * h[n // 2::-1],
h, 0 * h[n:n // 2:-1]))
# build am waveform
om1[:, gind - 1] = np.concatenate((-np.ones(n // 2), np.ones(n),
-np.ones(n // 2)))
# scale to target flip, convert to Hz
dom = dom / (2 * np.pi * dt)
return om1, dom
def dz_b1_hadamard_rf(dt=2e-6, g=8, tb=16, ptype='st', flip=np.pi / 6,
pbw=2, pbc=2, d1=0.01, d2=0.01, split_and_reflect=True):
"""Design a :math:`B_1^{+}`-selective Hadamard-encoded pulse following \
Grissom JMR 2014.
Args:
dt (float): hardware sampling dwell time in s.
g (int): number of slabs to be acquired.
tb (int): time-bandwidth product.
ptype (string): pulse type, 'st' (small-tip excitation), 'ex' (pi/2 \
excitation pulse), 'se' (spin-echo pulse), 'inv' (inversion), or \
'sat' (pi/2 saturation pulse).
flip (float): flip angle, in radians.
pbw (float): width of passband in Gauss.
pbc (float): center of passband in Gauss.
d1 (float): passband ripple level in :math:`M_0^{-1}`.
d2 (float): stopband ripple level in :math:`M_0^{-1}`.
split_and_reflect (bool): option to split and reflect designed pulse.
Split-and-reflect preserves pulse selectivity when scaled to excite large
tip-angles.
Returns:
2-element tuple containing
- **om1** (*array*): AM waveform.
- **dom** (*array*): FM waveform (radians/s).
References:
<NAME>., <NAME>., & <NAME>. (2014).
:math:`B_1^{+}`-selective excitation pulse design using the Shinnar-Le
Roux algorithm. Journal of Magnetic Resonance, 242, 189-196.
"""
# calculate beta filter ripple
[_, d1, d2] = slr.calc_ripples(ptype, d1, d2)
bsf = flip
# calculate pulse duration
b = 4257 * pbw
pulse_len = tb / b
# calculate number of samples in pulse
n = np.int(np.ceil(pulse_len / dt / 2) * 2)
# modulation frequency to center profile at pbc gauss
om = 2 * np.pi * 4257 * pbc
t = np.arange(0, n) * pulse_len / n - pulse_len / 2
om1 = np.zeros((2 * n, g))
dom = np.zeros((2 * n, g))
for gind in range(1, g + 1):
# design filter
h = bsf*slr.dz_hadamard_b(n, g, gind, tb, d1, d2, n // 4)
# modulate filter to center and add it to a time-reversed and modulated
# copy, then take the imaginary part to get an odd filter
h = np.imag(h * np.exp(1j * om * t) - h[n::-1] * np.exp(1j * -om * t))
if split_and_reflect:
# split and flip fm waveform to improve large-tip accuracy
dom[:, gind - 1] = np.concatenate((h[n // 2::-1],
h,
h[n:n // 2:-1])) / 2
else:
dom[:, gind - 1] = np.concatenate((0 * h[n // 2::-1],
h,
0 * h[n:n // 2:-1]))
# build am waveform
om1[:, gind - 1] = np.concatenate((-np.ones(n // 2), np.ones(n),
-np.ones(n // 2)))
# scale to target flip, convert to Hz
dom = dom / (2 * np.pi * dt)
return om1, dom
```
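A hedged usage sketch of `dz_bssel_rf` above; parameter values are illustrative, and the composite pulse is formed by superimposing the two returned subpulses, as the code comments note. The module path is inferred from the file header, and since the module uses the deprecated `np.int` alias an older NumPy release may be required:
```python
import numpy as np
# module path inferred from the file header above
from sigpy.mri.rf import b1sel

# pi/4 B1+-selective excitation centered at 1 G with a 0.25 G passband
bsrf, rfp, rw = b1sel.dz_bssel_rf(dt=2e-6, tb=4, ptype='ex', flip=np.pi / 4,
                                  pbw=0.25, pbc=[1], bs_offset=20000)

full_pulse = bsrf + rfp      # superimpose BS and slice-selective subpulses
print(full_pulse.shape)      # (1, N) complex samples at dt spacing
```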
#### File: mri/rf/optcont.py
```python
from sigpy import backend
from sigpy.mri.rf import slr
import numpy as np
__all__ = ['optcont1d', 'blochsim', 'deriv']
def optcont1d(dthick, N, os, tb, stepsize=0.001, max_iters=1000, d1=0.01,
d2=0.01, dt=4e-6, conv_tolerance=1e-5):
r"""1D optimal control pulse designer
Args:
dthick: thickness of the slice (cm)
N: number of points in pulse
os: matrix scaling factor
tb: time bandwidth product, unitless
stepsize: optimization step size
max_iters: max number of iterations
d1: ripple level in passband
d2: ripple level in stopband
dt: dwell time (s)
conv_tolerance: max change between iterations, convergence tolerance
Returns:
gamgdt: scaled gradient
pulse: pulse of interest, complex RF waveform
"""
# set mag of gamgdt according to tb + dthick
gambar = 4257 # gamma/2/pi, Hz/g
gmag = tb / (N * dt) / dthick / gambar
# get spatial locations + gradient
x = np.arange(0, N * os, 1) / N / os - 1 / 2
gamgdt = 2 * np.pi * gambar * gmag * dt * np.ones(N)
# set up target beta pattern
d1 = np.sqrt(d1 / 2) # Mxy -> beta ripple for ex pulse
d2 = d2 / np.sqrt(2)
dib = slr.dinf(d1, d2)
ftwb = dib / tb
# freq edges, normalized to 2*nyquist
fb = np.asarray([0, (1 - ftwb) * (tb / 2),
(1 + ftwb) * (tb / 2), N / 2]) / N
dpass = np.abs(x) < fb[1] # passband mask
dstop = np.abs(x) > fb[2] # stopband mask
wb = [1, d1 / d2]
w = dpass + wb[1] / wb[0] * dstop # 'points we care about' mask
# target beta pattern
db = np.sqrt(1 / 2) * dpass * np.exp(-1j / 2 * x * 2 * np.pi)
pulse = np.zeros(N, dtype=complex)
a = np.exp(1j / 2 * x / (gambar * dt * gmag) * np.sum(gamgdt))
b = np.zeros(a.shape, dtype=complex)
eb = b - db
cost = np.zeros(max_iters + 1)
cost[0] = np.real(np.sum(w * np.abs(eb) ** 2))
for ii in range(0, max_iters, 1):
# calculate search direction
auxb = w * (b - db)
drf = deriv(pulse, x / (gambar * dt * gmag), gamgdt, None,
auxb, a, b)
drf = 1j * np.imag(drf)
# get test point
pulse -= stepsize * drf
# simulate test point
[a, b] = blochsim(pulse, x / (gambar * dt * gmag), gamgdt)
# calculate cost
eb = b - db
cost[ii + 1] = np.sum(w * np.abs(eb) ** 2)
# check cost with tolerance
if (cost[ii] - cost[ii + 1]) / cost[ii] < conv_tolerance:
break
return gamgdt, pulse
def blochsim(rf, x, g):
r"""1D RF pulse simulation, with simultaneous RF + gradient rotations.
Assume x has inverse spatial units of g, and g has gamma*dt applied and
assume x = [...,Ndim], g = [Ndim,Nt].
Args:
rf (array): rf waveform input.
x (array): spatial locations.
g (array): gradient waveform.
Returns:
array: SLR alpha parameter
array: SLR beta parameter
"""
device = backend.get_device(rf)
xp = device.xp
with device:
a = xp.ones(xp.shape(x)[0], dtype=complex)
b = xp.zeros(xp.shape(x)[0], dtype=complex)
for mm in range(0, xp.size(rf), 1): # loop over time
# apply RF
c = xp.cos(xp.abs(rf[mm]) / 2)
s = 1j * xp.exp(1j * xp.angle(rf[mm])) * xp.sin(xp.abs(rf[mm]) / 2)
at = a * c - b * xp.conj(s)
bt = a * s + b * c
a = at
b = bt
# apply gradient
if g.ndim > 1:
z = xp.exp(-1j * x @ g[mm, :])
else:
z = xp.exp(-1j * x * g[mm])
b = b * z
# apply total phase accrual
if g.ndim > 1:
z = xp.exp(1j / 2 * x @ xp.sum(g, 0))
else:
z = xp.exp(1j / 2 * x * xp.sum(g))
a = a * z
b = b * z
return a, b
def deriv(rf, x, g, auxa, auxb, af, bf):
r"""1D RF pulse simulation, with simultaneous RF + gradient rotations.
'rf', 'g', and 'x' should have consistent units.
Args:
rf (array): rf waveform input.
x (array): spatial locations.
g (array): gradient waveform.
auxa (None or array): auxa
auxb (array): auxb
af (array): forward sim a.
bf( array): forward sim b.
    Returns:
        array: derivative with respect to the rf waveform, one value per time point
"""
device = backend.get_device(rf)
xp = device.xp
with device:
drf = xp.zeros(xp.shape(rf), dtype=complex)
ar = xp.ones(xp.shape(af), dtype=complex)
br = xp.zeros(xp.shape(bf), dtype=complex)
for mm in range(xp.size(rf) - 1, -1, -1):
# calculate gradient blip phase
if g.ndim > 1:
z = xp.exp(1j / 2 * x @ g[mm, :])
else:
z = xp.exp(1j / 2 * x * g[mm])
# strip off gradient blip from forward sim
af = af * xp.conj(z)
bf = bf * z
# add gradient blip to backward sim
ar = ar * z
br = br * z
            # strip off the current rf rotation from forward sim
c = xp.cos(xp.abs(rf[mm]) / 2)
s = 1j * xp.exp(1j * xp.angle(rf[mm])) * xp.sin(xp.abs(rf[mm]) / 2)
at = af * c + bf * xp.conj(s)
bt = -af * s + bf * c
af = at
bf = bt
# calculate derivatives wrt rf[mm]
db1 = xp.conj(1j / 2 * br * bf) * auxb
db2 = xp.conj(1j / 2 * af) * ar * auxb
drf[mm] = xp.sum(db2 + xp.conj(db1))
if auxa is not None:
da1 = xp.conj(1j / 2 * bf * ar) * auxa
da2 = 1j / 2 * xp.conj(af) * br * auxa
drf[mm] += xp.sum(da2 + xp.conj(da1))
# add current rf rotation to backward sim
art = ar * c - xp.conj(br) * s
brt = br * c + xp.conj(ar) * s
ar = art
br = brt
return drf
```
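A small sketch of driving the optimal-control designer above and re-simulating the result with the bundled `blochsim`; argument values are illustrative, the module path is inferred from the file header, and the spatial grid is rebuilt the same way `optcont1d` builds it internally:
```python
import numpy as np
from sigpy.mri.rf import optcont   # module path inferred from the file header

N, os, tb, dt, dthick = 256, 4, 4, 4e-6, 1.0
gamgdt, pulse = optcont.optcont1d(dthick, N, os, tb, max_iters=100, dt=dt)

gambar = 4257                                  # Hz/G, as in optcont1d
gmag = tb / (N * dt) / dthick / gambar
x = (np.arange(0, N * os) / N / os - 1 / 2) / (gambar * dt * gmag)

a, b = optcont.blochsim(pulse, x, gamgdt)
mxy = 2 * np.conj(a) * b                       # excited transverse magnetization
print(np.max(np.abs(mxy)))
```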
{
"source": "jonbmartin/sigpy-rf-staging",
"score": 3
}
#### File: mri/rf/sim.py
```python
from sigpy import backend
__all__ = ['abrm', 'abrm_nd', 'abrm_hp']
def abrm(rf, x, balanced=False):
r"""1D RF pulse simulation, with simultaneous RF + gradient rotations.
Args:
rf (array): rf waveform input.
x (array): spatial locations.
balanced (bool): toggles application of rewinder.
Returns:
2-element tuple containing
- **a** (*array*): SLR alpha parameter.
- **b** (*array*): SLR beta parameter.
References:
<NAME>., <NAME>, Patrick., <NAME>., and <NAME>.(1991).
'Parameter Relations for the Shinnar-LeRoux Selective Excitation
Pulse Design Algorithm'.
IEEE Transactions on Medical Imaging, Vol 10, No 1, 53-65.
"""
device = backend.get_device(rf)
xp = device.xp
with device:
eps = 1e-16
g = xp.ones(xp.size(rf)) * 2 * xp.pi / xp.size(rf)
a = xp.ones(xp.size(x), dtype=complex)
b = xp.zeros(xp.size(x), dtype=complex)
for mm in range(xp.size(rf)):
om = x * g[mm]
phi = xp.sqrt(xp.abs(rf[mm]) ** 2 + om ** 2) + eps
n = xp.column_stack((xp.real(rf[mm]) / phi,
xp.imag(rf[mm]) / phi,
om / phi))
av = xp.cos(phi / 2) - 1j * n[:, 2] * xp.sin(phi / 2)
bv = -1j * (n[:, 0] + 1j * n[:, 1]) * xp.sin(phi / 2)
at = av * a - xp.conj(bv) * b
bt = bv * a + xp.conj(av) * b
a = at
b = bt
if balanced: # apply a rewinder
g = -2 * xp.pi / 2
om = x * g
phi = xp.abs(om) + eps
nz = om / phi
av = xp.cos(phi / 2) - 1j * nz * xp.sin(phi / 2)
a = av * a
b = xp.conj(av) * b
return a, b
def abrm_nd(rf, x, g):
r"""N-dim RF pulse simulation
Assumes that x has inverse spatial units of g, and g has gamma*dt applied.
Assumes dimensions x = [...,Ndim], g = [Ndim,Nt].
Args:
rf (array): rf waveform input.
x (array): spatial locations.
g (array): gradient array.
Returns:
2-element tuple containing
- **a** (*array*): SLR alpha parameter.
- **b** (*array*): SLR beta parameter.
References:
<NAME>., <NAME>, Patrick., <NAME>., and <NAME>.(1991).
'Parameter Relations for the Shinnar-LeRoux Selective Excitation
Pulse Design Algorithm'.
IEEE Transactions on Medical Imaging, Vol 10, No 1, 53-65.
"""
device = backend.get_device(rf)
xp = device.xp
with device:
eps = 1e-16
a = xp.ones(xp.shape(x)[0], dtype=complex)
b = xp.zeros(xp.shape(x)[0], dtype=complex)
for mm in range(xp.size(rf)):
om = x @ g[mm, :]
phi = xp.sqrt(xp.abs(rf[mm]) ** 2 + om ** 2)
n = xp.column_stack((xp.real(rf[mm]) / (phi + eps),
xp.imag(rf[mm]) / (phi + eps),
om / (phi + eps)))
av = xp.cos(phi / 2) - 1j * n[:, 2] * xp.sin(phi / 2)
bv = -1j * (n[:, 0] + 1j * n[:, 1]) * xp.sin(phi / 2)
at = av * a - xp.conj(bv) * b
bt = bv * a + xp.conj(av) * b
a = at
b = bt
return a, b
def abrm_hp(rf, gamgdt, xx, dom0dt=0):
r"""1D RF pulse simulation, with non-simultaneous RF + gradient rotations.
Args:
rf (array): rf pulse samples in radians.
        gamgdt (array): gradient samples in radians/(units of xx).
xx (array): spatial locations.
dom0dt (float): off-resonance phase in radians.
Returns:
2-element tuple containing
- **a** (*array*): SLR alpha parameter.
- **b** (*array*): SLR beta parameter.
References:
<NAME>., <NAME>, Patrick., <NAME>., and <NAME>.(1991).
'Parameter Relations for the Shinnar-LeRoux Selective Excitation
Pulse Design Algorithm'.
IEEE Transactions on Medical Imaging, Vol 10, No 1, 53-65.
"""
device = backend.get_device(rf)
xp = device.xp
with device:
Ns = xp.shape(xx)
Ns = Ns[0] # Ns: # of spatial locs
Nt = xp.shape(gamgdt)
Nt = Nt[0] # Nt: # time points
a = xp.ones((Ns,))
b = xp.zeros((Ns,))
for ii in xp.arange(Nt):
            # apply phase accrual
z = xp.exp(-1j * (xx * gamgdt[ii, ] + dom0dt))
b = b * z
# apply rf
C = xp.cos(xp.abs(rf[ii]) / 2)
S = 1j * xp.exp(1j * xp.angle(rf[ii])) * xp.sin(xp.abs(rf[ii]) / 2)
at = a * C - b * xp.conj(S)
bt = a * S + b * C
a = at
b = bt
z = xp.exp(1j / 2 * (xx * xp.sum(gamgdt, axis=0) + Nt * dom0dt))
a = a * z
b = b * z
return a, b
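# Illustrative usage of abrm_hp (a sketch with assumed units and array shapes):
#   import numpy as np
#   nt = 256
#   pulse = np.ones(nt) * (np.pi / 2) / nt    # 90-degree hard pulse, radians per sample
#   gamgdt = np.zeros((nt, 1))                 # no gradient played during the pulse
#   xx = np.linspace(-1, 1, 100)
#   a, b = abrm_hp(pulse, gamgdt, xx)
#   mxy = 2 * np.conj(a) * b                   # |mxy| is ~1 everywhere for this pulse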
``` |
{
"source": "jonbmartin/sigpy_rf",
"score": 2
} |
#### File: mri/rf/test_adiabatic.py
```python
import unittest
import numpy as np
import numpy.testing as npt
from sigpy.mri import rf
if __name__ == '__main__':
unittest.main()
class TestAdiabatic(unittest.TestCase):
def test_bir4(self):
# test an excitation bir4 pulse
n = 1176
dt = 4e-6
dw0 = 100 * np.pi / dt / n
beta = 10
kappa = np.arctan(20)
flip = np.pi / 4
[am_bir, om_bir] = rf.adiabatic.bir4(n, beta, kappa, flip, dw0)
# check relatively homogeneous over range of B1 values
b1 = np.arange(0.2, 0.8, 0.1)
b1 = np.reshape(b1, (np.size(b1), 1))
a = np.zeros(np.shape(b1), dtype='complex')
b = np.zeros(np.shape(b1), dtype='complex')
for ii in range(0, np.size(b1)):
[a[ii], b[ii]] = rf.sim.abrm_nd(
2 * np.pi * dt * 4258 * b1[ii] * am_bir, np.ones(1),
dt * np.reshape(om_bir, (np.size(om_bir), 1)))
mxy = 2 * np.multiply(np.conj(a), b)
test = np.ones(mxy.shape) * 0.7 # magnetization value we expect
npt.assert_array_almost_equal(np.abs(mxy), test, 2)
def test_hyp_ex(self):
# test an inversion adiabatic hyp pulse
n = 512
beta = 800
mu = 4.9
dur = 0.012
[am_sech, om_sech] = rf.adiabatic.hypsec(n, beta, mu, dur)
# check relatively homogeneous over range of B1 values
b1 = np.arange(0.2, 0.8, 0.1)
b1 = np.reshape(b1, (np.size(b1), 1))
a = np.zeros(np.shape(b1), dtype='complex')
b = np.zeros(np.shape(b1), dtype='complex')
for ii in range(0, np.size(b1)):
[a[ii], b[ii]] = rf.sim.abrm_nd(
2 * np.pi * (dur / n) * 4258 * b1[ii] * am_sech, np.ones(1),
dur / n * np.reshape(om_sech, (np.size(om_sech), 1)))
mz = 1 - 2 * np.abs(b) ** 2
test = np.ones(mz.shape) * -1 # magnetization value we expect
npt.assert_array_almost_equal(mz, test, 2)
def test_hypsec_n(self):
# test an HSn adiabatic inversion pulse
n = 500
beta = 8 # rad/ms
a_max = 12000 # Hz
pwr = 8
dur = 0.012 # s
[am_sechn, fm_sechn] = rf.adiabatic.hypsec_n(n, beta, a_max, pwr)
# check relatively homogeneous over range of B1 values
b1 = np.arange(0.2, 0.8, 0.1)
b1 = np.reshape(b1, (np.size(b1), 1))
a = np.zeros(np.shape(b1), dtype='complex')
b = np.zeros(np.shape(b1), dtype='complex')
for ii in range(0, np.size(b1)):
[a[ii], b[ii]] = rf.sim.abrm_nd(
2 * np.pi * (dur / n) * 4258 * b1[ii] * am_sechn, np.ones(1),
dur / n * np.reshape(fm_sechn, (np.size(fm_sechn), 1)))
mz = 1 - 2 * np.abs(b) ** 2
test = np.ones(mz.shape) * -1 # magnetization value we expect
npt.assert_array_almost_equal(mz, test, 2)
npt.assert_almost_equal(np.max(am_sechn), 1, 3)
npt.assert_almost_equal(np.max(fm_sechn), a_max, 3)
def test_goia_wurst(self):
# test a goia-wurst adiabatic pulse
n = 512
dur = 3.5e-3
f = 0.9
n_b1 = 16
m_grad = 4
[_, om_goia, g_goia] = rf.adiabatic.goia_wurst(n, dur, f, n_b1, m_grad)
# test midpoint of goia pulse. Expect 1-f g, 0.1 fm
npt.assert_almost_equal(g_goia[int(len(g_goia)/2)], 1-f, 2)
npt.assert_almost_equal(g_goia[int(len(om_goia)/2)], 0.1, 2)
``` |
{
"source": "jonboh/gestion_ea",
"score": 3
} |
#### File: jonboh/gestion_ea/client.py
```python
class Client:
tree_header = ['Nombre', 'Apellidos', 'DNI', 'Tlf 1', 'Tlf 2', 'e-mail', 'ID Cliente', 'Importe', 'Observaciones']
default_header_map = [1 for _ in tree_header]
default_header_map[tree_header.index('ID Cliente')] = 0
default_header_map[tree_header.index('Observaciones')] = 0
def __init__(self, name='', surname='', id_card='', phone1='', phone2='', email='',
client_id='', price='', observations=''):
self.name = name
self.surname = surname
self.id_card = id_card
self.phone1 = phone1
self.phone2 = phone2
self.email = email
        if client_id == '':
self.id = -1
else:
self.id = int(client_id)
self.price = price
self.observations = self.decode_observations(observations)
def tree_header_map(self, header_map):
raw_entries_list = Client.tree_header
entries_list = list()
for entry, isincluded in zip(raw_entries_list, header_map[0:len(raw_entries_list)]):
if isincluded:
entries_list.append(entry)
return entries_list
def tree_entries(self, header_map=default_header_map):
raw_entries_list = [self.name, self.surname, self.id_card, self.phone1, self.phone2, self.email, self.id, self.price, self.observations]
entries_list = list()
for entry, isincluded in zip(raw_entries_list, header_map[0:len(raw_entries_list)]):
if isincluded:
entries_list.append(entry)
return entries_list
def __str__(self):
ret_string = ';'.join(
[self.name, self.surname, self.id_card, self.phone1, self.phone2, self.email, str(self.id), self.price, self.encode_observations()])
return ret_string
def encode_observations(self):
encoded_observations = ''
observations = self.observations
if '\n' in self.observations:
while observations.count('\n') > 0:
encoded_observations += observations[0:observations.index('\n')]+'/#n'
observations = observations[observations.index('\n')+1:]
encoded_observations += observations
else:
encoded_observations = observations
return encoded_observations
def decode_observations(self, observations):
decoded_obs = ''
if '/#n' in observations:
while observations.count('/#n') > 0:
decoded_obs += observations[0:observations.index('/#n')]+'\n'
observations = observations[observations.index('/#n')+3:]
decoded_obs+=observations
else:
decoded_obs = observations
return decoded_obs
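# Illustrative round trip (hypothetical data): an observations string such as
# "needs follow-up\ncall back" is serialised by encode_observations() as
# "needs follow-up/#ncall back", and decode_observations() restores the original text.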
class Alumn(Client):
alumn_extra =['Alta/Baja', 'Domicilia', 'Cuenta Bancaria', 'Fecha Mandato', 'Periodo Pago', 'Grupos']
tree_header = Client.tree_header + alumn_extra
default_header_map = Client.default_header_map + [1 for _ in alumn_extra]
def __init__(self, name='', surname='', id_card='', phone1='', phone2='', email='',
client_id='', price='', observations='', active='0', pay_bank='0', bank_acc='', date_sent='', pay_period='0', groups='{}'):
super().__init__(name, surname, id_card, phone1, phone2, email, client_id, price, observations)
self.active = int(active)
self.pay_bank = bool(eval(pay_bank))
self.bank_acc = bank_acc
self.date_sent = date_sent
self.pay_period = int(pay_period)
if type(eval(groups)) is dict:
self.groups = set()
else:
self.groups = eval(groups) # set
def tree_header_map(self, header_map):
raw_entries_list = ['Alta/Baja', 'Domicilia', 'Cuenta Bancaria', 'Fecha Mandato', 'Periodo Pago', 'Grupos']
entries_list = super().tree_header_map(header_map)
for entry, isincluded in zip(raw_entries_list, header_map[len(Client.tree_header):]):
if isincluded:
entries_list.append(entry)
return entries_list
def tree_entries(self, header_map=default_header_map):
        if self.active == 1:
active = 'Alta'
else:
active = 'Baja'
if self.pay_bank:
pay_bank = 'Si'
else:
pay_bank = 'No'
        if self.pay_period == 0:
            pay_period = 'Mensual'
        elif self.pay_period == 1:
            pay_period = 'Trimestral'
        elif self.pay_period == 2:
pay_period = 'Anual'
else:
pay_period = 'Desconocido'
groups = self.groups
if len(self.groups) == 0:
groups = '{}'
raw_entries_list = [active, pay_bank, self.bank_acc, self.date_sent, pay_period, groups]
entries_list = super().tree_entries(header_map)
for entry, isincluded in zip(raw_entries_list, header_map[len(Client.tree_header):]):
if isincluded:
entries_list.append(entry)
return entries_list
def __str__(self):
ret_string = ';'.join(
[super().__str__(), str(self.active), str(self.pay_bank), self.bank_acc, self.date_sent,str(self.pay_period),
str(self.groups)])
return ret_string
def cast_client_alumn(client):
alumn = Alumn()
alumn.name = client.name
alumn.surname = client.surname
alumn.id_card = client.id_card
alumn.phone1 = client.phone1
alumn.phone2 = client.phone2
alumn.email = client.email
alumn.id = client.id
alumn.price = client.price
alumn.observations = client.observations
return alumn
def cast_alumn_client(alumn):
client = Client()
client.name = alumn.name
client.surname = alumn.surname
client.id_card = alumn.id_card
client.phone1 = alumn.phone1
client.phone2 = alumn.phone2
client.email = alumn.email
client.id = alumn.id
client.price = alumn.price
client.observations = alumn.observations
return client
``` |
{
"source": "jonboiser/kolibri",
"score": 2
} |
#### File: kolibri/build_tools/py2only.py
```python
import os
import shutil
import sys
dest = 'py2only'
futures_dirname = 'concurrent'
DIST_DIR = os.path.realpath('kolibri/dist')
def hide_py2_modules():
"""
Move the directory of 'futures' and python2-only modules of 'future'
inside the directory 'py2only'
"""
# Move the directory of 'futures' inside the directory 'py2only'
_move_modules_to_py2only(futures_dirname)
# Future's submodules are not downloaded in Python 3 but only in Python 2
if sys.version_info[0] == 2:
from future.standard_library import TOP_LEVEL_MODULES
for module in TOP_LEVEL_MODULES:
if module == 'test':
continue
# Move the directory of submodules of 'future' inside 'py2only'
_move_modules_to_py2only(module)
def _move_modules_to_py2only(module_name):
module_src_path = os.path.join(DIST_DIR, module_name)
module_dst_path = os.path.join(DIST_DIR, dest, module_name)
shutil.move(module_src_path, module_dst_path)
if __name__ == '__main__':
# Temporarily add `kolibri/dist` to PYTHONPATH to import future
sys.path = sys.path + [os.path.realpath(os.path.join(DIST_DIR))]
try:
os.makedirs(os.path.join(DIST_DIR, dest))
except OSError:
raise
hide_py2_modules()
# Remove `kolibri/dist` from PYTHONPATH
sys.path = sys.path[:-1]
```
#### File: management/commands/generate_schema.py
```python
import io
import json
import os
import pickle
from django.apps import apps
from django.core.management import call_command
from django.core.management.base import BaseCommand
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import sessionmaker
from kolibri.content.utils.sqlalchemybridge import get_default_db_string
from kolibri.content.utils.sqlalchemybridge import SCHEMA_PATH_TEMPLATE
DATA_PATH_TEMPLATE = os.path.join(os.path.dirname(__file__), '../../fixtures/{name}_content_data.json')
class Command(BaseCommand):
"""
This management command produces SQLAlchemy schema reflections of the content database app.
It should be run when the Content Models schema is updated, and if it is a change between released
versions the CONTENT_DB_SCHEMA version should have been incremented.
It also produces a data dump of the content test fixture that fits to this database schema,
so that we can use it for testing purposes.
Note: this command requires an empty, but migrated, database to work properly.
"""
def add_arguments(self, parser):
parser.add_argument('version', type=str)
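    # Illustrative invocation (entry point assumed; the only argument is the schema version):
    #   python manage.py generate_schema <CONTENT_DB_SCHEMA version>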
def handle(self, *args, **options):
engine = create_engine(get_default_db_string(), convert_unicode=True)
metadata = MetaData()
app_config = apps.get_app_config('content')
# Exclude channelmetadatacache in case we are reflecting an older version of Kolibri
table_names = [model._meta.db_table for name, model in app_config.models.items() if name != 'channelmetadatacache']
metadata.reflect(bind=engine, only=table_names)
Base = automap_base(metadata=metadata)
# TODO map relationship backreferences using the django names
Base.prepare()
session = sessionmaker(bind=engine, autoflush=False)()
# Load fixture data into the test database with Django
call_command('loaddata', 'content_import_test.json', interactive=False)
def get_dict(item):
value = {key: value for key, value in item.__dict__.items() if key != '_sa_instance_state'}
return value
data = {}
for table_name, record in Base.classes.items():
data[table_name] = [get_dict(r) for r in session.query(record).all()]
with open(SCHEMA_PATH_TEMPLATE.format(name=options['version']), 'wb') as f:
pickle.dump(metadata, f, protocol=2)
data_path = DATA_PATH_TEMPLATE.format(name=options['version'])
with io.open(data_path, mode='w', encoding='utf-8') as f:
json.dump(data, f)
```
#### File: core/device/models.py
```python
from django.conf import settings
from django.db import models
from kolibri.auth.models import Facility
from kolibri.auth.models import FacilityUser
from .permissions import UserCanManageDevicePermissions
device_permissions_fields = [
'is_superuser',
'can_manage_content',
]
class DevicePermissions(models.Model):
"""
This class stores metadata about device permissions for FacilityUsers.
"""
permissions = UserCanManageDevicePermissions()
user = models.OneToOneField(
FacilityUser,
on_delete=models.CASCADE,
related_name='devicepermissions',
blank=False,
null=False,
primary_key=True,
)
is_superuser = models.BooleanField(default=False)
can_manage_content = models.BooleanField(default=False)
class DeviceSettings(models.Model):
"""
This class stores data about settings particular to this device
"""
is_provisioned = models.BooleanField(default=False)
language_id = models.CharField(max_length=15, default=settings.LANGUAGE_CODE)
default_facility = models.ForeignKey(Facility, on_delete=models.SET_NULL, blank=True, null=True)
def save(self, *args, **kwargs):
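        # Force a singleton: every save writes primary key 1, so at most one
        # DeviceSettings row can exist.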
self.pk = 1
super(DeviceSettings, self).save(*args, **kwargs)
```
#### File: core/device/utils.py
```python
from .models import DeviceSettings
def device_provisioned():
return DeviceSettings.objects.filter(is_provisioned=True).exists()
```
#### File: kolibri/core/hooks.py
```python
from __future__ import absolute_import, print_function, unicode_literals
import logging
from kolibri.plugins.hooks import KolibriHook
logger = logging.getLogger(__name__)
class NavigationHook(KolibriHook):
# : A string label for the menu item
label = "Untitled"
# : A string or lazy proxy for the url
url = "/"
def get_menu(self):
menu = {}
for hook in self.registered_hooks:
menu[hook.label] = self.url
return menu
class Meta:
abstract = True
class UserNavigationHook(KolibriHook):
"""
A hook for adding navigation items to the user menu.
"""
# : A string label for the menu item
label = "Untitled"
# : A string or lazy proxy for the url
url = "/"
def get_menu(self):
menu = {}
for hook in self.registered_hooks:
menu[hook.label] = self.url
return menu
class Meta:
abstract = True
```
#### File: deployment/default/views.py
```python
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.views.generic.base import View
class RootURLRedirectView(View):
def get(self, request):
"""
Redirects user to learn page if they are logged in, else redirects to sign in/sign up page.
"""
if request.user.is_authenticated():
return HttpResponseRedirect(reverse('kolibri:learnplugin:learn'))
return HttpResponseRedirect(reverse('kolibri:user:user'))
```
#### File: facility_management/templatetags/facility_management_tags.py
```python
from __future__ import absolute_import, print_function, unicode_literals
from django import template
from kolibri.core.webpack.utils import webpack_asset_render
from ..hooks import FacilityManagementSyncHook
register = template.Library()
@register.simple_tag()
def facility_management_assets():
"""
Using in a template will inject script tags that include the javascript
assets defined by any concrete hook that subclasses
FacilityManagementSyncHook.
:return: HTML of script tags to insert into the template
"""
return webpack_asset_render(FacilityManagementSyncHook, async=False)
```
#### File: plugins/setup_wizard/middleware.py
```python
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.utils.deprecation import MiddlewareMixin
from kolibri.core.device.utils import device_provisioned
ALLOWED_PATH_LIST = [
"deviceprovision",
"kolibri:setupwizardplugin:setupwizard",
"kolibri:set_language",
"session-list"
]
class SetupWizardMiddleware(MiddlewareMixin):
"""
display the setup wizard if device is not provisioned
"""
device_provisioned = False
def process_request(self, request):
# If a DevicePermissions with is_superuser has already been created, no need to do anything here
self.device_provisioned = self.device_provisioned or device_provisioned()
if self.device_provisioned:
if request.path.startswith(reverse("kolibri:setupwizardplugin:setupwizard")):
return redirect(reverse("kolibri:learnplugin:learn"))
return
# Don't redirect for URLs that are required for the setup wizard
allowed_paths = [reverse(name) for name in ALLOWED_PATH_LIST]
if any(request.path.startswith(path_prefix) for path_prefix in allowed_paths):
return
# If we've gotten this far, we want to redirect to the setup wizard
return redirect(reverse("kolibri:setupwizardplugin:setupwizard"))
```
#### File: kolibri/test/test_future_and_futures.py
```python
import os
import sys
import imp
from django.test import TestCase
from kolibri import dist as kolibri_dist
dist_dir = os.path.realpath(os.path.dirname(kolibri_dist.__file__))
class FutureAndFuturesTestCase(TestCase):
def test_import_concurrent_py3(self):
import concurrent
if sys.version_info[0] == 3:
# Python 3 is supposed to import its builtin package `concurrent`
# instead of being inside kolibri/dist/py2only or kolibri/dist
concurrent_parent_path = os.path.realpath(
os.path.dirname(os.path.dirname(concurrent.__file__)))
self.assertNotEqual(dist_dir, concurrent_parent_path)
self.assertNotEqual(os.path.join(dist_dir, 'py2only'), concurrent_parent_path)
def test_import_future_py2(self):
from future.standard_library import TOP_LEVEL_MODULES
if sys.version_info[0] == 2:
for module_name in TOP_LEVEL_MODULES:
if 'test' in module_name:
continue
module_parent_path = os.path.realpath(
os.path.dirname(imp.find_module(module_name)[1]))
# future's standard libraries such as `html` should not be found
# at the same level as kolibri/dist; otherwise, python3 will try to
# import them from kolibri/dist instead of its builtin packages
self.assertNotEqual(dist_dir, module_parent_path)
``` |
{
"source": "jonboland/colosseum",
"score": 4
} |
#### File: src/colosseum/colors.py
```python
class Color:
pass
class rgb(Color):
"A representation of an RGBA color"
def __init__(self, r, g, b, a=1.0):
self.r = r
self.g = g
self.b = b
self.a = a
def __repr__(self):
return "rgba({}, {}, {}, {})".format(self.r, self.g, self.b, self.a)
@property
def rgb(self):
return self
class hsl(Color):
"A representation of an HSLA color"
def __init__(self, h, s, l, a=1.0): # noqa: E741
self.h = h
self.s = s
self.l = l # noqa
self.a = a
def __repr__(self):
return "hsla({}, {}, {}, {})".format(self.h, self.s, self.l, self.a)
@property
def rgb(self):
c = (1.0 - abs(2.0 * self.l - 1.0)) * self.s
h = self.h / 60.0
x = c * (1.0 - abs(h % 2 - 1.0))
m = self.l - 0.5 * c
if h < 1.0:
r, g, b = c + m, x + m, m
elif h < 2.0:
r, g, b = x + m, c + m, m
elif h < 3.0:
r, g, b = m, c + m, x + m
elif h < 4.0:
r, g, b = m, x + m, c + m
        elif h < 5.0:
            r, g, b = x + m, m, c + m
else:
r, g, b = c + m, m, x + m
return rgb(
round(r * 0xff),
round(g * 0xff),
round(b * 0xff),
self.a
)
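# Illustrative conversions (expected values taken from tests/test_color.py below):
#   hsl(120, 1.0, 0.5).rgb  -> rgba(0, 255, 0, 1.0)
#   hsl(0, 0.0, 0.5).rgb    -> rgba(128, 128, 128, 1.0)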
ALICEBLUE = 'aliceblue'
ANTIQUEWHITE = 'antiquewhite'
AQUA = 'aqua'
AQUAMARINE = 'aquamarine'
AZURE = 'azure'
BEIGE = 'beige'
BISQUE = 'bisque'
BLACK = 'black'
BLANCHEDALMOND = 'blanchedalmond'
BLUE = 'blue'
BLUEVIOLET = 'blueviolet'
BROWN = 'brown'
BURLYWOOD = 'burlywood'
CADETBLUE = 'cadetblue'
CHARTREUSE = 'chartreuse'
CHOCOLATE = 'chocolate'
CORAL = 'coral'
CORNFLOWERBLUE = 'cornflowerblue'
CORNSILK = 'cornsilk'
CRIMSON = 'crimson'
CYAN = 'cyan'
DARKBLUE = 'darkblue'
DARKCYAN = 'darkcyan'
DARKGOLDENROD = 'darkgoldenrod'
DARKGRAY = 'darkgray'
DARKGREY = 'darkgrey'
DARKGREEN = 'darkgreen'
DARKKHAKI = 'darkkhaki'
DARKMAGENTA = 'darkmagenta'
DARKOLIVEGREEN = 'darkolivegreen'
DARKORANGE = 'darkorange'
DARKORCHID = 'darkorchid'
DARKRED = 'darkred'
DARKSALMON = 'darksalmon'
DARKSEAGREEN = 'darkseagreen'
DARKSLATEBLUE = 'darkslateblue'
DARKSLATEGRAY = 'darkslategray'
DARKSLATEGREY = 'darkslategrey'
DARKTURQUOISE = 'darkturquoise'
DARKVIOLET = 'darkviolet'
DEEPPINK = 'deeppink'
DEEPSKYBLUE = 'deepskyblue'
DIMGRAY = 'dimgray'
DIMGREY = 'dimgrey'
DODGERBLUE = 'dodgerblue'
FIREBRICK = 'firebrick'
FLORALWHITE = 'floralwhite'
FORESTGREEN = 'forestgreen'
FUCHSIA = 'fuchsia'
GAINSBORO = 'gainsboro'
GHOSTWHITE = 'ghostwhite'
GOLD = 'gold'
GOLDENROD = 'goldenrod'
GRAY = 'gray'
GREY = 'grey'
GREEN = 'green'
GREENYELLOW = 'greenyellow'
HONEYDEW = 'honeydew'
HOTPINK = 'hotpink'
INDIANRED = 'indianred'
INDIGO = 'indigo'
IVORY = 'ivory'
KHAKI = 'khaki'
LAVENDER = 'lavender'
LAVENDERBLUSH = 'lavenderblush'
LAWNGREEN = 'lawngreen'
LEMONCHIFFON = 'lemonchiffon'
LIGHTBLUE = 'lightblue'
LIGHTCORAL = 'lightcoral'
LIGHTCYAN = 'lightcyan'
LIGHTGOLDENRODYELLOW = 'lightgoldenrodyellow'
LIGHTGRAY = 'lightgray'
LIGHTGREY = 'lightgrey'
LIGHTGREEN = 'lightgreen'
LIGHTPINK = 'lightpink'
LIGHTSALMON = 'lightsalmon'
LIGHTSEAGREEN = 'lightseagreen'
LIGHTSKYBLUE = 'lightskyblue'
LIGHTSLATEGRAY = 'lightslategray'
LIGHTSLATEGREY = 'lightslategrey'
LIGHTSTEELBLUE = 'lightsteelblue'
LIGHTYELLOW = 'lightyellow'
LIME = 'lime'
LIMEGREEN = 'limegreen'
LINEN = 'linen'
MAGENTA = 'magenta'
MAROON = 'maroon'
MEDIUMAQUAMARINE = 'mediumaquamarine'
MEDIUMBLUE = 'mediumblue'
MEDIUMORCHID = 'mediumorchid'
MEDIUMPURPLE = 'mediumpurple'
MEDIUMSEAGREEN = 'mediumseagreen'
MEDIUMSLATEBLUE = 'mediumslateblue'
MEDIUMSPRINGGREEN = 'mediumspringgreen'
MEDIUMTURQUOISE = 'mediumturquoise'
MEDIUMVIOLETRED = 'mediumvioletred'
MIDNIGHTBLUE = 'midnightblue'
MINTCREAM = 'mintcream'
MISTYROSE = 'mistyrose'
MOCCASIN = 'moccasin'
NAVAJOWHITE = 'navajowhite'
NAVY = 'navy'
OLDLACE = 'oldlace'
OLIVE = 'olive'
OLIVEDRAB = 'olivedrab'
ORANGE = 'orange'
ORANGERED = 'orangered'
ORCHID = 'orchid'
PALEGOLDENROD = 'palegoldenrod'
PALEGREEN = 'palegreen'
PALETURQUOISE = 'paleturquoise'
PALEVIOLETRED = 'palevioletred'
PAPAYAWHIP = 'papayawhip'
PEACHPUFF = 'peachpuff'
PERU = 'peru'
PINK = 'pink'
PLUM = 'plum'
POWDERBLUE = 'powderblue'
PURPLE = 'purple'
REBECCAPURPLE = 'rebeccapurple'
RED = 'red'
ROSYBROWN = 'rosybrown'
ROYALBLUE = 'royalblue'
SADDLEBROWN = 'saddlebrown'
SALMON = 'salmon'
SANDYBROWN = 'sandybrown'
SEAGREEN = 'seagreen'
SEASHELL = 'seashell'
SIENNA = 'sienna'
SILVER = 'silver'
SKYBLUE = 'skyblue'
SLATEBLUE = 'slateblue'
SLATEGRAY = 'slategray'
SLATEGREY = 'slategrey'
SNOW = 'snow'
SPRINGGREEN = 'springgreen'
STEELBLUE = 'steelblue'
TAN = 'tan'
TEAL = 'teal'
THISTLE = 'thistle'
TOMATO = 'tomato'
TURQUOISE = 'turquoise'
VIOLET = 'violet'
WHEAT = 'wheat'
WHITE = 'white'
WHITESMOKE = 'whitesmoke'
YELLOW = 'yellow'
YELLOWGREEN = 'yellowgreen'
NAMED_COLOR = {
ALICEBLUE: rgb(0xF0, 0xF8, 0xFF),
ANTIQUEWHITE: rgb(0xFA, 0xEB, 0xD7),
AQUA: rgb(0x00, 0xFF, 0xFF),
AQUAMARINE: rgb(0x7F, 0xFF, 0xD4),
AZURE: rgb(0xF0, 0xFF, 0xFF),
BEIGE: rgb(0xF5, 0xF5, 0xDC),
BISQUE: rgb(0xFF, 0xE4, 0xC4),
BLACK: rgb(0x00, 0x00, 0x00),
BLANCHEDALMOND: rgb(0xFF, 0xEB, 0xCD),
BLUE: rgb(0x00, 0x00, 0xFF),
BLUEVIOLET: rgb(0x8A, 0x2B, 0xE2),
BROWN: rgb(0xA5, 0x2A, 0x2A),
BURLYWOOD: rgb(0xDE, 0xB8, 0x87),
CADETBLUE: rgb(0x5F, 0x9E, 0xA0),
CHARTREUSE: rgb(0x7F, 0xFF, 0x00),
CHOCOLATE: rgb(0xD2, 0x69, 0x1E),
CORAL: rgb(0xFF, 0x7F, 0x50),
CORNFLOWERBLUE: rgb(0x64, 0x95, 0xED),
CORNSILK: rgb(0xFF, 0xF8, 0xDC),
CRIMSON: rgb(0xDC, 0x14, 0x3C),
CYAN: rgb(0x00, 0xFF, 0xFF),
DARKBLUE: rgb(0x00, 0x00, 0x8B),
DARKCYAN: rgb(0x00, 0x8B, 0x8B),
DARKGOLDENROD: rgb(0xB8, 0x86, 0x0B),
DARKGRAY: rgb(0xA9, 0xA9, 0xA9),
DARKGREY: rgb(0xA9, 0xA9, 0xA9),
DARKGREEN: rgb(0x00, 0x64, 0x00),
DARKKHAKI: rgb(0xBD, 0xB7, 0x6B),
DARKMAGENTA: rgb(0x8B, 0x00, 0x8B),
DARKOLIVEGREEN: rgb(0x55, 0x6B, 0x2F),
DARKORANGE: rgb(0xFF, 0x8C, 0x00),
DARKORCHID: rgb(0x99, 0x32, 0xCC),
DARKRED: rgb(0x8B, 0x00, 0x00),
DARKSALMON: rgb(0xE9, 0x96, 0x7A),
DARKSEAGREEN: rgb(0x8F, 0xBC, 0x8F),
DARKSLATEBLUE: rgb(0x48, 0x3D, 0x8B),
DARKSLATEGRAY: rgb(0x2F, 0x4F, 0x4F),
DARKSLATEGREY: rgb(0x2F, 0x4F, 0x4F),
DARKTURQUOISE: rgb(0x00, 0xCE, 0xD1),
DARKVIOLET: rgb(0x94, 0x00, 0xD3),
DEEPPINK: rgb(0xFF, 0x14, 0x93),
DEEPSKYBLUE: rgb(0x00, 0xBF, 0xFF),
DIMGRAY: rgb(0x69, 0x69, 0x69),
DIMGREY: rgb(0x69, 0x69, 0x69),
DODGERBLUE: rgb(0x1E, 0x90, 0xFF),
FIREBRICK: rgb(0xB2, 0x22, 0x22),
FLORALWHITE: rgb(0xFF, 0xFA, 0xF0),
FORESTGREEN: rgb(0x22, 0x8B, 0x22),
FUCHSIA: rgb(0xFF, 0x00, 0xFF),
GAINSBORO: rgb(0xDC, 0xDC, 0xDC),
GHOSTWHITE: rgb(0xF8, 0xF8, 0xFF),
GOLD: rgb(0xFF, 0xD7, 0x00),
GOLDENROD: rgb(0xDA, 0xA5, 0x20),
GRAY: rgb(0x80, 0x80, 0x80),
GREY: rgb(0x80, 0x80, 0x80),
GREEN: rgb(0x00, 0x80, 0x00),
GREENYELLOW: rgb(0xAD, 0xFF, 0x2F),
HONEYDEW: rgb(0xF0, 0xFF, 0xF0),
HOTPINK: rgb(0xFF, 0x69, 0xB4),
INDIANRED: rgb(0xCD, 0x5C, 0x5C),
INDIGO: rgb(0x4B, 0x00, 0x82),
IVORY: rgb(0xFF, 0xFF, 0xF0),
KHAKI: rgb(0xF0, 0xE6, 0x8C),
LAVENDER: rgb(0xE6, 0xE6, 0xFA),
LAVENDERBLUSH: rgb(0xFF, 0xF0, 0xF5),
LAWNGREEN: rgb(0x7C, 0xFC, 0x00),
LEMONCHIFFON: rgb(0xFF, 0xFA, 0xCD),
LIGHTBLUE: rgb(0xAD, 0xD8, 0xE6),
LIGHTCORAL: rgb(0xF0, 0x80, 0x80),
LIGHTCYAN: rgb(0xE0, 0xFF, 0xFF),
LIGHTGOLDENRODYELLOW: rgb(0xFA, 0xFA, 0xD2),
LIGHTGRAY: rgb(0xD3, 0xD3, 0xD3),
LIGHTGREY: rgb(0xD3, 0xD3, 0xD3),
LIGHTGREEN: rgb(0x90, 0xEE, 0x90),
LIGHTPINK: rgb(0xFF, 0xB6, 0xC1),
LIGHTSALMON: rgb(0xFF, 0xA0, 0x7A),
LIGHTSEAGREEN: rgb(0x20, 0xB2, 0xAA),
LIGHTSKYBLUE: rgb(0x87, 0xCE, 0xFA),
LIGHTSLATEGRAY: rgb(0x77, 0x88, 0x99),
LIGHTSLATEGREY: rgb(0x77, 0x88, 0x99),
LIGHTSTEELBLUE: rgb(0xB0, 0xC4, 0xDE),
LIGHTYELLOW: rgb(0xFF, 0xFF, 0xE0),
LIME: rgb(0x00, 0xFF, 0x00),
LIMEGREEN: rgb(0x32, 0xCD, 0x32),
LINEN: rgb(0xFA, 0xF0, 0xE6),
MAGENTA: rgb(0xFF, 0x00, 0xFF),
MAROON: rgb(0x80, 0x00, 0x00),
MEDIUMAQUAMARINE: rgb(0x66, 0xCD, 0xAA),
MEDIUMBLUE: rgb(0x00, 0x00, 0xCD),
MEDIUMORCHID: rgb(0xBA, 0x55, 0xD3),
MEDIUMPURPLE: rgb(0x93, 0x70, 0xDB),
MEDIUMSEAGREEN: rgb(0x3C, 0xB3, 0x71),
MEDIUMSLATEBLUE: rgb(0x7B, 0x68, 0xEE),
MEDIUMSPRINGGREEN: rgb(0x00, 0xFA, 0x9A),
MEDIUMTURQUOISE: rgb(0x48, 0xD1, 0xCC),
MEDIUMVIOLETRED: rgb(0xC7, 0x15, 0x85),
MIDNIGHTBLUE: rgb(0x19, 0x19, 0x70),
MINTCREAM: rgb(0xF5, 0xFF, 0xFA),
MISTYROSE: rgb(0xFF, 0xE4, 0xE1),
MOCCASIN: rgb(0xFF, 0xE4, 0xB5),
NAVAJOWHITE: rgb(0xFF, 0xDE, 0xAD),
NAVY: rgb(0x00, 0x00, 0x80),
OLDLACE: rgb(0xFD, 0xF5, 0xE6),
OLIVE: rgb(0x80, 0x80, 0x00),
OLIVEDRAB: rgb(0x6B, 0x8E, 0x23),
ORANGE: rgb(0xFF, 0xA5, 0x00),
ORANGERED: rgb(0xFF, 0x45, 0x00),
ORCHID: rgb(0xDA, 0x70, 0xD6),
PALEGOLDENROD: rgb(0xEE, 0xE8, 0xAA),
PALEGREEN: rgb(0x98, 0xFB, 0x98),
PALETURQUOISE: rgb(0xAF, 0xEE, 0xEE),
PALEVIOLETRED: rgb(0xDB, 0x70, 0x93),
PAPAYAWHIP: rgb(0xFF, 0xEF, 0xD5),
PEACHPUFF: rgb(0xFF, 0xDA, 0xB9),
PERU: rgb(0xCD, 0x85, 0x3F),
PINK: rgb(0xFF, 0xC0, 0xCB),
PLUM: rgb(0xDD, 0xA0, 0xDD),
POWDERBLUE: rgb(0xB0, 0xE0, 0xE6),
PURPLE: rgb(0x80, 0x00, 0x80),
REBECCAPURPLE: rgb(0x66, 0x33, 0x99),
RED: rgb(0xFF, 0x00, 0x00),
ROSYBROWN: rgb(0xBC, 0x8F, 0x8F),
ROYALBLUE: rgb(0x41, 0x69, 0xE1),
SADDLEBROWN: rgb(0x8B, 0x45, 0x13),
SALMON: rgb(0xFA, 0x80, 0x72),
SANDYBROWN: rgb(0xF4, 0xA4, 0x60),
SEAGREEN: rgb(0x2E, 0x8B, 0x57),
SEASHELL: rgb(0xFF, 0xF5, 0xEE),
SIENNA: rgb(0xA0, 0x52, 0x2D),
SILVER: rgb(0xC0, 0xC0, 0xC0),
SKYBLUE: rgb(0x87, 0xCE, 0xEB),
SLATEBLUE: rgb(0x6A, 0x5A, 0xCD),
SLATEGRAY: rgb(0x70, 0x80, 0x90),
SLATEGREY: rgb(0x70, 0x80, 0x90),
SNOW: rgb(0xFF, 0xFA, 0xFA),
SPRINGGREEN: rgb(0x00, 0xFF, 0x7F),
STEELBLUE: rgb(0x46, 0x82, 0xB4),
TAN: rgb(0xD2, 0xB4, 0x8C),
TEAL: rgb(0x00, 0x80, 0x80),
THISTLE: rgb(0xD8, 0xBF, 0xD8),
TOMATO: rgb(0xFF, 0x63, 0x47),
TURQUOISE: rgb(0x40, 0xE0, 0xD0),
VIOLET: rgb(0xEE, 0x82, 0xEE),
WHEAT: rgb(0xF5, 0xDE, 0xB3),
WHITE: rgb(0xFF, 0xFF, 0xFF),
WHITESMOKE: rgb(0xF5, 0xF5, 0xF5),
YELLOW: rgb(0xFF, 0xFF, 0x00),
YELLOWGREEN: rgb(0x9A, 0xCD, 0x32),
}
```
#### File: engine/block_layout/test_block_non_replaced_normal_flow.py
```python
from colosseum.constants import AUTO, BLOCK, RTL, SOLID
from colosseum.declaration import CSS
from ...utils import LayoutTestCase, TestNode
class WidthTests(LayoutTestCase):
def test_no_horizontal_properties(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, height=10)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (1024, 10)},
'padding_box': {'position': (0, 0), 'size': (1024, 10)},
'content': {'position': (0, 0), 'size': (1024, 10)},
}
)
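    # Note: the expected values assume LayoutTestCase uses a 1024px-wide viewport
    # (presumably its default), so an unconstrained block fills to 1024 and auto
    # margins absorb whatever width the border box does not use.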
def test_left_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, height=10, margin_left=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (1024, 10)},
'padding_box': {'position': (0, 0), 'size': (1024, 10)},
'content': {'position': (0, 0), 'size': (1024, 10)},
}
)
def test_right_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, height=10, margin_right=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (1024, 10)},
'padding_box': {'position': (0, 0), 'size': (1024, 10)},
'content': {'position': (0, 0), 'size': (1024, 10)},
}
)
def test_left_and_right_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, height=10, margin_left=AUTO, margin_right=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (1024, 10)},
'padding_box': {'position': (0, 0), 'size': (1024, 10)},
'content': {'position': (0, 0), 'size': (1024, 10)},
}
)
def test_width(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=50, height=10)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (50, 10)},
'padding_box': {'position': (0, 0), 'size': (50, 10)},
'content': {'position': (0, 0), 'size': (50, 10)},
}
)
def test_width_auto_left_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=50, height=10, margin_left=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (974, 0), 'size': (50, 10)},
'padding_box': {'position': (974, 0), 'size': (50, 10)},
'content': {'position': (974, 0), 'size': (50, 10)},
}
)
def test_width_auto_right_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=50, height=10, margin_right=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (50, 10)},
'padding_box': {'position': (0, 0), 'size': (50, 10)},
'content': {'position': (0, 0), 'size': (50, 10)},
}
)
def test_width_auto_left_and_right_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=50, height=10, margin_left=AUTO, margin_right=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (487, 0), 'size': (50, 10)},
'padding_box': {'position': (487, 0), 'size': (50, 10)},
'content': {'position': (487, 0), 'size': (50, 10)},
}
)
def test_width_fixed_left_and_right_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=50, height=10, margin_left=30, margin_right=40)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (30, 0), 'size': (50, 10)},
'padding_box': {'position': (30, 0), 'size': (50, 10)},
'content': {'position': (30, 0), 'size': (50, 10)},
}
)
def test_width_fixed_left_and_right_margin_rtl(self):
node = TestNode(
name='div', style=CSS(
display=BLOCK, width=50, height=10,
margin_left=30, margin_right=40, direction=RTL
)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (934, 0), 'size': (50, 10)},
'padding_box': {'position': (934, 0), 'size': (50, 10)},
'content': {'position': (934, 0), 'size': (50, 10)},
}
)
def test_width_exceeds_parent(self):
node = TestNode(
name='div', style=CSS(
display=BLOCK, width=500, height=20,
padding=50, border_width=60, border_style=SOLID,
margin=70
)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (70, 70), 'size': (720, 240)},
'padding_box': {'position': (130, 130), 'size': (600, 120)},
'content': {'position': (180, 180), 'size': (500, 20)},
}
)
def test_width_exceeds_parent_auto_left_and_right_margins(self):
node = TestNode(
name='div', style=CSS(
display=BLOCK, width=500, height=20,
padding=50, border_width=60, border_style=SOLID,
margin_left=AUTO, margin_right=AUTO
)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (152, 0), 'size': (720, 240)},
'padding_box': {'position': (212, 60), 'size': (600, 120)},
'content': {'position': (262, 110), 'size': (500, 20)},
}
)
class HeightTests(LayoutTestCase):
def test_no_vertical_properties(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=10)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (10, 0)},
'padding_box': {'position': (0, 0), 'size': (10, 0)},
'content': {'position': (0, 0), 'size': (10, 0)},
}
)
def test_height(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=10, height=50)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (10, 50)},
'padding_box': {'position': (0, 0), 'size': (10, 50)},
'content': {'position': (0, 0), 'size': (10, 50)},
}
)
def test_height_auto_top_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=10, height=50, margin_top=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (10, 50)},
'padding_box': {'position': (0, 0), 'size': (10, 50)},
'content': {'position': (0, 0), 'size': (10, 50)},
}
)
def test_height_auto_bottom_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=10, height=50, margin_bottom=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (10, 50)},
'padding_box': {'position': (0, 0), 'size': (10, 50)},
'content': {'position': (0, 0), 'size': (10, 50)},
}
)
```
#### File: colosseum/tests/test_color.py
```python
from unittest import TestCase
from colosseum.colors import hsl, rgb
class ColorTests(TestCase):
def assertEqualColor(self, a, b):
self.assertEqual(a.rgb.r, b.rgb.r)
self.assertEqual(a.rgb.g, b.rgb.g)
self.assertEqual(a.rgb.b, b.rgb.b)
self.assertEqual(a.rgb.a, b.rgb.a)
def test_rgb_repr(self):
self.assertEqual(repr(rgb(10, 20, 30, 0.5)), "rgba(10, 20, 30, 0.5)")
def test_hsl_repr(self):
self.assertEqual(repr(hsl(10, 0.2, 0.3, 0.5)), "hsla(10, 0.2, 0.3, 0.5)")
def test_hsl_blacks(self):
self.assertEqualColor(hsl(0, 0.0, 0.0), rgb(0x00, 0x00, 0x00))
self.assertEqualColor(hsl(60, 0.0, 0.0), rgb(0x00, 0x00, 0x00))
self.assertEqualColor(hsl(180, 0.0, 0.0), rgb(0x00, 0x00, 0x00))
self.assertEqualColor(hsl(240, 0.0, 0.0), rgb(0x00, 0x00, 0x00))
self.assertEqualColor(hsl(360, 0.0, 0.0), rgb(0x00, 0x00, 0x00))
def test_hsl_whites(self):
self.assertEqualColor(hsl(0, 0.0, 1.0), rgb(0xff, 0xff, 0xff))
self.assertEqualColor(hsl(60, 0.0, 1.0), rgb(0xff, 0xff, 0xff))
self.assertEqualColor(hsl(180, 0.0, 1.0), rgb(0xff, 0xff, 0xff))
self.assertEqualColor(hsl(240, 0.0, 1.0), rgb(0xff, 0xff, 0xff))
self.assertEqualColor(hsl(360, 0.0, 1.0), rgb(0xff, 0xff, 0xff))
def test_hsl_grays(self):
self.assertEqualColor(hsl(0, 0.0, 0.2), rgb(0x33, 0x33, 0x33))
self.assertEqualColor(hsl(0, 0.0, 0.4), rgb(0x66, 0x66, 0x66))
self.assertEqualColor(hsl(0, 0.0, 0.5), rgb(0x80, 0x80, 0x80))
self.assertEqualColor(hsl(0, 0.0, 0.6), rgb(0x99, 0x99, 0x99))
self.assertEqualColor(hsl(0, 0.0, 0.8), rgb(0xcc, 0xcc, 0xcc))
def test_hsl_primaries(self):
self.assertEqualColor(hsl(0, 1.0, 0.5), rgb(0xff, 0x00, 0x00))
self.assertEqualColor(hsl(60, 1.0, 0.5), rgb(0xff, 0xff, 0x00))
self.assertEqualColor(hsl(120, 1.0, 0.5), rgb(0x00, 0xff, 0x00))
self.assertEqualColor(hsl(180, 1.0, 0.5), rgb(0x00, 0xff, 0xff))
self.assertEqualColor(hsl(240, 1.0, 0.5), rgb(0x00, 0x00, 0xff))
self.assertEqualColor(hsl(300, 1.0, 0.5), rgb(0xff, 0x00, 0xff))
self.assertEqualColor(hsl(360, 1.0, 0.5), rgb(0xff, 0x00, 0x00))
def test_hsl_muted(self):
self.assertEqualColor(hsl(0, 0.25, 0.25), rgb(0x50, 0x30, 0x30))
self.assertEqualColor(hsl(60, 0.25, 0.25), rgb(0x50, 0x50, 0x30))
self.assertEqualColor(hsl(120, 0.25, 0.25), rgb(0x30, 0x50, 0x30))
self.assertEqualColor(hsl(180, 0.25, 0.25), rgb(0x30, 0x50, 0x50))
self.assertEqualColor(hsl(240, 0.25, 0.25), rgb(0x30, 0x30, 0x50))
self.assertEqualColor(hsl(300, 0.25, 0.25), rgb(0x50, 0x30, 0x50))
self.assertEqualColor(hsl(360, 0.25, 0.25), rgb(0x50, 0x30, 0x30))
self.assertEqualColor(hsl(0, 0.25, 0.75), rgb(0xcf, 0xaf, 0xaf))
self.assertEqualColor(hsl(60, 0.25, 0.75), rgb(0xcf, 0xcf, 0xaf))
self.assertEqualColor(hsl(120, 0.25, 0.75), rgb(0xaf, 0xcf, 0xaf))
self.assertEqualColor(hsl(180, 0.25, 0.75), rgb(0xaf, 0xcf, 0xcf))
self.assertEqualColor(hsl(240, 0.25, 0.75), rgb(0xaf, 0xaf, 0xcf))
self.assertEqualColor(hsl(300, 0.25, 0.75), rgb(0xcf, 0xaf, 0xcf))
self.assertEqualColor(hsl(360, 0.25, 0.75), rgb(0xcf, 0xaf, 0xaf))
self.assertEqualColor(hsl(0, 0.75, 0.75), rgb(0xef, 0x8f, 0x8f))
self.assertEqualColor(hsl(60, 0.75, 0.75), rgb(0xef, 0xef, 0x8f))
self.assertEqualColor(hsl(120, 0.75, 0.75), rgb(0x8f, 0xef, 0x8f))
self.assertEqualColor(hsl(180, 0.75, 0.75), rgb(0x8f, 0xef, 0xef))
self.assertEqualColor(hsl(240, 0.75, 0.75), rgb(0x8f, 0x8f, 0xef))
self.assertEqualColor(hsl(300, 0.75, 0.75), rgb(0xef, 0x8f, 0xef))
self.assertEqualColor(hsl(360, 0.75, 0.75), rgb(0xef, 0x8f, 0x8f))
self.assertEqualColor(hsl(0, 0.75, 0.25), rgb(0x70, 0x10, 0x10))
self.assertEqualColor(hsl(60, 0.75, 0.25), rgb(0x70, 0x70, 0x10))
self.assertEqualColor(hsl(120, 0.75, 0.25), rgb(0x10, 0x70, 0x10))
self.assertEqualColor(hsl(180, 0.75, 0.25), rgb(0x10, 0x70, 0x70))
self.assertEqualColor(hsl(240, 0.75, 0.25), rgb(0x10, 0x10, 0x70))
self.assertEqualColor(hsl(300, 0.75, 0.25), rgb(0x70, 0x10, 0x70))
self.assertEqualColor(hsl(360, 0.75, 0.25), rgb(0x70, 0x10, 0x10))
def test_hsl_alpha(self):
self.assertEqualColor(hsl(60, 0.0, 0.0, 0.3), rgb(0x00, 0x00, 0x00, 0.3))
self.assertEqualColor(hsl(60, 0.0, 1.0, 0.3), rgb(0xff, 0xff, 0xff, 0.3))
self.assertEqualColor(hsl(60, 1.0, 0.5, 0.3), rgb(0xff, 0xff, 0x00, 0.3))
self.assertEqualColor(hsl(60, 0.25, 0.25, 0.3), rgb(0x50, 0x50, 0x30, 0.3))
self.assertEqualColor(hsl(60, 0.25, 0.75, 0.3), rgb(0xcf, 0xcf, 0xaf, 0.3))
self.assertEqualColor(hsl(60, 0.75, 0.75, 0.3), rgb(0xef, 0xef, 0x8f, 0.3))
self.assertEqualColor(hsl(60, 0.75, 0.25, 0.3), rgb(0x70, 0x70, 0x10, 0.3))
```
#### File: colosseum/tests/test_wrappers.py
```python
from collections import OrderedDict
from itertools import permutations
from unittest import TestCase
from colosseum.units import px
from colosseum.wrappers import (
Border,
BorderBottom,
BorderLeft,
BorderRight,
BorderSpacing,
BorderTop,
Cursor,
ImmutableList,
Outline,
Quotes,
Shorthand,
)
class BorderSpacingTests(TestCase):
def test_valid_1_arg_string(self):
border_spacing = BorderSpacing('1')
self.assertEqual(border_spacing.horizontal, '1')
self.assertEqual(border_spacing.vertical, '1')
self.assertEqual(str(border_spacing), '1')
self.assertEqual(repr(border_spacing), "BorderSpacing('1')")
def test_valid_1_arg_int(self):
border_spacing = BorderSpacing(1)
self.assertEqual(border_spacing.horizontal, 1)
self.assertEqual(border_spacing.vertical, 1)
self.assertEqual(str(border_spacing), '1')
self.assertEqual(repr(border_spacing), "BorderSpacing(1)")
def test_valid_1_arg_px(self):
border_spacing = BorderSpacing(1 * px)
self.assertEqual(border_spacing.horizontal, 1 * px)
self.assertEqual(border_spacing.vertical, 1 * px)
self.assertEqual(str(border_spacing), '1px')
self.assertEqual(repr(border_spacing), "BorderSpacing(1px)")
def test_valid_2_arg_str(self):
border_spacing = BorderSpacing('1', '2')
self.assertEqual(border_spacing.horizontal, '1')
self.assertEqual(border_spacing.vertical, '2')
self.assertEqual(str(border_spacing), '1 2')
self.assertEqual(repr(border_spacing), "BorderSpacing('1', '2')")
def test_valid_2_arg_int(self):
border_spacing = BorderSpacing(1, 2)
self.assertEqual(border_spacing.horizontal, 1)
self.assertEqual(border_spacing.vertical, 2)
self.assertEqual(str(border_spacing), '1 2')
self.assertEqual(repr(border_spacing), 'BorderSpacing(1, 2)')
def test_valid_2_arg_px(self):
border_spacing = BorderSpacing(1 * px, 2 * px)
self.assertEqual(border_spacing.horizontal, 1 * px)
self.assertEqual(border_spacing.vertical, 2 * px)
self.assertEqual(str(border_spacing), '1px 2px')
self.assertEqual(repr(border_spacing), 'BorderSpacing(1px, 2px)')
def test_invalid_arg_number(self):
with self.assertRaises(TypeError):
BorderSpacing(1, 2, 3)
class QuotesTests(TestCase):
# Valid cases
def test_quotes_valid_1_pair(self):
quotes = Quotes([('<', '>')])
self.assertEqual(quotes.opening(level=0), '<')
self.assertEqual(quotes.closing(level=0), '>')
self.assertEqual(len(quotes), 1)
self.assertEqual(str(quotes), "'<' '>'")
self.assertEqual(repr(quotes), "Quotes([('<', '>')])")
def test_quotes_valid_2_pairs(self):
quotes = Quotes([('<', '>'), ('{', '}')])
self.assertEqual(quotes.opening(level=0), '<')
self.assertEqual(quotes.closing(level=0), '>')
self.assertEqual(quotes.opening(level=1), '{')
self.assertEqual(quotes.closing(level=1), '}')
self.assertEqual(len(quotes), 2)
self.assertEqual(str(quotes), "'<' '>' '{' '}'")
self.assertEqual(repr(quotes), "Quotes([('<', '>'), ('{', '}')])")
# Invalid cases
def test_quotes_invalid_1_pair_level(self):
quotes = Quotes([('<', '>')])
with self.assertRaises(IndexError):
quotes.opening(level=1)
with self.assertRaises(IndexError):
quotes.closing(level=1)
class TestShorthand(TestCase):
def test_shorthand_invalid_empty(self):
with self.assertRaises(ValueError):
Shorthand()
class TestShorthandOutline(TestCase):
def test_shorthand_outline_valid_empty(self):
outline = Outline()
self.assertEqual(str(outline), '')
self.assertEqual(repr(outline), 'Outline()')
def test_shorthand_outline_valid_1_kwargs(self):
for property_name in ['outline_color', 'outline_style', 'outline_width']:
outline = Outline(**{property_name: 1})
self.assertEqual(str(outline), '1')
self.assertEqual(getattr(outline, property_name), 1)
def test_shorthand_outline_valid_2_kwargs(self):
perms = permutations(['outline_color', 'outline_style', 'outline_width'], 2)
for (prop_1, prop_2) in perms:
kwargs = {prop_1: 1, prop_2: 2}
outline = Outline(**kwargs)
self.assertEqual(str(outline), ' '.join(str(v[1]) for v in sorted(kwargs.items())))
def test_shorthand_outline_valid_3_kwargs(self):
perms = permutations(['outline_color', 'outline_style', 'outline_width'])
for (prop_1, prop_2, prop_3) in perms:
kwargs = {prop_1: 1, prop_2: 2, prop_3: 3}
outline = Outline(**kwargs)
self.assertEqual(str(outline), ' '.join(str(v[1]) for v in sorted(kwargs.items())))
def test_shorthand_outline_valid_get_values(self):
perms = permutations(['outline_color', 'outline_style', 'outline_width'])
for (prop_1, prop_2, prop_3) in perms:
kwargs = {prop_1: 1, prop_2: 2, prop_3: 3}
outline = Outline(**kwargs)
self.assertEqual(getattr(outline, prop_1), kwargs[prop_1])
self.assertEqual(getattr(outline, prop_2), kwargs[prop_2])
self.assertEqual(getattr(outline, prop_3), kwargs[prop_3])
def test_shorthand_outline_valid_set_values(self):
perms = permutations(['outline_color', 'outline_style', 'outline_width'])
for (prop_1, prop_2, prop_3) in perms:
kwargs = {prop_1: 1, prop_2: 2, prop_3: 3}
outline = Outline(**kwargs)
self.assertEqual(getattr(outline, prop_1), kwargs[prop_1])
self.assertEqual(getattr(outline, prop_2), kwargs[prop_2])
self.assertEqual(getattr(outline, prop_3), kwargs[prop_3])
def test_shorthand_outline_equality(self):
perms = permutations(['outline_color', 'outline_style', 'outline_width'])
for (prop_1, prop_2, prop_3) in perms:
kwargs = {prop_1: 1, prop_2: 2, prop_3: 3}
outline1 = Outline(**kwargs)
outline2 = Outline(**kwargs)
self.assertEqual(outline1, outline2)
def test_shorthand_outline_valid_to_dict(self):
expected_output = ['outline_color', 'outline_style', 'outline_width']
perms = permutations(expected_output)
for (prop_1, prop_2, prop_3) in perms:
kwargs = {prop_1: 1, prop_2: 2, prop_3: 3}
outline = Outline(**kwargs)
self.assertEqual(outline.to_dict(), kwargs)
# Invalid cases
def test_shorthand_outline_invalid_kwargs(self):
with self.assertRaises(ValueError):
Outline(foobar='foobar')
class TestShorthandBorder(TestCase):
    def test_shorthand_border_valid_empty(self):
for wrapper_class in [Border, BorderBottom, BorderLeft, BorderRight, BorderTop]:
wrapper = wrapper_class()
self.assertEqual(str(wrapper), '')
self.assertEqual(repr(wrapper), wrapper_class.__name__ + '()')
def test_shorthand_outline_valid_1_kwargs(self):
for direction, wrapper_class in {'': Border,
'bottom_': BorderBottom,
'left_': BorderLeft,
'right_': BorderRight,
'top_': BorderTop}.items():
for property_name in ['border_{direction}color'.format(direction=direction),
'border_{direction}style'.format(direction=direction),
'border_{direction}width'.format(direction=direction)]:
wrapper = wrapper_class(**{property_name: 1})
self.assertEqual(str(wrapper), '1')
self.assertEqual(getattr(wrapper, property_name), 1)
def test_shorthand_outline_valid_2_kwargs(self):
for direction, wrapper_class in {'': Border,
'bottom_': BorderBottom,
'left_': BorderLeft,
'right_': BorderRight,
'top_': BorderTop}.items():
perms = permutations(['border_{direction}color'.format(direction=direction),
'border_{direction}style'.format(direction=direction),
'border_{direction}width'.format(direction=direction)], 2)
for (prop_1, prop_2) in perms:
kwargs = OrderedDict()
# This is to guarantee the proper order
for property_name in wrapper_class.VALID_KEYS:
if prop_1 == property_name:
kwargs[prop_1] = 1
elif prop_2 == property_name:
kwargs[prop_2] = 2
wrapper = wrapper_class(**kwargs)
self.assertEqual(str(wrapper), ' '.join(str(v[1]) for v in kwargs.items()))
def test_shorthand_outline_valid_3_kwargs(self):
for direction, wrapper_class in {'': Border,
'bottom_': BorderBottom,
'left_': BorderLeft,
'right_': BorderRight,
'top_': BorderTop}.items():
perms = permutations(['border_{direction}color'.format(direction=direction),
'border_{direction}style'.format(direction=direction),
'border_{direction}width'.format(direction=direction)])
for (prop_1, prop_2, prop_3) in perms:
kwargs = OrderedDict()
# This is to guarantee the proper order
for idx, property_name in enumerate(wrapper_class.VALID_KEYS):
kwargs[property_name] = idx + 1
wrapper = wrapper_class(**kwargs)
self.assertEqual(str(wrapper), ' '.join(str(v[1]) for v in kwargs.items()))
def test_shorthand_outline_valid_get_values(self):
for direction, wrapper_class in {'': Border,
'bottom_': BorderBottom,
'left_': BorderLeft,
'right_': BorderRight,
'top_': BorderTop}.items():
perms = permutations(['border_{direction}color'.format(direction=direction),
'border_{direction}style'.format(direction=direction),
'border_{direction}width'.format(direction=direction)])
for (prop_1, prop_2, prop_3) in perms:
kwargs = OrderedDict()
# This is to guarantee the proper order
for idx, property_name in enumerate(wrapper_class.VALID_KEYS):
kwargs[property_name] = idx + 1
wrapper = wrapper_class(**kwargs)
self.assertEqual(getattr(wrapper, prop_1), kwargs[prop_1])
self.assertEqual(getattr(wrapper, prop_2), kwargs[prop_2])
self.assertEqual(getattr(wrapper, prop_3), kwargs[prop_3])
def test_shorthand_outline_valid_set_values(self):
for direction, wrapper_class in {'': Border,
'bottom_': BorderBottom,
'left_': BorderLeft,
'right_': BorderRight,
'top_': BorderTop}.items():
perms = permutations(['border_{direction}color'.format(direction=direction),
'border_{direction}style'.format(direction=direction),
'border_{direction}width'.format(direction=direction)])
for (prop_1, prop_2, prop_3) in perms:
kwargs = OrderedDict()
# This is to guarantee the proper order
for idx, property_name in enumerate(wrapper_class.VALID_KEYS):
kwargs[property_name] = idx + 1
wrapper = wrapper_class(**kwargs)
self.assertEqual(getattr(wrapper, prop_1), kwargs[prop_1])
self.assertEqual(getattr(wrapper, prop_2), kwargs[prop_2])
self.assertEqual(getattr(wrapper, prop_3), kwargs[prop_3])
def test_shorthand_outline_equality(self):
for direction, wrapper_class in {'': Border,
'bottom_': BorderBottom,
'left_': BorderLeft,
'right_': BorderRight,
'top_': BorderTop}.items():
perms = permutations(['border_{direction}color'.format(direction=direction),
'border_{direction}style'.format(direction=direction),
'border_{direction}width'.format(direction=direction)])
for (prop_1, prop_2, prop_3) in perms:
kwargs = OrderedDict()
# This is to guarantee the proper order
for idx, property_name in enumerate(wrapper_class.VALID_KEYS):
kwargs[property_name] = idx + 1
wrapper1 = wrapper_class(**kwargs)
wrapper2 = wrapper_class(**kwargs)
self.assertEqual(wrapper1, wrapper2)
def test_shorthand_outline_valid_to_dict(self):
for direction, wrapper_class in {'': Border,
'bottom_': BorderBottom,
'left_': BorderLeft,
'right_': BorderRight,
'top_': BorderTop}.items():
perms = permutations(['border_{direction}color'.format(direction=direction),
'border_{direction}style'.format(direction=direction),
'border_{direction}width'.format(direction=direction)])
for (prop_1, prop_2, prop_3) in perms:
kwargs = OrderedDict()
# This is to guarantee the proper order
for idx, property_name in enumerate(wrapper_class.VALID_KEYS):
kwargs[property_name] = idx + 1
wrapper = wrapper_class(**kwargs)
self.assertEqual(wrapper.to_dict(), kwargs)
# Invalid cases
def test_shorthand_outline_invalid_kwargs(self):
for wrapper_class in [Border, BorderBottom, BorderLeft, BorderRight, BorderTop]:
with self.assertRaises(ValueError):
wrapper_class(foobar='foobar')
class ImmutableListTests(TestCase):
def test_immutable_list_initial(self):
# Check initial
ilist = ImmutableList()
self.assertEqual(str(ilist), '')
self.assertEqual(repr(ilist), 'ImmutableList()')
self.assertEqual(len(ilist), 0)
def test_immutable_list_creation(self):
# Check value
ilist = ImmutableList([1])
self.assertEqual(str(ilist), "1")
self.assertEqual(repr(ilist), "ImmutableList([1])")
self.assertEqual(len(ilist), 1)
# Check values
ilist = ImmutableList(['1', '2'])
self.assertEqual(str(ilist), "1, 2")
self.assertEqual(repr(ilist), "ImmutableList(['1', '2'])")
self.assertEqual(len(ilist), 2)
def test_immutable_list_get_item(self):
# Check get item
ilist = ImmutableList(['1', '2'])
self.assertEqual(ilist[0], '1')
self.assertEqual(ilist[-1], '2')
def test_immutable_list_set_item(self):
# Check immutable
ilist = ImmutableList()
with self.assertRaises(TypeError):
ilist[0] = 'initial'
def test_immutable_list_equality(self):
# Check equality
ilist1 = ImmutableList(['1', 2])
ilist2 = ImmutableList(['1', 2])
ilist3 = ImmutableList([2, '1'])
self.assertEqual(ilist1, ilist2)
self.assertNotEqual(ilist1, ilist3)
def test_immutable_list_hash(self):
# Check hash
ilist1 = ImmutableList(['1', 2])
ilist2 = ImmutableList(['1', 2])
self.assertEqual(hash(ilist1), hash(ilist2))
def test_immutable_list_id(self):
# Check id
ilist1 = ImmutableList(['1', 2])
ilist2 = ImmutableList(['1', 2])
self.assertNotEqual(id(ilist1), id(ilist2))
self.assertNotEqual(id(ilist1), id(ilist1.copy()))
self.assertNotEqual(id(ilist2), id(ilist1.copy()))
def test_immutable_list_copy(self):
# Check copy
ilist1 = ImmutableList(['1', 2])
ilist2 = ImmutableList(['1', 2])
self.assertEqual(hash(ilist2), hash(ilist1.copy()))
self.assertEqual(ilist1, ilist1.copy())
class CursorTests(TestCase):
def test_cursor_initial(self):
# Check initial
ilist = Cursor()
self.assertEqual(str(ilist), '')
self.assertEqual(repr(ilist), 'Cursor()')
self.assertEqual(len(ilist), 0)
def test_cursor_creation(self):
# Check value
ilist = Cursor([1])
self.assertEqual(str(ilist), "1")
self.assertEqual(repr(ilist), "Cursor([1])")
self.assertEqual(len(ilist), 1)
# Check values
ilist = Cursor(['1', '2'])
self.assertEqual(str(ilist), "1, 2")
self.assertEqual(repr(ilist), "Cursor(['1', '2'])")
self.assertEqual(len(ilist), 2)
``` |
{
"source": "jonboland/gardenlife",
"score": 3
} |
#### File: gardenlife/gardenlife/event_funcs.py
```python
from datetime import datetime
def check_date_validity(date):
"""Check date validity by attempting to create datetime object with strptime."""
datetime.strptime(date, "%d/%m/%Y")
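# For example (illustrative): check_date_validity("29/02/2021") raises ValueError,
# since 2021 has no 29 February, while check_date_validity("01/03/2021") returns None.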
def clear_summary_values(window):
"""Clear all garden summary tab values."""
for value in (
"GARDEN NAME",
"LOCATION",
"SIZE",
"OWNED BY",
"OWNED FOR",
"TOTAL CREATURES",
"TOTAL PLANTS",
"TOTAL TASKS",
"OUTSTANDING TASKS",
):
window[f"-SUMMARY {value}-"].update("")
def update_task_summaries(window, garden):
"""Update total and outstanding task summaries."""
window["-SUMMARY TOTAL TASKS-"].update(len(garden.tasks))
window["-SUMMARY OUTSTANDING TASKS-"].update(
sum(
task.get_current_progress() in {"Due", "Overdue", "Very overdue"}
for task in garden.tasks.values()
)
)
def update_garden_dropdown(window, gardens):
"""Sort garden names and update garden dropdown."""
garden_names = sorted(list(gardens))
window["-SELECT GARDEN-"].update(values=garden_names, size=(34, 10))
def clear_garden_values(window):
"""Clear all manage garden tab values."""
for value in ("GARDEN NAME", "LOCATION", "SIZE", "OWNER NAMES", "OWNED SINCE"):
window[f"-{value}-"].update("")
def update_creature_dropdowns(window, garden):
"""Sort, filter, and update creature tab dropdowns."""
creature_names = sorted([""] + list(garden.creatures))
types = {c.org_type for c in garden.creatures.values() if c.org_type}
creature_types = sorted([""] + list(types))
window["-CREATURE NAME-"].update(values=creature_names, size=(25, 10))
window["-CREATURE TYPE-"].update(values=creature_types, size=(25, 10))
def clear_creature_values(window):
"""Clear all manage creature tab values."""
for value in ("NAME", "TYPE", "APPEARED DATE", "STATUS", "NOTES"):
window[f"-CREATURE {value}-"].update("")
for value in ("IMPACT", "PREVALENCE", "TREND"):
window[f"-CREATURE {value} SLIDER-"].update(3)
def update_plant_dropdowns(window, garden):
"""Sort, filter, and update plant tab dropdowns."""
plant_names = sorted([""] + list(garden.plants))
types = {p.org_type for p in garden.plants.values() if p.org_type}
plant_types = sorted([""] + list(types))
window["-PLANT NAME-"].update(values=plant_names, size=(25, 10))
window["-PLANT TYPE-"].update(values=plant_types, size=(25, 10))
def clear_plant_values(window):
"""Clear all manage plant tab values."""
for value in ("NAME", "TYPE", "PLANTED DATE", "EDIBLE", "STATUS", "NOTES"):
window[f"-PLANT {value}-"].update("")
for value in ("IMPACT", "PREVALENCE", "TREND"):
window[f"-PLANT {value} SLIDER-"].update(3)
def update_task_dropdown(window, garden):
"""Sort and update task name dropdown."""
task_names = sorted([""] + list(garden.tasks))
window["-TASK NAME-"].update(values=task_names, size=(25, 10))
def clear_task_values(window):
"""Clear all manage task tab values."""
for value in (
"NAME",
"PROGRESS",
"NEXT DUE",
"ASSIGNEE",
"LENGTH",
"STATUS",
"NOTES",
"START",
"FREQUENCY",
"COUNT",
"BY MONTH",
"INTERVAL",
):
window[f"-TASK {value}-"].update("")
def clear_organism_links(window, garden):
"""Clear creature and plant links."""
window["-TASK LINKED CREATURES-"].update(sorted(list(garden.creatures)))
window["-TASK LINKED PLANTS-"].update(sorted(list(garden.plants)))
def update_all_item_dropdowns(window, garden):
"""Update creature, plant, and task dropdowns."""
update_creature_dropdowns(window, garden)
update_plant_dropdowns(window, garden)
update_task_dropdown(window, garden)
def clear_all_item_dropdowns(window):
"""Clear all creature, plant, and task dropdowns."""
for value in (
"CREATURE NAME",
"CREATURE TYPE",
"PLANT NAME",
"PLANT TYPE",
"TASK NAME",
):
window[f"-{value}-"].update(values="", size=(25, 10))
def clear_all_item_values_and_links(window, garden):
"""Clear all creature, plant, and task values and links."""
clear_creature_values(window)
clear_plant_values(window)
clear_task_values(window)
clear_organism_links(window, garden)
def clear_all_values_and_links(window, garden):
"""Clear all garden, summary, creature, plant, and task values and links."""
clear_garden_values(window)
clear_summary_values(window)
clear_all_item_values_and_links(window, garden)
```
#### File: gardenlife/gardenlife/organisms.py
```python
from organism import Organism
class Creature(Organism):
"""Class to represent a creature."""
def __init__(self, *args, appeared=None, **kwargs):
super().__init__(*args, **kwargs)
self.appeared = appeared
def __str__(self):
return f"Creature name: {self.name}. Creature type: {self.org_type}."
class Plant(Organism):
"""Class to represent a plant."""
def __init__(self, *args, edible=False, planted=None, **kwargs):
super().__init__(*args, **kwargs)
self.edible = edible
self.planted = planted
def __str__(self):
return f"Plant name: {self.name}. Plant type: {self.org_type}."
```
#### File: gardenlife/gardenlife/subwindows.py
```python
import pickle
import sys
import PySimpleGUI as sg
from constants import ACCENT_COLOR, CREATURE_HEADS, PLANT_HEADS, TASK_HEADS
import summary_funcs
def unsaved_changes_window(window, gardens):
"""
Display confirmation window when user attempts to close the application
with unsaved changes. Options are save, don't save, and cancel.
"""
window.Disable()
confirm_layout = [
[sg.Text("Would you like to save your changes?", pad=(0, 15))],
[
sg.Button("Save", size=(10, 1)),
sg.Button("Don't Save", size=(10, 1)),
sg.Button("Cancel", size=(10, 1)),
],
]
confirm_window = sg.Window(
"Confirm",
confirm_layout,
keep_on_top=True,
element_justification="center",
)
while True:
confirm_event, confirm_values = confirm_window.read()
# print(confirm_event, confirm_values)
if confirm_event == "Save":
with open("gardens.pickle", "wb") as file:
pickle.dump(gardens, file)
sys.exit()
if confirm_event == "Don't Save":
sys.exit()
if confirm_event in ("Cancel", sg.WIN_CLOSED):
confirm_window.close()
window.Enable()
break
def add_progress_window(window, task):
"""Display window enabling user to add progress to the selected task."""
window.Disable()
progress_layout = [
[
sg.Column(
[
[sg.Checkbox(date, default=value, key=date)]
for date, value in task.get_all_progress().items()
],
size=(200, 200),
scrollable=True,
),
],
[sg.Button("Add")],
]
progress_window = sg.Window("Add Progress", progress_layout, keep_on_top=True)
while True:
progress_event, progress_values = progress_window.read()
# print(progress_event, progress_values)
if progress_event == "Add":
task.update_completed_dates(progress_values)
progress_window.close()
window.Enable()
return True
if progress_event == sg.WIN_CLOSED:
progress_window.close()
window.Enable()
break
def view_creatures_window(window, garden):
"""Display window containing summary of all the creatures in the currently selected garden."""
window.Disable()
header_row = [[summary_funcs.summary_head_format(title) for title in CREATURE_HEADS]]
creatures = [
summary_funcs.creature_fields(creature)
for creature in summary_funcs.sorted_organisms(garden.creatures.values())
]
creature_table = header_row + creatures
creature_summary_column = [summary_funcs.organism_column_format(creature_table)]
creature_summary_layout = [creature_summary_column, [sg.Button("Close")]]
creature_summary_window = sg.Window("Creature Summary", creature_summary_layout, keep_on_top=True)
while True:
creature_sum_event, creature_sum_values = creature_summary_window.read()
# print(creature_sum_event, creature_sum_values)
if creature_sum_event in (sg.WIN_CLOSED, "Close"):
creature_summary_window.close()
window.Enable()
break
def view_plants_window(window, garden, attr="name", title=""):
"""
Display window containing summary of the plants in the currently selected garden.
The window can be filtered by an attribute such as whether the plant is edible.
"""
window.Disable()
header_row = [[summary_funcs.summary_head_format(title) for title in PLANT_HEADS]]
plants = [
summary_funcs.plant_fields(plant)
for plant in summary_funcs.sorted_organisms(garden.plants.values())
if getattr(plant, attr)
]
plant_table = header_row + plants
plant_summary_column = [summary_funcs.organism_column_format(plant_table)]
plant_summary_layout = [plant_summary_column, [sg.Button("Close")]]
plant_summary_window = sg.Window(f"{title}Plant Summary", plant_summary_layout, keep_on_top=True)
while True:
plant_sum_event, plant_sum_values = plant_summary_window.read()
# print(plant_sum_event, plant_sum_values)
if plant_sum_event in (sg.WIN_CLOSED, "Close"):
plant_summary_window.close()
window.Enable()
break
def view_tasks_window(window, garden):
"""
Display window containing summary of the tasks in the currently selected garden.
The tasks are sorted by status, progress, due date, assignee, and name.
"""
window.Disable()
name_head = [
sg.Input(TASK_HEADS[0], size=(18, 1), text_color="white", background_color=ACCENT_COLOR)
]
other_head = [summary_funcs.summary_head_format(title) for title in TASK_HEADS[1:]]
header_row = [name_head + other_head]
tasks = [
summary_funcs.task_fields(task) for task in summary_funcs.sorted_tasks(garden.tasks.values())
]
task_table = header_row + tasks
task_summary_column = [sg.Column(task_table, size=(880, 500), scrollable=True)]
task_summary_layout = [task_summary_column, [sg.Button("Close")]]
task_summary_window = sg.Window("Task Summary", task_summary_layout, keep_on_top=True)
while True:
task_sum_event, task_sum_values = task_summary_window.read()
# print(task_sum_event, task_sum_values)
if task_sum_event in (sg.WIN_CLOSED, "Close"):
task_summary_window.close()
window.Enable()
break
```
#### File: gardenlife/gardenlife/task.py
```python
from datetime import datetime
from dateutil.rrule import rrule
from constants import FREQS
from status import Status
class Task:
"""Class to represent a garden task."""
def __init__(
self,
name,
description=None,
assignee=None,
length=None,
linked_creatures=None,
linked_plants=None,
):
self.name = name
self.schedule = [self._set_date()]
self.description = description
self.assignee = assignee
self.length = length
self.completed_dates = []
self.linked_creatures = linked_creatures
self.linked_plants = linked_plants
self.raw_schedule = None
self.status = Status()
def __repr__(self):
return f"Task: {self.name}"
def __eq__(self, other):
return repr(self) == other
def set_schedule(self, start_date, freq, count, bymonth, interval):
"""Set task's scheduled dates using dateutils.rrule."""
# Stores the raw schedule values to repopulate UI fields
self.raw_schedule = {
"start date": start_date,
"freq": freq,
"count": count,
"bymonth": bymonth,
"interval": interval,
}
# Converts string to datetime object. Sets start date to today if not supplied
start_date = self._set_date(start_date)
# Sets the frequency to the required value or monthly if not supplied
freq = FREQS.get(freq, FREQS["Monthly"])
count = int(count) if count else 1
bymonth = [int(month) for month in bymonth.split(" ")] if bymonth else None
interval = int(interval) if interval else 1
# Creates the specified list of scheduled dates with dateutils.rrule
self.schedule = list(
rrule(
dtstart=start_date,
freq=freq,
count=count,
bymonth=bymonth,
interval=interval,
)
)
def update_completed_dates(self, all_progress):
"""
Take a dict containing all scheduled dates as keys in string format.
        Adds or removes dates from the completed dates list based on their boolean values.
"""
for date_string, boolean in all_progress.items():
date = self._string_to_date(date_string)
if boolean:
self._add_completed_date(date)
else:
self._remove_completed_date(date)
self.completed_dates.sort()
def _add_completed_date(self, date):
# Add date to completed date list if not already present
if date not in self.completed_dates:
self.completed_dates.append(date)
def _remove_completed_date(self, date):
# Remove date from completed dates list
try:
self.completed_dates.remove(date)
except ValueError:
pass
def get_all_progress(self):
"""
Return a dict containing all scheduled dates in string format with bool
indicating whether they are in the completed dates list.
Any completed dates that are not in the current schedule are also included.
"""
return {
self._date_to_string(date): (date in self.completed_dates)
for date in sorted(self.schedule + self.completed_dates)
}
def get_current_progress(self, current_date=None):
"""Return current task progress."""
# Convert string to datetime object. Set current date to today if no date supplied
current_date = self._set_date(current_date)
if not self.completed_dates:
if current_date < self.schedule[0]:
return "Not yet due"
elif current_date == self.schedule[0]:
return "Due"
missed_dates_no_completed = sum(
date < current_date for date in self.schedule
)
if missed_dates_no_completed == 1:
return "Overdue"
# If number of missed dates isn't 1 it must be greater than 1
return "Very overdue"
if current_date in self.schedule and current_date > self.completed_dates[-1]:
return "Due"
# Scheduled dates since task last completed, before or on the current date
missed_dates_with_completed = sum(
date > self.completed_dates[-1] and date < current_date
for date in self.schedule
)
if missed_dates_with_completed == 1:
return "Overdue"
elif missed_dates_with_completed > 1:
return "Very overdue"
# If there aren't any missed dates the task is up to date
return "Completed"
def get_next_due_date(self):
"""Return task's next due date in string format."""
if not self.completed_dates:
return self._date_to_string(self.schedule[0])
elif self.schedule[-1] <= self.completed_dates[-1]:
return "No further due dates"
next_due = min(
date for date in self.schedule if date > self.completed_dates[-1]
)
return self._date_to_string(next_due)
def _set_date(self, date=None):
        # Return a datetime object from a string, or today if no date is supplied
if date:
return self._string_to_date(date)
return datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
def _string_to_date(self, date_string):
# Convert a string into a datetime object
return datetime.strptime(date_string, "%d/%m/%Y")
def _date_to_string(self, date_object):
# Convert a datetime object into a string
return datetime.strftime(date_object, "%d/%m/%Y")
```
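The scheduling above is driven by dateutil's rrule. A short, illustrative usage sketch (not part of the repo; the task name, assignee, and dates are invented, and it assumes constants.FREQS maps "Monthly" to dateutil's MONTHLY frequency, which the fallback in set_schedule implies):
```python
task = Task("Weed the beds", assignee="Jon")
task.set_schedule("01/03/2021", "Monthly", 6, "", 1)   # six monthly dates from 1 Mar 2021
print(task.get_next_due_date())                        # "01/03/2021"
task.update_completed_dates({"01/03/2021": True})      # tick off the first date
print(task.get_current_progress("15/03/2021"))         # "Completed"
print(task.get_current_progress("15/04/2021"))         # "Overdue" (1 Apr was missed)
```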
#### File: gardenlife/tests/test_creature.py
```python
import pytest
import context
import organisms
@pytest.fixture
def badger():
return organisms.Creature(
"badger",
"mammal",
appeared="03/07/2020",
notes="Digs holes in various parts of the garden.",
age=10,
impact=3,
prevalence=2,
trend=4,
)
def test_impact_neutral(badger):
assert badger.get_level("impact") == "Neutral"
def test_prevalence_low(badger):
badger.prevalence = 1
assert badger.get_level("prevalence") == "Very Low"
def test_invalid_impact_level(badger):
with pytest.raises(ValueError) as excinfo:
badger.impact = "high"
assert str(excinfo.value) == "high is not a valid impact level"
def test_date_appeared(badger):
assert badger.appeared == "03/07/2020"
def test_notes(badger):
assert badger.notes == "Digs holes in various parts of the garden."
def test_unarchived(badger):
badger.status.archive()
badger.status.unarchive()
assert badger.status.status == "Current"
if __name__ == "__main__":
pytest.main()
``` |
{
"source": "jonboland/patientflow",
"score": 2
} |
#### File: patients/tests/test_views.py
```python
from django.test import TestCase
from django.shortcuts import reverse
class HomePageTest(TestCase):
def test_get(self):
response = self.client.get(reverse('home-page'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'index.html')
```
#### File: patientflow/staff/mixins.py
```python
from urllib.parse import urlparse
from django.contrib.auth.mixins import AccessMixin
from django.contrib.auth.views import redirect_to_login
from django.shortcuts import resolve_url
class OrganiserAndLoginRequiredMixin(AccessMixin):
"""Verify that the current user is authenticated and is an organiser."""
def handle_no_permission(self):
path = self.request.build_absolute_uri()
resolved_login_url = resolve_url(self.get_login_url())
# If the login url is the same scheme and net location then use the
# path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if (not login_scheme or login_scheme == current_scheme) and (
not login_netloc or login_netloc == current_netloc
):
path = self.request.get_full_path()
return redirect_to_login(
path,
resolved_login_url,
self.get_redirect_field_name(),
)
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated or not request.user.is_organiser:
return self.handle_no_permission()
return super().dispatch(request, *args, **kwargs)
``` |
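To show where the mixin slots in, here is a hypothetical class-based view guarded by it. The view, template, and import path are placeholders, and it assumes the project's custom user model exposes an is_organiser flag, as dispatch() above implies:
```python
from django.views import generic

from staff.mixins import OrganiserAndLoginRequiredMixin  # assumed import path


class DashboardView(OrganiserAndLoginRequiredMixin, generic.TemplateView):
    # Only authenticated organisers reach this view; everyone else is
    # redirected to the login page by handle_no_permission().
    template_name = "dashboard.html"  # placeholder template
```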
{
"source": "jonboland/x86-64-assembly",
"score": 2
} |
#### File: generators/exercises/collatz_conjecture.py
```python
FUNC_PROTO = """\
#include "vendor/unity.h"
extern int steps(int number);
"""
def gen_func_body(prop, inp, expected):
number = inp["number"]
return f"TEST_ASSERT_EQUAL_INT({expected}, {prop}({number}));\n"
```
#### File: generators/exercises/two_fer.py
```python
FUNC_PROTO = """\
#include "vendor/unity.h"
#define BUFFER_SIZE 256
extern void two_fer(const char *name, char *buffer);
"""
def gen_func_body(prop, inp, expected):
str_list = []
name = inp["name"]
str_list.append("char buffer[BUFFER_SIZE];\n\n")
if name:
str_list.append(f'{prop}("{name}", buffer);\n')
else:
str_list.append(f"{prop}(NULL, buffer);\n")
str_list.append(f'TEST_ASSERT_EQUAL_STRING("{expected}", buffer);\n')
return "".join(str_list)
``` |
{
"source": "JonBons/python-a2s",
"score": 3
} |
#### File: python-a2s/a2s/datacls.py
```python
import collections
import copy
class DataclsBase:
def __init__(self, **kwargs):
for name, value in self._defaults.items():
if name in kwargs:
value = kwargs[name]
setattr(self, name, copy.copy(value))
def __iter__(self):
for name in self.__annotations__:
yield (name, getattr(self, name))
def __repr__(self):
return "{}({})".format(
self.__class__.__name__,
", ".join(name + "=" + repr(value) for name, value in self))
class DataclsMeta(type):
def __new__(cls, name, bases, prop):
values = collections.OrderedDict()
for member_name in prop["__annotations__"].keys():
# Check if member has a default value set as class variable
if member_name in prop:
# Store default value and remove the class variable
values[member_name] = prop[member_name]
del prop[member_name]
else:
# Set None as the default value
values[member_name] = None
prop["__slots__"] = list(values.keys())
prop["_defaults"] = values
bases = (DataclsBase, *bases)
return super().__new__(cls, name, bases, prop)
def __prepare__(self, *args, **kwargs):
return collections.OrderedDict()
``` |
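DataclsMeta reads the class annotations, turns any class-level values into per-instance defaults, and generates __slots__. A minimal sketch of a consumer class (hypothetical, not taken from python-a2s):
```python
class ServerInfo(metaclass=DataclsMeta):
    server_name: str        # no default, so it falls back to None
    player_count: int = 0   # becomes a per-instance default

info = ServerInfo(server_name="example")
print(info)        # ServerInfo(server_name='example', player_count=0)
print(dict(info))  # {'server_name': 'example', 'player_count': 0}
```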
{
"source": "JonBoyleCoding/fava-envelope",
"score": 2
} |
#### File: fava-envelope/fava_envelope/__init__.py
```python
from fava.ext import FavaExtensionBase
from fava import __version__ as fava_version
from beancount.core.number import Decimal, D
from .modules.beancount_envelope import BeancountEnvelope
import ast
class EnvelopeBudget(FavaExtensionBase):
'''
'''
report_title = "Envelope Budget"
def generate_budget_df(self,currency):
self.currency=currency
module = BeancountEnvelope(
self.ledger.entries,
self.ledger.options,
self.currency
)
self.income_tables, self.envelope_tables, self.currency = module.envelope_tables()
def get_budgets_months_available(self,currency):
self.generate_budget_df(currency)
return self.income_tables.columns
def check_month_in_available_months(self,month,currency):
if month:
if month in self.get_budgets_months_available(currency):
return True
return False
def get_currencies(self):
if "currencies" in self.config:
return self.config["currencies"]
else:
return None
def generate_income_query_tables(self, month):
income_table_types = []
income_table_types.append(("Name", str(str)))
income_table_types.append(("Amount", str(Decimal)))
income_table_rows = []
if month is not None:
row = {}
income_table_rows.append({
"Name": "Funds for month",
"Amount": self.income_tables[month]["Avail Income"]
})
income_table_rows.append({
"Name": "Overspent in prev month",
"Amount": self.income_tables[month]["Overspent"]
})
income_table_rows.append({
"Name": "Budgeted for month",
"Amount": self.income_tables[month]["Budgeted"]
})
income_table_rows.append({
"Name": "To be budgeted for month",
"Amount": self.income_tables[month]["To Be Budgeted"]
})
income_table_rows.append({
"Name": "Budgeted in the future",
"Amount": self.income_tables[month]["Budgeted Future"]
})
return income_table_types, income_table_rows
def generate_envelope_query_tables(self, month):
envelope_table_types = []
envelope_table_types.append(("Account", str(str)))
envelope_table_types.append(("Budgeted", str(Decimal)))
envelope_table_types.append(("Activity", str(Decimal)))
envelope_table_types.append(("Available", str(Decimal)))
envelope_table_rows = []
if month is not None:
for index, e_row in self.envelope_tables.iterrows():
row = {}
row["Account"] = index
row["Budgeted"] = e_row[month, "budgeted"]
row["Activity"] = e_row[month, "activity"]
row["Available"] = e_row[month, "available"]
envelope_table_rows.append(row)
return envelope_table_types, envelope_table_rows
def use_new_querytable(self):
"""
from redstreet/fava_investor
fava added the ledger as a first required argument to
querytable.querytable after version 1.18, so in order to support both,
we have to detect the version and adjust how we call it from inside our
template
"""
split_version = fava_version.split('.')
if len(split_version) != 2:
split_version = split_version[:2]
major, minor = split_version
return int(major) > 1 or (int(major) == 1 and int(minor) > 18)
``` |
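The version test in use_new_querytable boils down to a small predicate. A standalone restatement for illustration only (not part of the extension), assuming the version string has at least a major and a minor component:
```python
def needs_new_querytable(version: str) -> bool:
    """True for fava releases newer than 1.18 (e.g. '1.19', '2.0.1')."""
    major, minor = version.split(".")[:2]
    return int(major) > 1 or (int(major) == 1 and int(minor) > 18)

assert needs_new_querytable("1.19")
assert not needs_new_querytable("1.18")
```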
{
"source": "JonBoyleCoding/qtile",
"score": 2
} |
#### File: test/layouts/test_matrix.py
```python
import pytest
import libqtile.config
from libqtile import layout
from libqtile.confreader import Config
from test.conftest import no_xinerama
from test.layouts.layout_utils import assert_focus_path, assert_focused
class MatrixConfig(Config):
auto_fullscreen = True
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
layout.Matrix(columns=2)
]
floating_layout = libqtile.layout.floating.Floating()
keys = []
mouse = []
screens = []
def matrix_config(x):
return no_xinerama(pytest.mark.parametrize("qtile", [MatrixConfig], indirect=True)(x))
@matrix_config
def test_matrix_simple(qtile):
qtile.test_window("one")
assert qtile.c.layout.info()["rows"] == [["one"]]
qtile.test_window("two")
assert qtile.c.layout.info()["rows"] == [["one", "two"]]
qtile.test_window("three")
assert qtile.c.layout.info()["rows"] == [["one", "two"], ["three"]]
@matrix_config
def test_matrix_navigation(qtile):
qtile.test_window("one")
qtile.test_window("two")
qtile.test_window("three")
qtile.test_window("four")
qtile.test_window("five")
qtile.c.layout.right()
assert qtile.c.layout.info()["current_window"] == (0, 2)
qtile.c.layout.up()
assert qtile.c.layout.info()["current_window"] == (0, 1)
qtile.c.layout.up()
assert qtile.c.layout.info()["current_window"] == (0, 0)
qtile.c.layout.up()
assert qtile.c.layout.info()["current_window"] == (0, 2)
qtile.c.layout.down()
assert qtile.c.layout.info()["current_window"] == (0, 0)
qtile.c.layout.down()
assert qtile.c.layout.info()["current_window"] == (0, 1)
qtile.c.layout.right()
assert qtile.c.layout.info()["current_window"] == (1, 1)
qtile.c.layout.right()
assert qtile.c.layout.info()["current_window"] == (0, 1)
@matrix_config
def test_matrix_add_remove_columns(qtile):
qtile.test_window("one")
qtile.test_window("two")
qtile.test_window("three")
qtile.test_window("four")
qtile.test_window("five")
qtile.c.layout.add()
assert qtile.c.layout.info()["rows"] == [["one", "two", "three"], ["four", "five"]]
qtile.c.layout.delete()
assert qtile.c.layout.info()["rows"] == [["one", "two"], ["three", "four"], ["five"]]
@matrix_config
def test_matrix_window_focus_cycle(qtile):
# setup 3 tiled and two floating clients
qtile.test_window("one")
qtile.test_window("two")
qtile.test_window("float1")
qtile.c.window.toggle_floating()
qtile.test_window("float2")
qtile.c.window.toggle_floating()
qtile.test_window("three")
# test preconditions
assert qtile.c.layout.info()['clients'] == ['one', 'two', 'three']
# last added window has focus
assert_focused(qtile, "three")
# assert window focus cycle, according to order in layout
assert_focus_path(qtile, 'float1', 'float2', 'one', 'two', 'three')
@matrix_config
def test_matrix_next_no_clients(qtile):
qtile.c.layout.next()
@matrix_config
def test_matrix_previous_no_clients(qtile):
qtile.c.layout.previous()
``` |
{
"source": "JonBoynton/JSConvert",
"score": 2
} |
#### File: jsconvert/jsrules/components.py
```python
from jsconvert.transpiler import CodeRule
from jsconvert.comp import Extendable, Separator, VariableType, Expression
from jsconvert.lang import KW_do, KW_import
__author__ = "<NAME>"
__copyright__ = "<NAME> 2022"
__license__ = "Apache License, Version 2.0"
__all__ = [
"Comp", "NamType", "Attr", "Else", "Catch",
"CatchOnly", "Finaly", "DeclareVar", "ObjType", "ClosedStatementBlock",
"OpenStatementBlock", "DeclareLet", "DeclareConst", "DeclareClass", "DeclareClassExt",
"DoWhile", "ImportFrom", "Retrn", "ArrayBegin", "LabelStm",
"CaseStm", "BreakStm", "Meth", "Func", "Getr", "Setr", "Constr"]
class Comp(CodeRule):
def __init__(self):
super().__init__("Compare", ["Compare"])
def apply(self, b, offset):
b.add(b.current().name+" ")
return 1
class Meth(CodeRule):
def __init__(self, name="Method"):
super().__init__(name, [name])
def apply(self, b, offset):
b.insert_code("")
return 1
class Getr(Meth):
def __init__(self):
super().__init__("Getter")
class Func(Meth):
def __init__(self):
super().__init__("KW_function")
class Setr(Meth):
def __init__(self):
super().__init__("Setter")
class Constr(Meth):
def __init__(self):
super().__init__("KW_constructor")
class NamType(CodeRule):
def __init__(self):
super().__init__("NameType", ["Function", "NameType"])
def apply(self, buffer, offset):
buffer.add(str(buffer.current(1)))
return 2
class NextStatement(CodeRule):
def __init__(self, name, cls):
super().__init__("end-"+name, ["End", cls])
def apply(self, b, offset):
if b.current().name != "}":
return 0
b.trim()
b.new_line(-1)
b.add("} ")
b.add(self.name[4:])
if not isinstance(b.current(offset+1), Expression):
b.space()
return 2
class Else(NextStatement):
def __init__(self):
super().__init__("else", "KW_else")
class Catch(NextStatement):
def __init__(self):
super().__init__("catch", "KW_catch")
class CatchOnly(NextStatement):
def __init__(self):
super().__init__("catch", "XW_catch")
class Finaly(NextStatement):
def __init__(self):
super().__init__("finally", "KW_finally")
# class ObjStatementType(CodeRule):
# def __init__(self):
# super().__init__("single_statement", ["ObjectType", "Block"])
#
# def apply(self, b, offset):
# if b.current(2).name == "{":
# return 0
#
# b.new_line(1)
# return 2
class ClosedStatementBlock(CodeRule):
def __init__(self):
super().__init__("open_statment_block", ["StatementBlock", "Block", "Begin"])
def apply(self, b, offset):
b.add("{")
b.new_line(1)
return 3
class OpenStatementBlock(CodeRule):
def __init__(self):
super().__init__("closed_statment_block", ["StatementBlock", "Block"])
def apply(self, b, offset):
sb = b.get_sub_buffer(b.next())
b.new_line(1)
while sb.offset < sb.size:
sb.rules.process(sb)
b.add(("".join(sb.buf)).strip())
ofs = sb.size + 2
if b.current(ofs).name == ";":
b.add(";")
ofs += 1
b.new_line(-1)
return ofs
class ObjType(CodeRule):
def __init__(self):
super().__init__("object-type", ["ObjectType"])
def apply(self, b, offset):
c = b.current()
sb = b.get_sub_buffer(c)
sb.inobject = True
ch = c.get_children()
sb.add("{")
sb.new_line(1)
cnt = 2
ln = len(ch)
i = 1
while i < ln:
c = ch[i]
if c.name == ":":
i += 1
sb.add(": ")
cnt += sb.append_buffer(sb.get_sub_buffer(ch[i]))+1
elif c.name == ",":
sb.trim()
sb.add(",")
sb.new_line()
elif c.name == "}":
sb.trim()
sb.new_line(-1)
sb.add("}")
if c.extended:
sb.add(".")
elif isinstance(c, VariableType):
sb.add(c.name)
else:
sb.add(str(c))
cnt += 1
i += 1
sb.offset = sb.size
b.append_buffer(sb)
return cnt
class Declareable(CodeRule):
def __init__(self, name, entry):
super().__init__(name, [entry])
def apply(self, b, offset):
b.add(str(b.current()))
if not isinstance(b.next(), Separator):
b.space()
return 1
class DeclareVar(Declareable):
def __init__(self):
super().__init__("declare_var", "KW_var")
class DeclareLet(Declareable):
def __init__(self):
super().__init__("declare_let", "KW_let")
class DeclareConst(Declareable):
def __init__(self):
super().__init__("declare_const", "KW_const")
class Attr(Declareable):
def __init__(self):
super().__init__("Attribute", "Attribute")
class DoWhile(CodeRule):
def __init__(self):
super().__init__("do_while", ["End", "KW_while"])
def apply(self, b, offset):
if not isinstance(b.current(1).get_previous(), KW_do):
return 0
b.trim()
b.new_line(-1)
b.add("} while")
return 2
class ImportFrom(CodeRule):
def __init__(self):
super().__init__("import_from", ["KW_from"])
def apply(self, b, offset):
if not isinstance(b.current().par, KW_import):
return 0
b.trim()
b.add(" from ")
return 1
class Retrn(Declareable):
def __init__(self):
super().__init__("return", "KW_return")
class ArrayBegin(CodeRule):
def __init__(self):
super().__init__("array_begin", ["ArrayType", "Begin"])
def apply(self, b, offset):
if isinstance(b.prev(), Extendable):
b.trim()
b.add("[")
return 2
class LabelStm(CodeRule):
def __init__(self):
super().__init__("Label", ["Label", "Separator"])
def apply(self, b, offset):
b.add(b.current().value+":")
b.new_line()
return 2
class CaseStm(CodeRule):
def __init__(self):
super().__init__("case", ["KW_case", "ANY", "End"])
def apply(self, b, offset):
sb = b.get_sub_buffer(b.current())
while sb.offset < sb.size:
sb.rules.process(sb)
b.trim()
b.new_line()
b.add("case "+("".join(sb.buf)).strip())
b.space()
return sb.size+1
class DeclareClass(CodeRule):
def __init__(self):
super().__init__("class-extension", ["KW_class", "Declaration"])
def apply(self, b, offset):
b.insert_code("")
b.add(str(b.next()))
b.space()
return 2
class DeclareClassExt(CodeRule):
def __init__(self):
super().__init__("declare-class-extension", ["KW_class", "Declaration", "KW_extends"])
def apply(self, b, offset):
b.insert_code("")
b.add(str(b.next()) + " "+str(b.current(offset)))
b.space()
return 3
class BreakStm(CodeRule):
def __init__(self):
super().__init__("Break", ["KW_break"])
def apply(self, b, offset):
b.add(b.current().name)
if not b.next().name == ";":
b.space()
return 1
``` |
{
"source": "jonbrew/Arbiter",
"score": 4
} |
#### File: Arbiter/source/Exchange.py
```python
from abc import ABC, abstractmethod
class Exchange(ABC):
def __init__(self, name, api_base):
self.name = name
self.api_base = api_base
self.prices = {}
def get_coins(self):
return list(self.prices.keys())
def update(self):
self.update_coins()
self.update_prices()
@abstractmethod
def update_coins(self):
pass
@abstractmethod
def update_prices(self):
pass
```
#### File: source/exchanges/Bittrex.py
```python
import Const
from Exchange import Exchange
import requests
class Bittrex(Exchange):
def __init__(self):
super().__init__('Bittrex', 'https://bittrex.com')
self.prices = {}
def update_coins(self):
self.prices.clear()
coins = requests.get(self.api_base+'/api/v1.1/public/getcurrencies')
        if coins.status_code == Const.SC_OK :
coins = coins.json()
else :
print(Const.BOLD+Const.FAIL+'Unable to reach '+self.name+' API'+Const.ENDC)
return
for supported in Const.COINS :
for c in coins['result'] :
if c['Currency'] == supported and c['IsActive'] :
self.prices[supported] = {}
break
def update_prices(self):
ticker = requests.get(self.api_base+'/api/v1.1/public/getmarketsummaries')
        if ticker.status_code == Const.SC_OK :
ticker = ticker.json()
else :
print(Const.BOLD+Const.FAIL+'Unable to reach '+self.name+' API'+Const.ENDC)
return
for c in self.get_coins() :
for r in ticker['result'] :
if r['MarketName'] == Const.BTC+'-'+c :
self.prices[c]['bid'] = float(r['Bid'])
self.prices[c]['ask'] = float(r['Ask'])
self.prices[c]['last'] = float(r['Last'])
break
```
#### File: Arbiter/source/Utils.py
```python
def pct_inc(p1, p2) :
return ((p2-p1)/p1)*100
``` |
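A quick worked example of the helper (Python 3 true division is assumed, matching the rest of the sources shown here):
```python
print(pct_inc(100, 150))  # 50.0  -> moving from 100 to 150 is a 50% increase
print(pct_inc(100, 80))   # -20.0 -> a fall shows up as a negative increase
```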
{
"source": "jonbulica99/deeplator",
"score": 3
} |
#### File: deeplator/deeplator/jsonrpc.py
```python
import json
import urllib.request as request
JSONRPC_VERSION = "2.0"
HEADERS = {"content-type": "application/json"}
class JSONRPCBuilder():
def __init__(self, method, params):
self.method = method
self.params = params
def dump(self):
data = {
"jsonrpc": JSONRPC_VERSION,
"method": self.method,
"params": self.params,
"id": 0
}
return data
def dumps(self):
data = self.dump()
data_str = json.dumps(data)
return data_str.encode("utf-8")
def send(self, url):
req = request.Request(url, data=self.dumps(), headers=HEADERS)
data_str = request.urlopen(req).read()
resp = json.loads(data_str.decode("utf-8"))
if "result" in resp:
return resp["result"]
else:
raise JSONRPCError(resp["error"])
class JSONRPCError(Exception):
def __init__(self, error_obj):
self.code = error_obj["code"]
self.message = error_obj["message"]
if "data" in error_obj:
self.data = error_obj["data"]
def __str__(self):
return "{}: {}".format(self.code, self.message)
``` |
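A minimal sketch of driving JSONRPCBuilder. The method name, params, and URL are placeholders and assume some JSON-RPC 2.0 server is listening at that address:
```python
rpc = JSONRPCBuilder("sum", {"a": 1, "b": 2})
print(rpc.dumps())  # roughly b'{"jsonrpc": "2.0", "method": "sum", "params": {"a": 1, "b": 2}, "id": 0}'
try:
    print(rpc.send("http://localhost:8080/jsonrpc"))  # hypothetical endpoint
except JSONRPCError as err:
    print("server returned an error:", err)
```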
{
"source": "jonburdo/dvc",
"score": 2
} |
#### File: repo/experiments/pull.py
```python
import logging
from dvc.exceptions import DvcException, InvalidArgumentError
from dvc.repo import locked
from dvc.repo.scm_context import scm_context
from .utils import exp_commits, resolve_name
logger = logging.getLogger(__name__)
@locked
@scm_context
def pull(
repo, git_remote, exp_name, *args, force=False, pull_cache=False, **kwargs
):
exp_ref_dict = resolve_name(repo.scm, exp_name, git_remote)
exp_ref = exp_ref_dict[exp_name]
if not exp_ref:
raise InvalidArgumentError(
f"Experiment '{exp_name}' does not exist in '{git_remote}'"
)
def on_diverged(refname: str, rev: str) -> bool:
if repo.scm.get_ref(refname) == rev:
return True
raise DvcException(
f"Local experiment '{exp_name}' has diverged from remote "
"experiment with the same name. To override the local experiment "
"re-run with '--force'."
)
refspec = f"{exp_ref}:{exp_ref}"
logger.debug("git pull experiment '%s' -> '%s'", git_remote, refspec)
from dvc.scm import TqdmGit
with TqdmGit(desc="Fetching git refs") as pbar:
repo.scm.fetch_refspecs(
git_remote,
[refspec],
force=force,
on_diverged=on_diverged,
progress=pbar.update_git,
)
if pull_cache:
_pull_cache(repo, exp_ref, **kwargs)
def _pull_cache(repo, exp_ref, dvc_remote=None, jobs=None, run_cache=False):
revs = list(exp_commits(repo.scm, [exp_ref]))
logger.debug("dvc fetch experiment '%s'", exp_ref)
repo.fetch(jobs=jobs, remote=dvc_remote, run_cache=run_cache, revs=revs)
``` |
{
"source": "Jon-Burr/dbobj",
"score": 3
} |
#### File: src/dbmeta/weakcoll.py
```python
import weakref
from future.utils import itervalues, iteritems
class WeakColl(object):
""" An iterable collection of weakrefs to objects
When iterating over these the actual objects are returned if the refs
are still alive, otherwise the reference is removed from the list.
This is not particularly carefully optimised...
"""
def __init__(self):
# Mapping of object IDs to their references
self._refs = {}
def flush(self, r=None):
""" Remove any dead references """
self._refs = {id(r()): r for r in itervalues(self._refs) if r() is not None}
def __len__(self):
""" Get the number of still living references """
self.flush()
return len(self._refs)
def __iter__(self):
""" Iterate over any still living referenced objects """
self.flush()
return (r() for r in itervalues(self._refs))
def __contains__(self, obj):
""" Is an object in the collection """
return id(obj) in self._refs
def remove(self, obj, permissive=True):
""" Remove all references to an object from the collection
A KeyError will only be raised if permissive is False
"""
try:
del self._refs[id(obj)]
except KeyError:
if not permissive:
raise
def rm_by_ref(self, r):
""" Remove by the reference value.
This should only be used by the weakref callback.
"""
try:
obj_id = next(k for (k, v) in iteritems(self._refs) if v is r)
except StopIteration:
return
del self._refs[obj_id]
def append(self, obj):
""" Add an object to the collection (pass the actual object in here, not
a weakref)
"""
if obj not in self:
self._refs[id(obj)] = weakref.ref(obj, self.rm_by_ref)
``` |
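A short usage sketch (assumed, not from the repo). The immediate drop in len(coll) relies on CPython's reference counting collecting the object as soon as its last strong reference disappears:
```python
class Node:
    pass

coll = WeakColl()
a, b = Node(), Node()
coll.append(a)
coll.append(b)
print(len(coll))  # 2
del b             # last strong reference gone; the weakref callback fires
print(len(coll))  # 1
print(a in coll)  # True
```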
{
"source": "Jon-Burr/memoclass",
"score": 3
} |
#### File: memoclass/tests/test_class.py
```python
from memoclass.memoclass import MemoClass, mutates
from memoclass.memoize import memomethod
import pytest
class PartialSum(MemoClass):
def __init__(self, stored, **kwargs):
super(PartialSum, self).__init__(
mutable_attrs=["call_count"], **kwargs)
self.stored = stored
self.call_count = 0
@memomethod
def __call__(self, other):
self.call_count += 1
return self.stored + other
@mutates
def do_mutate(self):
pass
@classmethod
def reset(cls):
cls.__call__.clear_cache()
@memomethod
def call_twice(self, other):
self(other)
return self(other)
def test_cls():
""" Make sure that the test class is working """
assert PartialSum(5)(3) == 8
def test_cache():
""" Make sure that the cache is working """
PartialSum.reset()
a = PartialSum(5)
assert a(3) == 8
a(3)
assert a(5) == 10
assert a.call_count == 2
a = None
def test_mutate():
""" Make sure that the mutates functionality is working """
PartialSum.reset()
a = PartialSum(5)
assert a(3) == 8
a.stored = 3
assert a(3) == 6
assert a.call_count == 2
a.do_mutate()
assert a(3) == 6
assert a.call_count == 3
def test_disable():
""" Make sure that disabling the cache works correctly """
PartialSum.reset()
a = PartialSum(5)
a.disable_caches()
a(3)
a(3)
assert a.call_count == 2
def test_lock():
""" Make sure that locking works correctly """
PartialSum.reset()
a = PartialSum(5)
a.disable_caches()
with a.locked():
a(3)
a(3)
assert a.call_count == 1
with pytest.raises(ValueError):
a.stored = 5
a(3)
a(3)
assert a.call_count == 3
with a.locked():
a(3)
assert a.call_count == 4
def test_lockedfunc():
""" Make sure that a locking function works properly """
PartialSum.reset()
a = PartialSum(5)
a.disable_caches()
assert a.call_twice(3) == 8
assert a.call_count == 1
```
#### File: memoclass/tests/test_mutates_with.py
```python
from memoclass.memoize import memomethod
from memoclass.memoclass import MemoClass
class Provider(MemoClass):
def __init__(self, value):
self.receiver = None
self.value = value
super(Provider, self).__init__()
def mutates_with_this(self):
if self.receiver is None:
return ()
else:
return (self.receiver,)
class Receiver(MemoClass):
def __init__(self, provider):
self.provider = provider
super(Receiver, self).__init__()
@memomethod
def append(self, value):
return self.provider.value + value
def test_cls():
""" Make sure the classes work at all """
p = Provider([1, 2, 3])
r = Receiver(p)
p.receiver = r
assert r.append([4, 5]) == [1, 2, 3, 4, 5]
def test_no_receiver():
""" Test what happens when the receiver is set (should result in incorrect
behaviour)
"""
p = Provider([1, 2, 3])
r = Receiver(p)
r.append([4, 5])
p.value = [2, 3]
# This is the wrong value now - changing p.value has *not* changed r.append
assert r.append([4, 5]) == [1, 2, 3, 4, 5]
def test_receiver():
""" Make sure that the provider mutates the receiver correctly """
p = Provider([1, 2, 3])
r = Receiver(p)
p.receiver = r
r.append([4, 5])
p.value = [2, 3]
assert r.append([4, 5]) == [2, 3, 4, 5]
``` |
{
"source": "Jon-Burr/panda-client",
"score": 2
} |
#### File: panda-client/pandatools/Client.py
```python
import os
if os.environ.has_key('PANDA_DEBUG'):
print "DEBUG : importing %s" % __name__
import re
import sys
import time
import stat
import types
try:
import json
except:
import simplejson as json
import random
import urllib
import struct
import commands
import cPickle as pickle
import xml.dom.minidom
import socket
import tempfile
import MiscUtils
import PLogger
# configuration
try:
baseURL = os.environ['PANDA_URL']
except:
baseURL = 'http://pandaserver.cern.ch:25080/server/panda'
try:
baseURLSSL = os.environ['PANDA_URL_SSL']
except:
baseURLSSL = 'https://pandaserver.cern.ch:25443/server/panda'
baseURLDQ2 = 'http://atlddmcat-reader.cern.ch/dq2'
baseURLDQ2SSL = 'https://atlddmcat-writer.cern.ch:443/dq2'
baseURLSUB = "http://pandaserver.cern.ch:25080/trf/user"
baseURLMON = "http://panda.cern.ch:25980/server/pandamon/query"
baseURLCSRV = "http://pandacache.cern.ch:25080/server/panda"
baseURLCSRVSSL = "https://pandacache.cern.ch:25443/server/panda"
#baseURLCSRV = "http://aipanda011.cern.ch:25080/server/panda"
#baseURLCSRVSSL = "http://aipanda011.cern.ch:25443/server/panda"
# exit code
EC_Failed = 255
# default max size per job
maxTotalSize = long(14*1024*1024*1024)
# safety size for input size calculation
safetySize = long(500*1024*1024)
# suffix for shadow dataset
suffixShadow = "_shadow"
# limit on maxCpuCount
maxCpuCountLimit = 1000000000
# retrieve pathena config
try:
# get default timeout
defTimeOut = socket.getdefaulttimeout()
# set timeout
socket.setdefaulttimeout(60)
except:
pass
if os.environ.has_key('PANDA_DEBUG'):
print "DEBUG : getting panda cache server's name"
# get panda cache server's name
try:
getServerURL = baseURLCSRV + '/getServer'
res = urllib.urlopen(getServerURL)
# overwrite URL
baseURLCSRVSSL = "https://%s/server/panda" % res.read()
except:
type, value, traceBack = sys.exc_info()
print type,value
print "ERROR : could not getServer from %s" % getServerURL
sys.exit(EC_Failed)
try:
# reset timeout
socket.setdefaulttimeout(defTimeOut)
except:
pass
if os.environ.has_key('PANDA_DEBUG'):
print "DEBUG : ready"
# look for a grid proxy certificate
def _x509():
# see X509_USER_PROXY
try:
return os.environ['X509_USER_PROXY']
except:
pass
# see the default place
x509 = '/tmp/x509up_u%s' % os.getuid()
if os.access(x509,os.R_OK):
return x509
# no valid proxy certificate
# FIXME
print "No valid grid proxy certificate found"
return ''
# look for a CA certificate directory
def _x509_CApath():
# use X509_CERT_DIR
try:
return os.environ['X509_CERT_DIR']
except:
pass
# get X509_CERT_DIR
gridSrc = _getGridSrc()
com = "%s echo $X509_CERT_DIR" % gridSrc
tmpOut = commands.getoutput(com)
return tmpOut.split('\n')[-1]
# keep list of tmp files for cleanup
globalTmpDir = ''
# curl class
class _Curl:
# constructor
def __init__(self):
# path to curl
self.path = 'curl --user-agent "dqcurl" '
# verification of the host certificate
self.verifyHost = True
# request a compressed response
self.compress = True
# SSL cert/key
self.sslCert = ''
self.sslKey = ''
# verbose
self.verbose = False
# GET method
def get(self,url,data,rucioAccount=False):
# make command
com = '%s --silent --get' % self.path
if not self.verifyHost or not url.startswith('https://'):
com += ' --insecure'
else:
tmp_x509_CApath = _x509_CApath()
if tmp_x509_CApath != '':
com += ' --capath %s' % tmp_x509_CApath
if self.compress:
com += ' --compressed'
if self.sslCert != '':
com += ' --cert %s' % self.sslCert
com += ' --cacert %s' % self.sslCert
if self.sslKey != '':
com += ' --key %s' % self.sslKey
# max time of 10 min
com += ' -m 600'
# add rucio account info
if rucioAccount:
if os.environ.has_key('RUCIO_ACCOUNT'):
data['account'] = os.environ['RUCIO_ACCOUNT']
if os.environ.has_key('RUCIO_APPID'):
data['appid'] = os.environ['RUCIO_APPID']
data['client_version'] = '2.4.1'
# data
strData = ''
for key in data.keys():
strData += 'data="%s"\n' % urllib.urlencode({key:data[key]})
# write data to temporary config file
if globalTmpDir != '':
tmpFD,tmpName = tempfile.mkstemp(dir=globalTmpDir)
else:
tmpFD,tmpName = tempfile.mkstemp()
os.write(tmpFD,strData)
os.close(tmpFD)
com += ' --config %s' % tmpName
com += ' %s' % url
# execute
if self.verbose:
print com
print strData[:-1]
s,o = commands.getstatusoutput(com)
if o != '\x00':
try:
tmpout = urllib.unquote_plus(o)
o = eval(tmpout)
except:
pass
ret = (s,o)
# remove temporary file
os.remove(tmpName)
ret = self.convRet(ret)
if self.verbose:
print ret
return ret
# POST method
def post(self,url,data,rucioAccount=False):
# make command
com = '%s --silent' % self.path
if not self.verifyHost or not url.startswith('https://'):
com += ' --insecure'
else:
tmp_x509_CApath = _x509_CApath()
if tmp_x509_CApath != '':
com += ' --capath %s' % tmp_x509_CApath
if self.compress:
com += ' --compressed'
if self.sslCert != '':
com += ' --cert %s' % self.sslCert
com += ' --cacert %s' % self.sslCert
if self.sslKey != '':
com += ' --key %s' % self.sslKey
# max time of 10 min
com += ' -m 600'
# add rucio account info
if rucioAccount:
if os.environ.has_key('RUCIO_ACCOUNT'):
data['account'] = os.environ['RUCIO_ACCOUNT']
if os.environ.has_key('RUCIO_APPID'):
data['appid'] = os.environ['RUCIO_APPID']
data['client_version'] = '2.4.1'
# data
strData = ''
for key in data.keys():
strData += 'data="%s"\n' % urllib.urlencode({key:data[key]})
# write data to temporary config file
if globalTmpDir != '':
tmpFD,tmpName = tempfile.mkstemp(dir=globalTmpDir)
else:
tmpFD,tmpName = tempfile.mkstemp()
os.write(tmpFD,strData)
os.close(tmpFD)
com += ' --config %s' % tmpName
com += ' %s' % url
# execute
if self.verbose:
print com
print strData[:-1]
s,o = commands.getstatusoutput(com)
if o != '\x00':
try:
tmpout = urllib.unquote_plus(o)
o = eval(tmpout)
except:
pass
ret = (s,o)
# remove temporary file
os.remove(tmpName)
ret = self.convRet(ret)
if self.verbose:
print ret
return ret
# PUT method
def put(self,url,data):
# make command
com = '%s --silent' % self.path
if not self.verifyHost or not url.startswith('https://'):
com += ' --insecure'
else:
tmp_x509_CApath = _x509_CApath()
if tmp_x509_CApath != '':
com += ' --capath %s' % tmp_x509_CApath
if self.compress:
com += ' --compressed'
if self.sslCert != '':
com += ' --cert %s' % self.sslCert
com += ' --cacert %s' % self.sslCert
if self.sslKey != '':
com += ' --key %s' % self.sslKey
# emulate PUT
for key in data.keys():
com += ' -F "%s=@%s"' % (key,data[key])
com += ' %s' % url
if self.verbose:
print com
# execute
ret = commands.getstatusoutput(com)
ret = self.convRet(ret)
if self.verbose:
print ret
return ret
# convert return
def convRet(self,ret):
if ret[0] != 0:
ret = (ret[0]%255,ret[1])
# add messages to silent errors
if ret[0] == 35:
ret = (ret[0],'SSL connect error. The SSL handshaking failed. Check grid certificate/proxy.')
elif ret[0] == 7:
ret = (ret[0],'Failed to connect to host.')
elif ret[0] == 55:
ret = (ret[0],'Failed sending network data.')
elif ret[0] == 56:
ret = (ret[0],'Failure in receiving network data.')
return ret
'''
public methods
'''
# get site specs
def getSiteSpecs(siteType=None):
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getSiteSpecs'
data = {}
if siteType != None:
data['siteType'] = siteType
status,output = curl.get(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getSiteSpecs : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get cloud specs
def getCloudSpecs():
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getCloudSpecs'
status,output = curl.get(url,{})
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getCloudSpecs : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# refresh specs at runtime
def refreshSpecs():
global PandaSites
global PandaClouds
# get Panda Sites
tmpStat,PandaSites = getSiteSpecs()
if tmpStat != 0:
print "ERROR : cannot get Panda Sites"
sys.exit(EC_Failed)
for id, val in PandaSites.iteritems():
if 'setokens' not in val:
if 'setokens_output' in val:
val['setokens'] = val['setokens_output']
else:
val['setokens'] = {}
# get cloud info
tmpStat,PandaClouds = getCloudSpecs()
if tmpStat != 0:
print "ERROR : cannot get Panda Clouds"
sys.exit(EC_Failed)
# initialize specs
refreshSpecs()
# get LRC
def getLRC(site):
ret = None
# look for DQ2ID
for id,val in PandaSites.iteritems():
if id == site or val['ddm'] == site:
if not val['dq2url'] in [None,"","None"]:
ret = val['dq2url']
break
return ret
# get LFC
def getLFC(site):
ret = None
# use explicit matching for sitename
if PandaSites.has_key(site):
val = PandaSites[site]
if not val['lfchost'] in [None,"","None"]:
ret = val['lfchost']
return ret
# look for DQ2ID
for id,val in PandaSites.iteritems():
if id == site or val['ddm'] == site:
if not val['lfchost'] in [None,"","None"]:
ret = val['lfchost']
break
return ret
# get SEs
def getSE(site):
ret = []
# use explicit matching for sitename
if PandaSites.has_key(site):
val = PandaSites[site]
if not val['se'] in [None,"","None"]:
for tmpSE in val['se'].split(','):
match = re.search('.+://([^:/]+):*\d*/*',tmpSE)
if match != None:
ret.append(match.group(1))
return ret
# look for DQ2ID
for id,val in PandaSites.iteritems():
if id == site or val['ddm'] == site:
if not val['se'] in [None,"","None"]:
for tmpSE in val['se'].split(','):
match = re.search('.+://([^:/]+):*\d*/*',tmpSE)
if match != None:
ret.append(match.group(1))
break
# return
return ret
# convert DQ2 ID to Panda siteid
def convertDQ2toPandaID(site, getAll=False):
keptSite = ''
siteList = []
for tmpID,tmpSpec in PandaSites.iteritems():
# # exclude long,xrootd,local queues
if isExcudedSite(tmpID):
continue
# get list of DQ2 IDs
srmv2ddmList = []
for tmpDdmID in tmpSpec['setokens'].values():
srmv2ddmList.append(convSrmV2ID(tmpDdmID))
# use Panda sitename
if convSrmV2ID(site) in srmv2ddmList:
keptSite = tmpID
# keep non-online site just in case
if tmpSpec['status']=='online':
if not getAll:
return keptSite
siteList.append(keptSite)
if getAll:
return ','.join(siteList)
return keptSite
# convert DQ2 ID to Panda site IDs
def convertDQ2toPandaIDList(site):
sites = []
sitesOff = []
for tmpID,tmpSpec in PandaSites.iteritems():
# # exclude long,xrootd,local queues
if isExcudedSite(tmpID):
continue
# get list of DQ2 IDs
srmv2ddmList = []
for tmpDdmID in tmpSpec['setokens'].values():
srmv2ddmList.append(convSrmV2ID(tmpDdmID))
# use Panda sitename
if convSrmV2ID(site) in srmv2ddmList:
# append
if tmpSpec['status']=='online':
if not tmpID in sites:
sites.append(tmpID)
else:
# keep non-online site just in case
if not tmpID in sitesOff:
sitesOff.append(tmpID)
# return
if sites != []:
return sites
return sitesOff
# convert to long queue
def convertToLong(site):
tmpsite = re.sub('ANALY_','ANALY_LONG_',site)
tmpsite = re.sub('_\d+$','',tmpsite)
# if sitename exists
if PandaSites.has_key(tmpsite):
site = tmpsite
return site
# submit jobs
def submitJobs(jobs,verbose=False):
# set hostname
hostname = commands.getoutput('hostname')
for job in jobs:
job.creationHost = hostname
# serialize
strJobs = pickle.dumps(jobs)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/submitJobs'
data = {'jobs':strJobs}
status,output = curl.post(url,data)
if status!=0:
print output
return status,None
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR submitJobs : %s %s" % (type,value)
return EC_Failed,None
# get job status
def getJobStatus(ids):
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getJobStatus'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR getJobStatus : %s %s" % (type,value)
return EC_Failed,None
# kill jobs
def killJobs(ids,verbose=False):
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/killJobs'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR killJobs : %s %s" % (type,value)
return EC_Failed,None
# kill task
def killTask(jediTaskID,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/killTask'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR killTask : %s %s" % (type,value)
return EC_Failed,None
# finish task
def finishTask(jediTaskID,soft=False,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/finishTask'
data = {'jediTaskID':jediTaskID}
if soft:
data['soft'] = True
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR finishTask : %s %s" % (type,value)
return EC_Failed,None
# retry task
def retryTask(jediTaskID,verbose=False,properErrorCode=False,newParams=None):
if newParams == None:
newParams = {}
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/retryTask'
data = {'jediTaskID':jediTaskID,
'properErrorCode':properErrorCode}
if newParams != {}:
data['newParams'] = json.dumps(newParams)
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR retryTask : %s %s" % (type,value)
return EC_Failed,None
# reassign jobs
def reassignJobs(ids):
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/reassignJobs'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR reassignJobs : %s %s" % (type,value)
return EC_Failed,None
# query PandaIDs
def queryPandaIDs(ids):
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/queryPandaIDs'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR queryPandaIDs : %s %s" % (type,value)
return EC_Failed,None
# query last files in datasets
def queryLastFilesInDataset(datasets,verbose=False):
# serialize
strDSs = pickle.dumps(datasets)
# instantiate curl
curl = _Curl()
curl.verbose = verbose
# execute
url = baseURL + '/queryLastFilesInDataset'
data = {'datasets':strDSs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR queryLastFilesInDataset : %s %s" % (type,value)
return EC_Failed,None
# put file
def putFile(file,verbose=False,useCacheSrv=False,reuseSandbox=False):
# size check for noBuild
sizeLimit = 10*1024*1024
fileSize = os.stat(file)[stat.ST_SIZE]
if not os.path.basename(file).startswith('sources.'):
if fileSize > sizeLimit:
errStr = 'Exceeded size limit (%sB >%sB). ' % (fileSize,sizeLimit)
errStr += 'Your working directory contains too large files which cannot be put on cache area. '
errStr += 'Please submit job without --noBuild/--libDS so that your files will be uploaded to SE'
# get logger
tmpLog = PLogger.getPandaLogger()
tmpLog.error(errStr)
return EC_Failed,'False'
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# check duplicationn
if reuseSandbox:
# get CRC
fo = open(file)
fileContent = fo.read()
fo.close()
footer = fileContent[-8:]
checkSum,isize = struct.unpack("II",footer)
# check duplication
url = baseURLSSL + '/checkSandboxFile'
data = {'fileSize':fileSize,'checkSum':checkSum}
status,output = curl.post(url,data)
if status != 0:
return EC_Failed,'ERROR: Could not check Sandbox duplication with %s' % status
elif output.startswith('FOUND:'):
# found reusable sandbox
hostName,reuseFileName = output.split(':')[1:]
# set cache server hostname
global baseURLCSRVSSL
baseURLCSRVSSL = "https://%s:25443/server/panda" % hostName
# return reusable filename
return 0,"NewFileName:%s" % reuseFileName
# execute
if useCacheSrv:
url = baseURLCSRVSSL + '/putFile'
else:
url = baseURLSSL + '/putFile'
data = {'file':file}
return curl.put(url,data)
# delete file
def deleteFile(file):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
# execute
url = baseURLSSL + '/deleteFile'
data = {'file':file}
return curl.post(url,data)
# check dataset in map by ignoring case sensitivity
def checkDatasetInMap(name,outMap):
try:
for tmpKey in outMap.keys():
if name.upper() == tmpKey.upper():
return True
except:
pass
return False
# get real dataset name from map by ignoring case sensitivity
def getDatasetValueInMap(name,outMap):
for tmpKey in outMap.keys():
if name.upper() == tmpKey.upper():
return tmpKey
# return original name
return name
# query files in dataset
def queryFilesInDataset(name,verbose=False,v_vuids=None,getDsString=False,dsStringOnly=False):
# instantiate curl
curl = _Curl()
curl.verbose = verbose
# for container failure
status,out = 0,''
nameVuidsMap = {}
dsString = ''
try:
errStr = ''
# get VUID
if v_vuids == None:
url = baseURLDQ2 + '/ws_repository/rpc'
if re.search(',',name) != None:
# comma-separated list
names = name.split(',')
elif name.endswith('/'):
# container
names = [name]
else:
names = [name]
# loop over all names
vuidList = []
iLookUp = 0
for tmpName in names:
iLookUp += 1
if iLookUp % 20 == 0:
time.sleep(1)
data = {'operation':'queryDatasetByName','dsn':tmpName,
'API':'0_3_0','tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.get(url,data,rucioAccount=True)
if status != 0 or out == '\x00' or (re.search('\*',tmpName) == None and not checkDatasetInMap(tmpName,out)):
errStr = "ERROR : could not find %s in DQ2 DB. Check if the dataset name is correct" \
% tmpName
sys.exit(EC_Failed)
# parse
if re.search('\*',tmpName) == None:
# get real dataset name
tmpName = getDatasetValueInMap(tmpName,out)
vuidList.append(out[tmpName]['vuids'])
# mapping between name and vuids
nameVuidsMap[tuple(out[tmpName]['vuids'])] = tmpName
# string to expand wildcard
dsString += '%s,' % tmpName
else:
# using wildcard
for outKeyName in out.keys():
# skip sub/dis
if re.search('_dis\d+$',outKeyName) != None or re.search('_sub\d+$',outKeyName) != None:
continue
# append
vuidList.append(out[outKeyName]['vuids'])
# mapping between name and vuids
nameVuidsMap[tuple(out[outKeyName]['vuids'])] = outKeyName
# string to expand wildcard
dsString += '%s,' % outKeyName
else:
vuidList = [v_vuids]
if dsStringOnly:
return dsString[:-1]
        # reset for backward compatibility when * or , is not used
if re.search('\*',name) == None and re.search(',',name) == None:
nameVuidsMap = {}
dsString = ''
# get files
url = baseURLDQ2 + '/ws_content/rpc'
ret = {}
generalLFNmap = {}
iLookUp = 0
for vuids in vuidList:
iLookUp += 1
if iLookUp % 20 == 0:
time.sleep(1)
data = {'operation': 'queryFilesInDataset','vuids':vuids,
'API':'0_3_0','tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.post(url,data,rucioAccount=True)
if status != 0:
errStr = "ERROR : could not get files in %s" % name
sys.exit(EC_Failed)
# parse
if out == '\x00' or len(out) < 2 or out==():
# empty
continue
for guid,vals in out[0].iteritems():
# remove attemptNr
generalLFN = re.sub('\.\d+$','',vals['lfn'])
# choose greater attempt to avoid duplication
if generalLFNmap.has_key(generalLFN):
if vals['lfn'] > generalLFNmap[generalLFN]:
# remove lesser attempt
del ret[generalLFNmap[generalLFN]]
else:
continue
# append to map
generalLFNmap[generalLFN] = vals['lfn']
ret[vals['lfn']] = {'guid' : guid,
'fsize' : vals['filesize'],
'md5sum' : vals['checksum'],
'scope' : vals['scope']}
# add dataset name
if nameVuidsMap.has_key(tuple(vuids)):
ret[vals['lfn']]['dataset'] = nameVuidsMap[tuple(vuids)]
except:
print status,out
if errStr != '':
print errStr
else:
print "ERROR : invalid DQ2 response"
sys.exit(EC_Failed)
if getDsString:
return ret,dsString[:-1]
return ret
# get datasets
def getDatasets(name,verbose=False,withWC=False,onlyNames=False):
# instantiate curl
curl = _Curl()
curl.verbose = verbose
try:
errStr = ''
# get VUID
url = baseURLDQ2 + '/ws_repository/rpc'
data = {'operation':'queryDatasetByName','dsn':name,'version':0,
'API':'0_3_0','tuid':MiscUtils.wrappedUuidGen()}
if onlyNames:
data['API'] = '30'
data['onlyNames'] = int(onlyNames)
status,out = curl.get(url,data,rucioAccount=True)
if status != 0:
errStr = "ERROR : could not access DQ2 server"
sys.exit(EC_Failed)
# parse
datasets = {}
if out == '\x00' or ((not withWC) and (not checkDatasetInMap(name,out))):
# no datasets
return datasets
# get names only
if isinstance(out,types.DictionaryType):
return out
else:
# wrong format
errStr = "ERROR : DQ2 didn't give a dictionary for %s" % name
sys.exit(EC_Failed)
# get VUIDs
for dsname,idMap in out.iteritems():
# check format
if idMap.has_key('vuids') and len(idMap['vuids'])>0:
datasets[dsname] = idMap['vuids'][0]
else:
# wrong format
errStr = "ERROR : could not parse HTTP response for %s" % name
sys.exit(EC_Failed)
except:
print status,out
if errStr != '':
print errStr
else:
print "ERROR : invalid DQ2 response"
sys.exit(EC_Failed)
return datasets
# disable expiring file check
globalUseShortLivedReplicas = False
def useExpiringFiles():
global globalUseShortLivedReplicas
globalUseShortLivedReplicas = True
# get expiring files
globalCompleteDsMap = {}
globalExpFilesMap = {}
globalExpOkFilesMap = {}
globalExpCompDq2FilesMap = {}
def getExpiringFiles(dsStr,removedDS,siteID,verbose,getOKfiles=False):
# convert * in dsStr
if re.search('\*',dsStr) != None:
dsStr = queryFilesInDataset(dsStr,verbose,dsStringOnly=True)
# reuse map
global globalExpFilesMap
global globalExpOkFilesMap
    global globalExpCompDq2FilesMap
global globalUseShortLivedReplicas
mapKey = (dsStr,siteID)
if globalExpFilesMap.has_key(mapKey):
if getOKfiles:
return globalExpFilesMap[mapKey],globalExpOkFilesMap[mapKey],globalExpCompDq2FilesMap[mapKey]
return globalExpFilesMap[mapKey]
# get logger
tmpLog = PLogger.getPandaLogger()
if verbose:
tmpLog.debug("checking metadata for %s, removed=%s " % (dsStr,str(removedDS)))
# get DQ2 location and used data
tmpLocations,dsUsedDsMap = getLocations(dsStr,[],'',False,verbose,getDQ2IDs=True,
removedDatasets=removedDS,
useOutContainer=True,
includeIncomplete=True,
notSiteStatusCheck=True)
# get all sites matching with site's DQ2ID here, to work with brokeroff sites
fullSiteList = convertDQ2toPandaIDList(PandaSites[siteID]['ddm'])
# get datasets at the site
datasets = []
for tmpDsUsedDsMapKey,tmpDsUsedDsVal in dsUsedDsMap.iteritems():
siteMatched = False
for tmpTargetID in fullSiteList:
# check with short/long siteID
if tmpDsUsedDsMapKey in [tmpTargetID,convertToLong(tmpTargetID)]:
datasets = tmpDsUsedDsVal
siteMatched = True
break
if siteMatched:
break
# not found
if datasets == []:
tmpLog.error("cannot find datasets at %s for replica metadata check" % siteID)
sys.exit(EC_Failed)
# loop over all datasets
convertedOrigSite = convSrmV2ID(PandaSites[siteID]['ddm'])
expFilesMap = {'datasets':[],'files':[]}
expOkFilesList = []
expCompDq2FilesList = []
for dsName in datasets:
# get DQ2 IDs for the siteID
dq2Locations = []
if tmpLocations.has_key(dsName):
for tmpLoc in tmpLocations[dsName]:
# check Panda site IDs
for tmpPandaSiteID in convertDQ2toPandaIDList(tmpLoc):
if tmpPandaSiteID in fullSiteList:
if not tmpLoc in dq2Locations:
dq2Locations.append(tmpLoc)
break
# check prefix mainly for MWT2 and MWT2_UC
convertedScannedID = convSrmV2ID(tmpLoc)
if convertedOrigSite.startswith(convertedScannedID) or \
convertedScannedID.startswith(convertedOrigSite):
if not tmpLoc in dq2Locations:
dq2Locations.append(tmpLoc)
# empty
if dq2Locations == []:
tmpLog.error("cannot find replica locations for %s:%s to check metadata" % (siteID,dsName))
sys.exit(EC_Failed)
# check completeness
compInDQ2 = False
global globalCompleteDsMap
if globalCompleteDsMap.has_key(dsName):
for tmpDQ2Loc in dq2Locations:
if tmpDQ2Loc in globalCompleteDsMap[dsName]:
compInDQ2 = True
break
# get metadata
metaList = getReplicaMetadata(dsName,dq2Locations,verbose)
# check metadata
metaOK = False
for metaItem in metaList:
# replica deleted
if isinstance(metaItem,types.StringType) and "No replica found at the location" in metaItem:
continue
if not globalUseShortLivedReplicas:
# check the archived attribute
if isinstance(metaItem['archived'],types.StringType) and metaItem['archived'].lower() in ['tobedeleted',]:
continue
# check replica lifetime
if metaItem.has_key('expirationdate') and isinstance(metaItem['expirationdate'],types.StringType):
try:
import datetime
expireDate = datetime.datetime.strptime(metaItem['expirationdate'],'%Y-%m-%d %H:%M:%S')
# expire in 7 days
if expireDate-datetime.datetime.utcnow() < datetime.timedelta(days=7):
continue
except:
pass
# all OK
metaOK = True
break
# expiring
if not metaOK:
# get files
expFilesMap['datasets'].append(dsName)
expFilesMap['files'] += queryFilesInDataset(dsName,verbose)
else:
tmpFilesList = queryFilesInDataset(dsName,verbose)
expOkFilesList += tmpFilesList
# complete
if compInDQ2:
expCompDq2FilesList += tmpFilesList
# keep to avoid redundant lookup
globalExpFilesMap[mapKey] = expFilesMap
globalExpOkFilesMap[mapKey] = expOkFilesList
globalExpCompDq2FilesMap[mapKey] = expCompDq2FilesList
if expFilesMap['datasets'] != []:
msgStr = 'ignore replicas of '
for tmpDsStr in expFilesMap['datasets']:
msgStr += '%s,' % tmpDsStr
msgStr = msgStr[:-1]
        msgStr += ' at %s due to archived=ToBeDeleted or short lifetime < 7 days. ' % siteID
msgStr += 'If you want to use those replicas in spite of short lifetime, use --useShortLivedReplicas'
tmpLog.info(msgStr)
# return
if getOKfiles:
return expFilesMap,expOkFilesList,expCompDq2FilesList
return expFilesMap
# get replica metadata
def getReplicaMetadata(name,dq2Locations,verbose):
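    # Resolves the VUID of the dataset and runs queryDatasetReplicaMetadata for each
    # DQ2 location in dq2Locations. Returns the list of per-location metadata responses.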
# get logger
tmpLog = PLogger.getPandaLogger()
if verbose:
tmpLog.debug("getReplicaMetadata for %s" % (name))
# instantiate curl
curl = _Curl()
curl.verbose = verbose
try:
errStr = ''
# get VUID
url = baseURLDQ2 + '/ws_repository/rpc'
data = {'operation':'queryDatasetByName','dsn':name,'version':0,
'API':'0_3_0','tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.get(url,data,rucioAccount=True)
if status != 0:
errStr = "ERROR : could not access DQ2 server"
sys.exit(EC_Failed)
# parse
datasets = {}
if out == '\x00' or not checkDatasetInMap(name,out):
errStr = "ERROR : VUID for %s was not found in DQ2" % name
sys.exit(EC_Failed)
# get VUIDs
vuid = out[name]['vuids'][0]
# get replica metadata
retList = []
for location in dq2Locations:
url = baseURLDQ2 + '/ws_location/rpc'
data = {'operation':'queryDatasetReplicaMetadata','vuid':vuid,
'location':location,'API':'0_3_0',
'tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.post(url,data,rucioAccount=True)
if status != 0:
errStr = "ERROR : could not access DQ2 server to get replica metadata"
sys.exit(EC_Failed)
# append
retList.append(out)
# return
return retList
except:
print status,out
if errStr != '':
print errStr
else:
print "ERROR : invalid DQ2 response"
sys.exit(EC_Failed)
# query files in shadow datasets associated to container
def getFilesInShadowDataset(contName,suffixShadow,verbose=False):
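    # Collects LFNs already used for the container: files known to the Panda server
    # (running/failed/being added) plus files registered in the shadow dataset of each
    # container element.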
fileList = []
# query files in PandaDB first to get running/failed files + files which are being added
tmpList = getFilesInUseForAnal(contName,verbose)
for tmpItem in tmpList:
if not tmpItem in fileList:
# append
fileList.append(tmpItem)
# get elements in container
elements = getElementsFromContainer(contName,verbose)
for tmpEle in elements:
# remove merge
tmpEle = re.sub('\.merge$','',tmpEle)
shadowDsName = "%s%s" % (tmpEle,suffixShadow)
# check existence
tmpDatasets = getDatasets(shadowDsName,verbose)
if len(tmpDatasets) == 0:
continue
# get files in shadow dataset
tmpList = queryFilesInDataset(shadowDsName,verbose)
for tmpItem in tmpList:
if not tmpItem in fileList:
# append
fileList.append(tmpItem)
return fileList
# query files in shadow dataset associated to old dataset
def getFilesInShadowDatasetOld(outDS,suffixShadow,verbose=False):
shadowList = []
# query files in PandaDB first to get running/failed files + files which are being added
tmpShadowList = getFilesInUseForAnal(outDS,verbose)
for tmpItem in tmpShadowList:
shadowList.append(tmpItem)
# query files in shadow dataset
for tmpItem in queryFilesInDataset("%s%s" % (outDS,suffixShadow),verbose):
if not tmpItem in shadowList:
shadowList.append(tmpItem)
return shadowList
# list datasets by GUIDs
def listDatasetsByGUIDs(guids,dsFilter,verbose=False,forColl=False):
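    # Resolves each GUID to its dataset and LFN. Returns (retMap,allMap) where retMap maps
    # GUID -> (dataset name,LFN) for GUIDs with a unique dataset passing dsFilter, and
    # allMap maps GUID -> list of candidate dataset names when no usable match was found.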
# instantiate curl
curl = _Curl()
curl.verbose = verbose
# get filter
dsFilters = []
if dsFilter != '':
dsFilters = dsFilter.split(',')
# get logger
tmpLog = PLogger.getPandaLogger()
retMap = {}
allMap = {}
iLookUp = 0
guidLfnMap = {}
checkedDSList = []
# loop over all GUIDs
for guid in guids:
        # check existing map to avoid redundant lookup
if guidLfnMap.has_key(guid):
retMap[guid] = guidLfnMap[guid]
continue
iLookUp += 1
if iLookUp % 20 == 0:
time.sleep(1)
# get vuids
url = baseURLDQ2 + '/ws_content/rpc'
data = {'operation': 'queryDatasetsWithFileByGUID','guid':guid,
'API':'0_3_0','tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.get(url,data,rucioAccount=True)
# failed
if status != 0:
if not verbose:
print status,out
errStr = "could not get dataset vuids for %s" % guid
tmpLog.error(errStr)
sys.exit(EC_Failed)
# GUID was not registered in DQ2
if out == '\x00' or out == ():
if verbose:
errStr = "DQ2 gave an empty list for GUID=%s" % guid
tmpLog.debug(errStr)
allMap[guid] = []
continue
tmpVUIDs = list(out)
# get dataset name
url = baseURLDQ2 + '/ws_repository/rpc'
data = {'operation':'queryDatasetByVUIDs','vuids':tmpVUIDs,
'API':'0_3_0','tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.post(url,data,rucioAccount=True)
# failed
if status != 0:
if not verbose:
print status,out
errStr = "could not get dataset name for %s" % guid
tmpLog.error(errStr)
sys.exit(EC_Failed)
# empty
if out == '\x00':
errStr = "DQ2 gave an empty list for VUID=%s" % tmpVUIDs
tmpLog.error(errStr)
sys.exit(EC_Failed)
# datasets are deleted
if out == {}:
allMap[guid] = []
continue
# check with filter
tmpDsNames = []
tmpAllDsNames = []
for tmpDsName in out.keys():
# ignore junk datasets
if tmpDsName.startswith('panda') or \
tmpDsName.startswith('user') or \
tmpDsName.startswith('group') or \
re.search('_sub\d+$',tmpDsName) != None or \
re.search('_dis\d+$',tmpDsName) != None or \
re.search('_shadow$',tmpDsName) != None:
continue
tmpAllDsNames.append(tmpDsName)
# check with filter
if dsFilters != []:
flagMatch = False
for tmpFilter in dsFilters:
# replace . to \.
tmpFilter = tmpFilter.replace('.','\.')
# replace * to .*
tmpFilter = tmpFilter.replace('*','.*')
if re.search('^'+tmpFilter,tmpDsName) != None:
flagMatch = True
break
# not match
if not flagMatch:
continue
# append
tmpDsNames.append(tmpDsName)
# empty
if tmpDsNames == []:
# there may be multiple GUIDs for the same event, and some may be filtered by --eventPickDS
allMap[guid] = tmpAllDsNames
continue
# duplicated
if len(tmpDsNames) != 1:
if not forColl:
errStr = "there are multiple datasets %s for GUID:%s. Please set --eventPickDS and/or --eventPickStreamName to choose one dataset"\
% (str(tmpAllDsNames),guid)
else:
errStr = "there are multiple datasets %s for GUID:%s. Please set --eventPickDS to choose one dataset"\
% (str(tmpAllDsNames),guid)
tmpLog.error(errStr)
sys.exit(EC_Failed)
# get LFN
if not tmpDsNames[0] in checkedDSList:
tmpMap = queryFilesInDataset(tmpDsNames[0],verbose)
for tmpLFN,tmpVal in tmpMap.iteritems():
guidLfnMap[tmpVal['guid']] = (tmpDsNames[0],tmpLFN)
checkedDSList.append(tmpDsNames[0])
# append
if not guidLfnMap.has_key(guid):
            errStr = "LFN for %s is not found in %s" % (guid,tmpDsNames[0])
tmpLog.error(errStr)
sys.exit(EC_Failed)
retMap[guid] = guidLfnMap[guid]
# return
return retMap,allMap
# register dataset
def addDataset(name,verbose=False,location='',dsExist=False,allowProdDisk=False,dsCheck=True):
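    # Registers a dataset in DQ2 (unless dsExist is True) and adds a replica at the given
    # location. Replica registration is only allowed on SCRATCHDISK/USERDISK/LOCALGROUPDISK,
    # or additionally on PRODDISK/DATADISK when allowProdDisk is set.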
# generate DUID/VUID
duid = MiscUtils.wrappedUuidGen()
vuid = MiscUtils.wrappedUuidGen()
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
try:
errStr = ''
# add
if not dsExist:
url = baseURLDQ2SSL + '/ws_repository/rpc'
nTry = 3
for iTry in range(nTry):
data = {'operation':'addDataset','dsn': name,'duid': duid,'vuid':vuid,
'API':'0_3_0','tuid':MiscUtils.wrappedUuidGen(),'update':'yes'}
status,out = curl.post(url,data,rucioAccount=True)
if not dsCheck and out != None and re.search('DQDatasetExistsException',out) != None:
dsExist = True
break
elif status != 0 or (out != None and re.search('Exception',out) != None):
if iTry+1 == nTry:
errStr = "ERROR : could not add dataset to DQ2 repository"
sys.exit(EC_Failed)
time.sleep(20)
else:
break
# get VUID
if dsExist:
# check location
tmpLocations = getLocations(name,[],'',False,verbose,getDQ2IDs=True)
if location in tmpLocations:
return
# get VUID
url = baseURLDQ2 + '/ws_repository/rpc'
data = {'operation':'queryDatasetByName','dsn':name,'version':0,
'API':'0_3_0','tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.get(url,data,rucioAccount=True)
if status != 0:
errStr = "ERROR : could not get VUID from DQ2"
sys.exit(EC_Failed)
# parse
vuid = out[name]['vuids'][0]
# add replica
if re.search('SCRATCHDISK$',location) != None or re.search('USERDISK$',location) != None \
or re.search('LOCALGROUPDISK$',location) != None \
or (allowProdDisk and (re.search('PRODDISK$',location) != None or \
re.search('DATADISK$',location) != None)):
url = baseURLDQ2SSL + '/ws_location/rpc'
nTry = 3
for iTry in range(nTry):
data = {'operation':'addDatasetReplica','vuid':vuid,'site':location,
'complete':0,'transferState':1,
'API':'0_3_0','tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.post(url,data,rucioAccount=True)
if status != 0 or out != 1:
if iTry+1 == nTry:
errStr = "ERROR : could not register location : %s" % location
sys.exit(EC_Failed)
time.sleep(20)
else:
break
else:
errStr = "ERROR : registration at %s is disallowed" % location
sys.exit(EC_Failed)
except:
print status,out
if errStr != '':
print errStr
else:
print "ERROR : invalid DQ2 response"
sys.exit(EC_Failed)
# create dataset container
def createContainer(name,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
try:
errStr = ''
# add
url = baseURLDQ2SSL + '/ws_dq2/rpc'
nTry = 3
for iTry in range(nTry):
data = {'operation':'container_create','name': name,
'API':'030','tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.post(url,data,rucioAccount=True)
if status != 0 or (out != None and re.search('Exception',out) != None):
if iTry+1 == nTry:
errStr = "ERROR : could not create container in DQ2"
sys.exit(EC_Failed)
time.sleep(20)
else:
break
except:
print status,out
if errStr != '':
print errStr
else:
print "ERROR : invalid DQ2 response"
sys.exit(EC_Failed)
# add datasets to container
def addDatasetsToContainer(name,datasets,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
try:
errStr = ''
# add
url = baseURLDQ2SSL + '/ws_dq2/rpc'
nTry = 3
for iTry in range(nTry):
data = {'operation':'container_register','name': name,
'datasets':datasets,'API':'030',
'tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.post(url,data,rucioAccount=True)
if status != 0 or (out != None and re.search('Exception',out) != None):
if iTry+1 == nTry:
errStr = "ERROR : could not add DQ2 datasets to container"
sys.exit(EC_Failed)
time.sleep(20)
else:
break
except:
print status,out
if errStr != '':
print errStr
else:
print "ERROR : invalid DQ2 response"
sys.exit(EC_Failed)
# get container elements
def getElementsFromContainer(name,verbose=False):
# instantiate curl
curl = _Curl()
curl.verbose = verbose
try:
errStr = ''
# get elements
url = baseURLDQ2 + '/ws_dq2/rpc'
data = {'operation':'container_retrieve','name': name,
'API':'030','tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.get(url,data,rucioAccount=True)
if status != 0 or (isinstance(out,types.StringType) and re.search('Exception',out) != None):
errStr = "ERROR : could not get container %s from DQ2" % name
sys.exit(EC_Failed)
return out
except:
print status,out
type, value, traceBack = sys.exc_info()
print "%s %s" % (type,value)
if errStr != '':
print errStr
else:
print "ERROR : invalid DQ2 response"
sys.exit(EC_Failed)
# convert srmv2 site to srmv1 site ID
def convSrmV2ID(tmpSite):
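    # Normalizes a DQ2 endpoint name (e.g. SITE_SCRATCHDISK, SITE_PHYS-XXX) to a generic
    # site label so that different space tokens of the same site can be compared.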
# keep original name to avoid double conversion
origSite = tmpSite
    # don't convert European sites with these prefixes (FR/IT/UK/DE/RU/PT/RO)
for tmpPrefix in ['IN2P3-','INFN-','UKI-','GRIF-','DESY-','UNI-','RU-',
'LIP-','RO-']:
if tmpSite.startswith(tmpPrefix):
tmpSite = re.sub('_[A-Z,0-9]+DISK$', 'DISK',tmpSite)
tmpSite = re.sub('_[A-Z,0-9]+TAPE$', 'DISK',tmpSite)
tmpSite = re.sub('_PHYS-[A-Z,0-9]+$','DISK',tmpSite)
tmpSite = re.sub('_PERF-[A-Z,0-9]+$','DISK',tmpSite)
tmpSite = re.sub('_DET-[A-Z,0-9]+$', 'DISK',tmpSite)
tmpSite = re.sub('_SOFT-[A-Z,0-9]+$','DISK',tmpSite)
tmpSite = re.sub('_TRIG-DAQ$','DISK',tmpSite)
return tmpSite
    # patch for CERN EOS
if tmpSite.startswith('CERN-PROD_EOS'):
return 'CERN-PROD_EOSDISK'
    # patch for CERN TMP
if tmpSite.startswith('CERN-PROD_TMP'):
return 'CERN-PROD_TMPDISK'
    # patch for CERN OLD
if tmpSite.startswith('CERN-PROD_OLD') or tmpSite.startswith('CERN-PROD_LOCAL'):
return 'CERN-PROD_OLDDISK'
# patch for SRM v2
tmpSite = re.sub('-[^-_]+_[A-Z,0-9]+DISK$', 'DISK',tmpSite)
tmpSite = re.sub('-[^-_]+_[A-Z,0-9]+TAPE$', 'DISK',tmpSite)
tmpSite = re.sub('-[^-_]+_PHYS-[A-Z,0-9]+$','DISK',tmpSite)
tmpSite = re.sub('-[^-_]+_PERF-[A-Z,0-9]+$','DISK',tmpSite)
tmpSite = re.sub('-[^-_]+_DET-[A-Z,0-9]+$', 'DISK',tmpSite)
tmpSite = re.sub('-[^-_]+_SOFT-[A-Z,0-9]+$','DISK',tmpSite)
tmpSite = re.sub('-[^-_]+_TRIG-DAQ$','DISK',tmpSite)
# SHOULD BE REMOVED Once all sites and DQ2 migrate to srmv2
# patch for BNL
if tmpSite in ['BNLDISK','BNLTAPE']:
tmpSite = 'BNLPANDA'
# patch for LYON
if tmpSite in ['LYONDISK','LYONTAPE']:
tmpSite = 'IN2P3-CCDISK'
# patch for TAIWAN
if tmpSite.startswith('ASGC'):
tmpSite = 'TAIWANDISK'
# patch for CERN
if tmpSite.startswith('CERN'):
tmpSite = 'CERNDISK'
    # patch for some special sites where automatic conjecture is impossible
if tmpSite == 'UVIC':
tmpSite = 'VICTORIA'
# US T2s
if origSite == tmpSite:
tmpSite = re.sub('_[A-Z,0-9]+DISK$', '',tmpSite)
tmpSite = re.sub('_[A-Z,0-9]+TAPE$', '',tmpSite)
tmpSite = re.sub('_PHYS-[A-Z,0-9]+$','',tmpSite)
tmpSite = re.sub('_PERF-[A-Z,0-9]+$','',tmpSite)
tmpSite = re.sub('_DET-[A-Z,0-9]+$', '',tmpSite)
tmpSite = re.sub('_SOFT-[A-Z,0-9]+$','',tmpSite)
tmpSite = re.sub('_TRIG-DAQ$','',tmpSite)
if tmpSite == 'NET2':
tmpSite = 'BU'
if tmpSite == 'MWT2_UC':
tmpSite = 'MWT2'
# return
return tmpSite
# check tape sites
def isTapeSite(origTmpSite):
if re.search('TAPE$',origTmpSite) != None or \
re.search('PROD_TZERO$',origTmpSite) != None or \
re.search('PROD_TMPDISK$',origTmpSite) != None or \
re.search('PROD_DAQ$',origTmpSite) != None:
return True
return False
# check online site
def isOnlineSite(origTmpSite):
# get PandaID
tmpPandaSite = convertDQ2toPandaID(origTmpSite)
# check if Panda site
if not PandaSites.has_key(tmpPandaSite):
return False
# exclude long,local queues
if isExcudedSite(tmpPandaSite):
return False
# status
if PandaSites[tmpPandaSite]['status'] == 'online':
return True
return False
# get locations
def getLocations(name,fileList,cloud,woFileCheck,verbose=False,expCloud=False,getReserved=False,
getTapeSites=False,getDQ2IDs=False,locCandidates=None,removeDS=False,
removedDatasets=[],useOutContainer=False,includeIncomplete=False,
notSiteStatusCheck=False,useCVMFS=False):
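    # Resolves replica locations for one or more datasets/containers (comma-separated in
    # name) and maps them to Panda sites. Depending on the flags it returns a list of
    # locations (woFileCheck), DQ2 IDs (getDQ2IDs), or maps of location -> Panda site IDs,
    # optionally together with reserved sites, tape sites and the used-dataset map.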
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# get logger
tmpLog = PLogger.getPandaLogger()
try:
errStr = ''
names = name.split(',')
# loop over all names
retSites = []
retSiteMap = {}
resRetSiteMap = {}
resBadStSites = {}
resTapeSites = {}
retDQ2IDs = []
retDQ2IDmap = {}
allOut = {}
iLookUp = 0
resUsedDsMap = {}
global globalCompleteDsMap
# convert candidates for SRM v2
if locCandidates != None:
locCandidatesSrmV2 = []
for locTmp in locCandidates:
locCandidatesSrmV2.append(convSrmV2ID(locTmp))
# loop over all names
for tmpName in names:
# ignore removed datasets
if tmpName in removedDatasets:
continue
iLookUp += 1
if iLookUp % 20 == 0:
time.sleep(1)
# container
containerFlag = False
if tmpName.endswith('/'):
containerFlag = True
# get VUID
url = baseURLDQ2 + '/ws_repository/rpc'
data = {'operation':'queryDatasetByName','dsn':tmpName,'version':0,
'API':'0_3_0','tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.get(url,data,rucioAccount=True)
if status != 0 or out == '\x00' or (not checkDatasetInMap(tmpName,out)):
errStr = "ERROR : could not find %s in DQ2 DB. Check if the dataset name is correct" \
% tmpName
if getReserved and getTapeSites:
sys.exit(EC_Failed)
if verbose:
print errStr
return retSites
# get real datasetname
tmpName = getDatasetValueInMap(tmpName,out)
# parse
duid = out[tmpName]['duid']
# get replica location
url = baseURLDQ2 + '/ws_location/rpc'
if containerFlag:
data = {'operation':'listContainerReplicas','cn':tmpName,
'API':'0_3_0','tuid':MiscUtils.wrappedUuidGen()}
else:
data = {'operation':'listDatasetReplicas','duid':duid,
'API':'0_3_0','tuid':MiscUtils.wrappedUuidGen()}
status,out = curl.post(url,data,rucioAccount=True)
if status != 0:
errStr = "ERROR : could not query location for %s" % tmpName
sys.exit(EC_Failed)
# convert container format to dataset's one
outTmp = {}
if containerFlag:
# count number of complete elements
for tmpEleName,tmpEleVal in out.iteritems():
# ignore removed datasets
if tmpEleName in removedDatasets:
continue
for tmpEleVUID,tmpEleLocs in tmpEleVal.iteritems():
# get complete locations
tmpFoundFlag = False
for tmpEleLoc in tmpEleLocs[1]:
# don't use TAPE
if isTapeSite(tmpEleLoc):
if not resTapeSites.has_key(tmpEleLoc):
resTapeSites[tmpEleLoc] = []
if not tmpEleName in resTapeSites[tmpEleLoc]:
resTapeSites[tmpEleLoc].append(tmpEleName)
continue
# append
if not outTmp.has_key(tmpEleLoc):
outTmp[tmpEleLoc] = [{'found':0,'useddatasets':[]}]
# increment
outTmp[tmpEleLoc][0]['found'] += 1
# append list
if not tmpEleName in outTmp[tmpEleLoc][0]['useddatasets']:
outTmp[tmpEleLoc][0]['useddatasets'].append(tmpEleName)
# found online site
if isOnlineSite(tmpEleLoc):
tmpFoundFlag = True
# add to global map
if not globalCompleteDsMap.has_key(tmpEleName):
globalCompleteDsMap[tmpEleName] = []
globalCompleteDsMap[tmpEleName].append(tmpEleLoc)
# use incomplete locations if no complete replica at online sites
if includeIncomplete or not tmpFoundFlag:
for tmpEleLoc in tmpEleLocs[0]:
# don't use TAPE
if isTapeSite(tmpEleLoc):
if not resTapeSites.has_key(tmpEleLoc):
resTapeSites[tmpEleLoc] = []
if not tmpEleName in resTapeSites[tmpEleLoc]:
resTapeSites[tmpEleLoc].append(tmpEleName)
continue
# append
if not outTmp.has_key(tmpEleLoc):
outTmp[tmpEleLoc] = [{'found':0,'useddatasets':[]}]
# increment
outTmp[tmpEleLoc][0]['found'] += 1
# append list
if not tmpEleName in outTmp[tmpEleLoc][0]['useddatasets']:
outTmp[tmpEleLoc][0]['useddatasets'].append(tmpEleName)
else:
# check completeness
tmpIncompList = []
tmpFoundFlag = False
for tmpOutKey,tmpOutVar in out.iteritems():
# don't use TAPE
if isTapeSite(tmpOutKey):
if not resTapeSites.has_key(tmpOutKey):
resTapeSites[tmpOutKey] = []
if not tmpName in resTapeSites[tmpOutKey]:
resTapeSites[tmpOutKey].append(tmpName)
continue
# protection against unchecked
tmpNfound = tmpOutVar[0]['found']
# complete or not
if isinstance(tmpNfound,types.IntType) and tmpNfound == tmpOutVar[0]['total']:
outTmp[tmpOutKey] = [{'found':1,'useddatasets':[tmpName]}]
# found online site
if isOnlineSite(tmpOutKey):
tmpFoundFlag = True
# add to global map
if not globalCompleteDsMap.has_key(tmpName):
globalCompleteDsMap[tmpName] = []
globalCompleteDsMap[tmpName].append(tmpOutKey)
else:
# keep just in case
if not tmpOutKey in tmpIncompList:
tmpIncompList.append(tmpOutKey)
                # use incomplete replicas when there is no complete replica at online sites
if includeIncomplete or not tmpFoundFlag:
for tmpOutKey in tmpIncompList:
outTmp[tmpOutKey] = [{'found':1,'useddatasets':[tmpName]}]
# replace
out = outTmp
# sum
for tmpOutKey,tmpOutVar in out.iteritems():
if not allOut.has_key(tmpOutKey):
allOut[tmpOutKey] = [{'found':0,'useddatasets':[]}]
allOut[tmpOutKey][0]['found'] += tmpOutVar[0]['found']
allOut[tmpOutKey][0]['useddatasets'] += tmpOutVar[0]['useddatasets']
# replace
out = allOut
if verbose:
print out
# choose sites where most files are available
if not woFileCheck:
tmpMaxFiles = -1
for origTmpSite,origTmpInfo in out.iteritems():
# get PandaID
tmpPandaSite = convertDQ2toPandaID(origTmpSite)
# check status
if PandaSites.has_key(tmpPandaSite) and (notSiteStatusCheck or PandaSites[tmpPandaSite]['status'] == 'online'):
# don't use TAPE
if isTapeSite(origTmpSite):
if not resTapeSites.has_key(origTmpSite):
if origTmpInfo[0].has_key('useddatasets'):
resTapeSites[origTmpSite] = origTmpInfo[0]['useddatasets']
else:
resTapeSites[origTmpSite] = names
continue
# check the number of available files
if tmpMaxFiles < origTmpInfo[0]['found']:
tmpMaxFiles = origTmpInfo[0]['found']
# remove sites
for origTmpSite in out.keys():
if out[origTmpSite][0]['found'] < tmpMaxFiles:
                    # use sites where most files are available if output container is not used
if not useOutContainer:
del out[origTmpSite]
if verbose:
print out
tmpFirstDump = True
for origTmpSite,origTmpInfo in out.iteritems():
# don't use TAPE
if isTapeSite(origTmpSite):
if not resTapeSites.has_key(origTmpSite):
resTapeSites[origTmpSite] = origTmpInfo[0]['useddatasets']
continue
# collect DQ2 IDs
if not origTmpSite in retDQ2IDs:
retDQ2IDs.append(origTmpSite)
for tmpUDS in origTmpInfo[0]['useddatasets']:
if not retDQ2IDmap.has_key(tmpUDS):
retDQ2IDmap[tmpUDS] = []
if not origTmpSite in retDQ2IDmap[tmpUDS]:
retDQ2IDmap[tmpUDS].append(origTmpSite)
# patch for SRM v2
tmpSite = convSrmV2ID(origTmpSite)
# if candidates are limited
if locCandidates != None and (not tmpSite in locCandidatesSrmV2):
continue
if verbose:
tmpLog.debug('%s : %s->%s' % (tmpName,origTmpSite,tmpSite))
# check cloud, DQ2 ID and status
tmpSiteBeforeLoop = tmpSite
for tmpID,tmpSpec in PandaSites.iteritems():
# reset
tmpSite = tmpSiteBeforeLoop
# get list of DQ2 IDs
srmv2ddmList = []
for tmpDdmID in tmpSpec['setokens'].values():
srmv2ddmList.append(convSrmV2ID(tmpDdmID))
# dump
if tmpFirstDump:
if verbose:
pass
if tmpSite in srmv2ddmList or convSrmV2ID(tmpSpec['ddm']).startswith(tmpSite) \
or (useCVMFS and tmpSpec['iscvmfs'] == True):
# overwrite tmpSite for srmv1
tmpSite = convSrmV2ID(tmpSpec['ddm'])
# exclude long,xrootd,local queues
if isExcudedSite(tmpID):
continue
if not tmpSite in retSites:
retSites.append(tmpSite)
# just collect locations when file check is disabled
if woFileCheck:
break
# append site
if tmpSpec['status'] == 'online' or notSiteStatusCheck:
# return sites in a cloud when it is specified or all sites
if tmpSpec['cloud'] == cloud or (not expCloud):
appendMap = retSiteMap
else:
appendMap = resRetSiteMap
# mapping between location and Panda siteID
if not appendMap.has_key(tmpSite):
appendMap[tmpSite] = []
if not tmpID in appendMap[tmpSite]:
appendMap[tmpSite].append(tmpID)
if origTmpInfo[0].has_key('useddatasets'):
if not tmpID in resUsedDsMap:
resUsedDsMap[tmpID] = []
resUsedDsMap[tmpID] += origTmpInfo[0]['useddatasets']
else:
# not interested in another cloud
if tmpSpec['cloud'] != cloud and expCloud:
continue
# keep bad status sites for info
if not resBadStSites.has_key(tmpSpec['status']):
resBadStSites[tmpSpec['status']] = []
if not tmpID in resBadStSites[tmpSpec['status']]:
resBadStSites[tmpSpec['status']].append(tmpID)
tmpFirstDump = False
        # return DQ2 IDs
if getDQ2IDs:
if includeIncomplete:
return retDQ2IDmap,resUsedDsMap
return retDQ2IDs
# return list when file check is not required
if woFileCheck:
return retSites
# use reserved map when the cloud doesn't hold the dataset
if retSiteMap == {} and (not expCloud) and (not getReserved):
retSiteMap = resRetSiteMap
# reset reserved map for expCloud
if getReserved and expCloud:
resRetSiteMap = {}
# return map
if verbose:
if not getReserved:
tmpLog.debug("getLocations -> %s" % retSiteMap)
else:
tmpLog.debug("getLocations pri -> %s" % retSiteMap)
tmpLog.debug("getLocations sec -> %s" % resRetSiteMap)
# print bad status sites for info
if retSiteMap == {} and resRetSiteMap == {} and resBadStSites != {}:
msgFirstFlag = True
for tmpStatus,tmpSites in resBadStSites.iteritems():
                # ignore panda specific site
if tmpStatus.startswith('panda_'):
continue
if msgFirstFlag:
tmpLog.warning("the following sites hold %s but they are not online" % name)
msgFirstFlag = False
print " status=%s : %s" % (tmpStatus,tmpSites)
if not getReserved:
return retSiteMap
elif not getTapeSites:
return retSiteMap,resRetSiteMap
elif not removeDS:
return retSiteMap,resRetSiteMap,resTapeSites
else:
return retSiteMap,resRetSiteMap,resTapeSites,resUsedDsMap
except:
print status,out
if errStr != '':
print errStr
else:
type, value, traceBack = sys.exc_info()
print "ERROR : invalid DQ2 response - %s %s" % (type,value)
sys.exit(EC_Failed)
#@ Returns number of events per file in a given dataset
#SP 2006
#
def nEvents(name, verbose=False, askServer=True, fileList = {}, scanDir = '.', askUser=True):
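    # Asks the Panda monitor for the number of events per file (or, when askServer is
    # False, counts entries of the CollectionTree in a local file with ROOT); falls back
    # to asking the user interactively when neither succeeds.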
# @ These declarations can be moved to the configuration section at the very beginning
# Here just for code clarity
#
# Parts of the query
str1="/?dset="
str2="&get=evperfile"
# Form full query string
m_query = baseURLMON+str1+name+str2
manualEnter = True
# Send query get number of events per file
if askServer:
nEvents=urllib.urlopen(m_query).read()
if verbose:
print m_query
print nEvents
if re.search('HTML',nEvents) == None and nEvents != '-1':
manualEnter = False
else:
# use ROOT to get # of events
try:
import ROOT
rootFile = ROOT.TFile("%s/%s" % (scanDir,fileList[0]))
tree = ROOT.gDirectory.Get( 'CollectionTree' )
nEvents = tree.GetEntriesFast()
# disable
if nEvents > 0:
manualEnter = False
except:
if verbose:
type, value, traceBack = sys.exc_info()
print "ERROR : could not get nEvents with ROOT - %s %s" % (type,value)
    # In case of error the PANDAMON server returns a full HTML page;
    # normally it returns an integer
if manualEnter:
if askUser:
if askServer:
print "Could not get the # of events from MetaDB for %s " % name
while True:
str = raw_input("Enter the number of events per file (or set --nEventsPerFile) : ")
try:
nEvents = int(str)
break
except:
pass
else:
print "ERROR : Could not get the # of events from MetaDB for %s " % name
sys.exit(EC_Failed)
if verbose:
        print "Dataset %s has %s events per file" % (name,nEvents)
return int(nEvents)
# get PFN from LRC
def _getPFNsLRC(lfns,dq2url,verbose):
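    # Queries the LRC PoolFileCatalog interface in chunks of 40 entries (by GUID when
    # supported, otherwise by LFN) and returns a map of LFN -> PFN parsed from the XML.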
pfnMap = {}
# instantiate curl
curl = _Curl()
curl.verbose = verbose
# get PoolFileCatalog
iLFN = 0
strLFNs = ''
url = dq2url + 'lrc/PoolFileCatalog'
firstError = True
# check if GUID lookup is supported
useGUID = True
status,out = curl.get(url,{'guids':'test'})
if status ==0 and out == 'Must GET or POST a list of LFNs!':
useGUID = False
for lfn,vals in lfns.iteritems():
iLFN += 1
# make argument
if useGUID:
strLFNs += '%s ' % vals['guid']
else:
strLFNs += '%s ' % lfn
if iLFN % 40 == 0 or iLFN == len(lfns):
# get PoolFileCatalog
strLFNs = strLFNs.rstrip()
if useGUID:
data = {'guids':strLFNs}
else:
data = {'lfns':strLFNs}
# avoid too long argument
strLFNs = ''
# execute
status,out = curl.get(url,data)
time.sleep(2)
if out.startswith('Error'):
# LFN not found
continue
if status != 0 or (not out.startswith('<?xml')):
if firstError:
print status,out
print "ERROR : LRC %s returned invalid response" % dq2url
firstError = False
continue
# parse
try:
root = xml.dom.minidom.parseString(out)
files = root.getElementsByTagName('File')
for file in files:
# get PFN and LFN nodes
physical = file.getElementsByTagName('physical')[0]
pfnNode = physical.getElementsByTagName('pfn')[0]
logical = file.getElementsByTagName('logical')[0]
lfnNode = logical.getElementsByTagName('lfn')[0]
# convert UTF8 to Raw
pfn = str(pfnNode.getAttribute('name'))
lfn = str(lfnNode.getAttribute('name'))
# remove /srm/managerv1?SFN=
pfn = re.sub('/srm/managerv1\?SFN=','',pfn)
# append
pfnMap[lfn] = pfn
except:
print status,out
type, value, traceBack = sys.exc_info()
print "ERROR : could not parse XML - %s %s" % (type, value)
sys.exit(EC_Failed)
# return
return pfnMap
# get list of missing LFNs from LRC
def getMissLFNsFromLRC(files,url,verbose=False,nFiles=0):
# get PFNs
pfnMap = _getPFNsLRC(files,url,verbose)
# check Files
missFiles = []
for file in files:
if not file in pfnMap.keys():
missFiles.append(file)
return missFiles
# get PFN list from LFC
def _getPFNsLFC(fileMap,site,explicitSE,verbose=False,nFiles=0):
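    # Looks up PFNs by running the bundled LFCclient.py helper under the grid environment:
    # GUID/LFN pairs are written to a temporary input file and the helper's output file is
    # read back as a python map (keyed by GUID, as consumed by getMissLFNsFromLFC).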
pfnMap = {}
for path in sys.path:
# look for base package
basePackage = __name__.split('.')[-2]
if os.path.exists(path) and os.path.isdir(path) and basePackage in os.listdir(path):
lfcClient = '%s/%s/LFCclient.py' % (path,basePackage)
if explicitSE:
stList = getSE(site)
else:
stList = []
lfcHost = getLFC(site)
inFile = '%s_in' % MiscUtils.wrappedUuidGen()
outFile = '%s_out' % MiscUtils.wrappedUuidGen()
# write GUID/LFN
ifile = open(inFile,'w')
fileKeys = fileMap.keys()
fileKeys.sort()
for lfn in fileKeys:
vals = fileMap[lfn]
ifile.write('%s %s\n' % (vals['guid'],lfn))
ifile.close()
# construct command
gridSrc = _getGridSrc()
com = '%s python -Wignore %s -l %s -i %s -o %s -n %s' % (gridSrc,lfcClient,lfcHost,inFile,outFile,nFiles)
for index,stItem in enumerate(stList):
if index != 0:
com += ',%s' % stItem
else:
com += ' -s %s' % stItem
if verbose:
com += ' -v'
print com
            # execute
status = os.system(com)
if status == 0:
ofile = open(outFile)
line = ofile.readline()
line = re.sub('\n','',line)
exec 'pfnMap = %s' %line
ofile.close()
# remove tmp files
try:
os.remove(inFile)
os.remove(outFile)
except:
pass
# failed
if status != 0:
print "ERROR : failed to access LFC %s" % lfcHost
return {}
break
# return
return pfnMap
# get list of missing LFNs from LFC
def getMissLFNsFromLFC(fileMap,site,explicitSE,verbose=False,nFiles=0,shadowList=[],dsStr='',removedDS=[],
skipScan=False):
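    # Returns the list of LFNs that are neither in the shadow list nor in complete DQ2
    # replicas and could not be found by the LFC scan of incomplete replicas at the site.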
# get logger
tmpLog = PLogger.getPandaLogger()
missList = []
# ignore files in shadow
if shadowList != []:
tmpFileMap = {}
for lfn,vals in fileMap.iteritems():
if not lfn in shadowList:
tmpFileMap[lfn] = vals
else:
tmpFileMap = fileMap
# ignore expiring files
if dsStr != '':
tmpTmpFileMap = {}
expFilesMap,expOkFilesList,expCompInDQ2FilesList = getExpiringFiles(dsStr,removedDS,site,verbose,getOKfiles=True)
# collect files in incomplete replicas
for lfn,vals in tmpFileMap.iteritems():
if lfn in expOkFilesList and not lfn in expCompInDQ2FilesList:
tmpTmpFileMap[lfn] = vals
tmpFileMap = tmpTmpFileMap
# skipScan use only complete replicas
if skipScan and expCompInDQ2FilesList == []:
tmpLog.info("%s may hold %s files at most in incomplete replicas but they are not used when --skipScan is set" % \
(site,len(expOkFilesList)))
# get PFNS
if tmpFileMap != {} and not skipScan:
        tmpLog.info("scanning LFC %s for files in incomplete datasets at %s" % (getLFC(site),site))
pfnMap = _getPFNsLFC(tmpFileMap,site,explicitSE,verbose,nFiles)
else:
pfnMap = {}
for lfn,vals in fileMap.iteritems():
if (not vals['guid'] in pfnMap.keys()) and (not lfn in shadowList) \
and not lfn in expCompInDQ2FilesList:
missList.append(lfn)
# return
return missList
# get grid source file
def _getGridSrc():
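    # Determines which grid environment setup script to source (LYON/CERN/BNL/CVMFS or
    # PATHENA_GRID_SETUP_SH) and returns a shell prefix that sources it with a cleaned
    # environment; returns '' when no setup is needed and False when none can be found.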
# set Grid setup.sh if needed
status,out = commands.getstatusoutput('voms-proxy-info --version')
athenaStatus,athenaPath = commands.getstatusoutput('which athena.py')
if status == 0:
gridSrc = ''
if athenaStatus == 0 and athenaPath.startswith('/afs/in2p3.fr'):
# for LYON, to avoid missing LD_LIBRARY_PATH
gridSrc = '/afs/in2p3.fr/grid/profiles/lcg_env.sh'
elif athenaStatus == 0 and re.search('^/afs/\.*cern.ch',athenaPath) != None:
# for CERN, VDT is already installed
gridSrc = '/dev/null'
else:
# set Grid setup.sh
if os.environ.has_key('PATHENA_GRID_SETUP_SH'):
gridSrc = os.environ['PATHENA_GRID_SETUP_SH']
else:
if not os.environ.has_key('CMTSITE'):
os.environ['CMTSITE'] = ''
if os.environ['CMTSITE'] == 'CERN' or (athenaStatus == 0 and \
re.search('^/afs/\.*cern.ch',athenaPath) != None):
gridSrc = '/dev/null'
elif os.environ['CMTSITE'] == 'BNL':
gridSrc = '/afs/usatlas.bnl.gov/osg/client/@sys/current/setup.sh'
else:
                    # try to determine site using path to athena
if athenaStatus == 0 and athenaPath.startswith('/afs/in2p3.fr'):
# LYON
gridSrc = '/afs/in2p3.fr/grid/profiles/lcg_env.sh'
elif athenaStatus == 0 and athenaPath.startswith('/cvmfs/atlas.cern.ch'):
# CVMFS
if not os.environ.has_key('ATLAS_LOCAL_ROOT_BASE'):
os.environ['ATLAS_LOCAL_ROOT_BASE'] = '/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase'
gridSrc = os.environ['ATLAS_LOCAL_ROOT_BASE'] + '/user/pandaGridSetup.sh'
else:
print "ERROR : PATHENA_GRID_SETUP_SH is not defined in envvars"
print " for CERN on SLC6 : export PATHENA_GRID_SETUP_SH=/dev/null"
print " for CERN on SLC5 : export PATHENA_GRID_SETUP_SH=/afs/cern.ch/project/gd/LCG-share/current_3.2/etc/profile.d/grid_env.sh"
print " for LYON : export PATHENA_GRID_SETUP_SH=/afs/in2p3.fr/grid/profiles/lcg_env.sh"
print " for BNL : export PATHENA_GRID_SETUP_SH=/afs/usatlas.bnl.gov/osg/client/@sys/current/setup.sh"
return False
# check grid-proxy
if gridSrc != '':
gridSrc = 'source %s > /dev/null;' % gridSrc
        # some grid_env.sh doesn't correct PATH/LD_LIBRARY_PATH
gridSrc = "unset LD_LIBRARY_PATH; unset PYTHONPATH; unset MANPATH; export PATH=/usr/local/bin:/bin:/usr/bin; %s" % gridSrc
# return
return gridSrc
# get DN
def getDN(origString):
shortName = ''
distinguishedName = ''
for line in origString.split('/'):
if line.startswith('CN='):
distinguishedName = re.sub('^CN=','',line)
distinguishedName = re.sub('\d+$','',distinguishedName)
distinguishedName = re.sub('\.','',distinguishedName)
distinguishedName = distinguishedName.strip()
if re.search(' ',distinguishedName) != None:
# look for full name
distinguishedName = distinguishedName.replace(' ','')
break
elif shortName == '':
# keep short name
shortName = distinguishedName
distinguishedName = ''
# use short name
if distinguishedName == '':
distinguishedName = shortName
# return
return distinguishedName
from HTMLParser import HTMLParser
class _monHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.map = {}
self.switch = False
self.td = False
def getMap(self):
retMap = {}
if len(self.map) > 1:
names = self.map[0]
vals = self.map[1]
# values
try:
retMap['total'] = int(vals[names.index('Jobs')])
except:
retMap['total'] = 0
try:
retMap['finished'] = int(vals[names.index('Finished')])
except:
retMap['finished'] = 0
try:
retMap['failed'] = int(vals[names.index('Failed')])
except:
retMap['failed'] = 0
retMap['running'] = retMap['total'] - retMap['finished'] - \
retMap['failed']
return retMap
def handle_data(self, data):
if self.switch:
if self.td:
self.td = False
self.map[len(self.map)-1].append(data)
else:
self.map[len(self.map)-1][-1] += data
else:
if data == "Job Sets:":
self.switch = True
def handle_starttag(self, tag, attrs):
if self.switch and tag == 'tr':
self.map[len(self.map)] = []
if self.switch and tag == 'td':
self.td = True
def handle_endtag(self, tag):
if self.switch and self.td:
self.map[len(self.map)-1].append("")
self.td = False
# get jobInfo from Mon
def getJobStatusFromMon(id,verbose=False):
# get name
shortName = ''
distinguishedName = ''
for line in commands.getoutput('%s grid-proxy-info -identity' % _getGridSrc()).split('/'):
if line.startswith('CN='):
distinguishedName = re.sub('^CN=','',line)
distinguishedName = re.sub('\d+$','',distinguishedName)
distinguishedName = distinguishedName.strip()
if re.search(' ',distinguishedName) != None:
# look for full name
break
elif shortName == '':
# keep short name
shortName = distinguishedName
distinguishedName = ''
# use short name
if distinguishedName == '':
distinguishedName = shortName
# instantiate curl
curl = _Curl()
curl.verbose = verbose
data = {'job':'*',
'jobDefinitionID' : id,
'user' : distinguishedName,
'days' : 100}
# execute
status,out = curl.get(baseURLMON,data)
if status != 0 or re.search('Panda monitor and browser',out)==None:
return {}
# parse
parser = _monHTMLParser()
for line in out.split('\n'):
if re.search('Job Sets:',line) != None:
parser.feed( line )
break
return parser.getMap()
def isDirectAccess(site,usingRAW=False,usingTRF=False,usingARA=False):
# unknown site
if not PandaSites.has_key(site):
return False
# parse copysetup
params = PandaSites[site]['copysetup'].split('^')
# doesn't use special parameters
if len(params) < 5:
return False
# directIn
directIn = params[4]
if directIn != 'True':
return False
# xrootd uses local copy for RAW
newPrefix = params[2]
if newPrefix.startswith('root:'):
if usingRAW:
return False
# official TRF doesn't work with direct dcap/xrootd
if usingTRF and (not usingARA):
if newPrefix.startswith('root:') or newPrefix.startswith('dcap:') or \
newPrefix.startswith('dcache:') or newPrefix.startswith('gsidcap:'):
return False
# return
return True
# run brokerage
def runBrokerage(sites,atlasRelease,cmtConfig=None,verbose=False,trustIS=False,cacheVer='',processingType='',
loggingFlag=False,memorySize=0,useDirectIO=False,siteGroup=None,maxCpuCount=-1,rootVer=''):
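    # Asks the Panda server to choose a site among the candidates (at most 50, picked
    # randomly), taking the Athena release/cache, cmtConfig, memory size and other hints
    # into account. Returns (status,siteName) or, with loggingFlag, (status,dict) where
    # the dict also carries brokerage log messages.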
# use only directIO sites
nonDirectSites = []
if useDirectIO:
tmpNewSites = []
for tmpSite in sites:
if isDirectAccess(tmpSite):
tmpNewSites.append(tmpSite)
else:
nonDirectSites.append(tmpSite)
sites = tmpNewSites
if sites == []:
if not loggingFlag:
return 0,'ERROR : no candidate.'
else:
return 0,{'site':'ERROR : no candidate.','logInfo':[]}
    # choose at most 50 sites randomly to avoid too many lookups
random.shuffle(sites)
sites = sites[:50]
# serialize
strSites = pickle.dumps(sites)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/runBrokerage'
data = {'sites':strSites,
'atlasRelease':atlasRelease}
if cmtConfig != None:
data['cmtConfig'] = cmtConfig
if trustIS:
data['trustIS'] = True
if maxCpuCount > 0:
data['maxCpuCount'] = maxCpuCount
if cacheVer != '':
# change format if needed
cacheVer = re.sub('^-','',cacheVer)
match = re.search('^([^_]+)_(\d+\.\d+\.\d+\.\d+\.*\d*)$',cacheVer)
if match != None:
cacheVer = '%s-%s' % (match.group(1),match.group(2))
else:
# nightlies
match = re.search('_(rel_\d+)$',cacheVer)
if match != None:
# use base release as cache version
cacheVer = '%s:%s' % (atlasRelease,match.group(1))
# use cache for brokerage
data['atlasRelease'] = cacheVer
# use ROOT ver
if rootVer != '' and data['atlasRelease'] == '':
data['atlasRelease'] = 'ROOT-%s' % rootVer
if processingType != '':
# set processingType mainly for HC
data['processingType'] = processingType
# enable logging
if loggingFlag:
data['loggingFlag'] = True
# memory size
if not memorySize in [-1,0,None,'NULL']:
data['memorySize'] = memorySize
# site group
if not siteGroup in [None,-1]:
data['siteGroup'] = siteGroup
status,output = curl.get(url,data)
try:
if not loggingFlag:
return status,output
else:
outputPK = pickle.loads(output)
# add directIO info
if nonDirectSites != []:
if not outputPK.has_key('logInfo'):
outputPK['logInfo'] = []
for tmpSite in nonDirectSites:
msgBody = 'action=skip site=%s reason=nondirect - not directIO site' % tmpSite
outputPK['logInfo'].append(msgBody)
return status,outputPK
except:
type, value, traceBack = sys.exc_info()
print output
print "ERROR runBrokerage : %s %s" % (type,value)
return EC_Failed,None
# run rebrokerage
def runReBrokerage(jobID,libDS='',cloud=None,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/runReBrokerage'
data = {'jobID':jobID}
if cloud != None:
data['cloud'] = cloud
if not libDS in ['',None,'NULL']:
data['libDS'] = libDS
retVal = curl.get(url,data)
# communication error
if retVal[0] != 0:
return retVal
# succeeded
if retVal[1] == True:
return 0,''
# server error
errMsg = retVal[1]
if errMsg.startswith('ERROR: '):
# remove ERROR:
errMsg = re.sub('ERROR: ','',errMsg)
return EC_Failed,errMsg
# retry failed jobs in Active
def retryFailedJobsInActive(jobID,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/retryFailedJobsInActive'
data = {'jobID':jobID}
retVal = curl.get(url,data)
# communication error
if retVal[0] != 0:
return retVal
# succeeded
if retVal[1] == True:
return 0,''
# server error
errMsg = retVal[1]
if errMsg.startswith('ERROR: '):
# remove ERROR:
errMsg = re.sub('ERROR: ','',errMsg)
return EC_Failed,errMsg
# send brokerage log
def sendBrokerageLog(jobID,jobsetID,brokerageLogs,verbose):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
msgList = []
for tmpMsgBody in brokerageLogs:
if not jobsetID in [None,'NULL']:
tmpMsg = ' : jobset=%s jobdef=%s : %s' % (jobsetID,jobID,tmpMsgBody)
else:
tmpMsg = ' : jobdef=%s : %s' % (jobID,tmpMsgBody)
msgList.append(tmpMsg)
# execute
url = baseURLSSL + '/sendLogInfo'
data = {'msgType':'analy_brokerage',
'msgList':pickle.dumps(msgList)}
retVal = curl.post(url,data)
return True
# exclude long,xrootd,local queues
def isExcudedSite(tmpID):
excludedSite = False
for exWord in ['ANALY_LONG_','_LOCAL','_test']:
if re.search(exWord,tmpID,re.I) != None:
excludedSite = True
break
return excludedSite
# get default space token
def getDefaultSpaceToken(fqans,defaulttoken):
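    # Picks the output space token for the user's VOMS FQANs. defaulttoken has the form
    # 'group1:token1,group2:token2,...'; the token of the first group found in fqans is
    # returned, otherwise ''.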
# mapping is not defined
if defaulttoken == '':
return ''
# loop over all groups
for tmpStr in defaulttoken.split(','):
# extract group and token
items = tmpStr.split(':')
if len(items) != 2:
continue
tmpGroup = items[0]
tmpToken = items[1]
# look for group
if re.search(tmpGroup+'/',fqans) != None:
return tmpToken
# not found
return ''
# use dev server
def useDevServer():
global baseURL
baseURL = 'http://aipanda007.cern.ch:25080/server/panda'
global baseURLSSL
baseURLSSL = 'https://aipanda007.cern.ch:25443/server/panda'
global baseURLCSRV
baseURLCSRV = 'https://aipanda007.cern.ch:25443/server/panda'
global baseURLCSRVSSL
baseURLCSRVSSL = 'https://aipanda007.cern.ch:25443/server/panda'
global baseURLSUB
baseURLSUB = 'http://atlpan.web.cern.ch/atlpan'
# use INTR server
def useIntrServer():
global baseURL
baseURL = 'http://aipanda027.cern.ch:25080/server/panda'
global baseURLSSL
baseURLSSL = 'https://aipanda027.cern.ch:25443/server/panda'
# set server
def setServer(urls):
global baseURL
baseURL = urls.split(',')[0]
global baseURLSSL
baseURLSSL = urls.split(',')[-1]
# set cache server
def setCacheServer(urls):
global baseURLCSRV
baseURLCSRV = urls.split(',')[0]
global baseURLCSRVSSL
baseURLCSRVSSL = urls.split(',')[-1]
# register proxy key
def registerProxyKey(credname,origin,myproxy,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
curl.verifyHost = True
# execute
url = baseURLSSL + '/registerProxyKey'
data = {'credname': credname,
'origin' : origin,
'myproxy' : myproxy
}
return curl.post(url,data)
# get proxy key
def getProxyKey(verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getProxyKey'
status,output = curl.post(url,{})
if status!=0:
print output
return status,None
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR getProxyKey : %s %s" % (type,value)
return EC_Failed,None
# add site access
def addSiteAccess(siteID,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/addSiteAccess'
data = {'siteID': siteID}
status,output = curl.post(url,data)
if status!=0:
print output
return status,None
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR listSiteAccess : %s %s" % (type,value)
return EC_Failed,None
# list site access
def listSiteAccess(siteID,verbose=False,longFormat=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/listSiteAccess'
data = {}
if siteID != None:
data['siteID'] = siteID
if longFormat:
data['longFormat'] = True
status,output = curl.post(url,data)
if status!=0:
print output
return status,None
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR listSiteAccess : %s %s" % (type,value)
return EC_Failed,None
# update site access
def updateSiteAccess(method,siteid,userName,verbose=False,value=''):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/updateSiteAccess'
data = {'method':method,'siteid':siteid,'userName':userName}
if value != '':
data['attrValue'] = value
status,output = curl.post(url,data)
if status!=0:
print output
return status,None
try:
return status,output
except:
type, value, traceBack = sys.exc_info()
print "ERROR updateSiteAccess : %s %s" % (type,value)
return EC_Failed,None
# site access map
SiteAcessMapForWG = None
# add allowed sites
def addAllowedSites(verbose=False):
# get logger
tmpLog = PLogger.getPandaLogger()
if verbose:
tmpLog.debug('check site access')
# get access list
global SiteAcessMapForWG
SiteAcessMapForWG = {}
tmpStatus,tmpOut = listSiteAccess(None,verbose,True)
if tmpStatus != 0:
return False
global PandaSites
for tmpVal in tmpOut:
tmpID = tmpVal['primKey']
# keep info to map
SiteAcessMapForWG[tmpID] = tmpVal
# set online if the site is allowed
if tmpVal['status']=='approved':
if PandaSites.has_key(tmpID):
if PandaSites[tmpID]['status'] in ['brokeroff']:
PandaSites[tmpID]['status'] = 'online'
if verbose:
tmpLog.debug('set %s online' % tmpID)
return True
# check permission
def checkSiteAccessPermission(siteName,workingGroup,verbose):
# get site access if needed
if SiteAcessMapForWG == None:
ret = addAllowedSites(verbose)
if not ret:
return True
# don't check if site name is undefined
if siteName == None:
return True
# get logger
tmpLog = PLogger.getPandaLogger()
if verbose:
tmpLog.debug('checking site access permission')
tmpLog.debug('site=%s workingGroup=%s map=%s' % (siteName,workingGroup,str(SiteAcessMapForWG)))
# check
if (not SiteAcessMapForWG.has_key(siteName)) or SiteAcessMapForWG[siteName]['status'] != 'approved':
errStr = "You don't have permission to send jobs to %s with workingGroup=%s. " % (siteName,workingGroup)
# allowed member only
if PandaSites[siteName]['accesscontrol'] == 'grouplist':
tmpLog.error(errStr)
return False
else:
# reset workingGroup
if not workingGroup in ['',None]:
errStr += 'Resetting workingGroup to None'
tmpLog.warning(errStr)
return True
elif not workingGroup in ['',None]:
# check workingGroup
wgList = SiteAcessMapForWG[siteName]['workingGroups'].split(',')
if not workingGroup in wgList:
errStr = "Invalid workingGroup=%s. Must be one of %s. " % (workingGroup,str(wgList))
errStr += 'Resetting workingGroup to None'
tmpLog.warning(errStr)
return True
# no problems
return True
# get JobIDs in a time range
def getJobIDsInTimeRange(timeRange,dn=None,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getJobIDsInTimeRange'
data = {'timeRange':timeRange}
if dn != None:
data['dn'] = dn
status,output = curl.post(url,data)
if status!=0:
print output
return status,None
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR getJobIDsInTimeRange : %s %s" % (type,value)
return EC_Failed,None
# get JobIDs and jediTasks in a time range
def getJobIDsJediTasksInTimeRange(timeRange, dn=None, minTaskID=None, verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getJediTasksInTimeRange'
data = {'timeRange': timeRange,
'fullFlag': True}
if dn != None:
data['dn'] = dn
if minTaskID is not None:
data['minTaskID'] = minTaskID
status,output = curl.post(url,data)
if status!=0:
print output
return status, None
try:
jediTaskDicts = pickle.loads(output)
return 0, jediTaskDicts
except:
type, value, traceBack = sys.exc_info()
print "ERROR getJediTasksInTimeRange : %s %s" % (type,value)
return EC_Failed, None
# get details of jedi task
def getJediTaskDetails(taskDict,fullFlag,withTaskInfo,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getJediTaskDetails'
data = {'jediTaskID':taskDict['jediTaskID'],
'fullFlag':fullFlag,
'withTaskInfo':withTaskInfo}
status,output = curl.post(url,data)
if status != 0:
print output
return status,None
try:
tmpDict = pickle.loads(output)
# server error
if tmpDict == {}:
print "ERROR getJediTaskDetails got empty"
return EC_Failed,None
# copy
for tmpKey,tmpVal in tmpDict.iteritems():
taskDict[tmpKey] = tmpVal
return 0,taskDict
except:
errType,errValue = sys.exc_info()[:2]
print "ERROR getJediTaskDetails : %s %s" % (errType,errValue)
return EC_Failed,None
# get PandaIDs for a JobID
def getPandIDsWithJobID(jobID,dn=None,nJobs=0,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getPandIDsWithJobID'
data = {'jobID':jobID, 'nJobs':nJobs}
if dn != None:
data['dn'] = dn
status,output = curl.post(url,data)
if status!=0:
print output
return status,None
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR getPandIDsWithJobID : %s %s" % (type,value)
return EC_Failed,None
# check merge job generation for a JobID
def checkMergeGenerationStatus(jobID,dn=None,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/checkMergeGenerationStatus'
data = {'jobID':jobID}
if dn != None:
data['dn'] = dn
status,output = curl.post(url,data)
if status!=0:
print output
return status,None
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR checkMergeGenerationStatus : %s %s" % (type,value)
return EC_Failed,None
# get full job status
def getFullJobStatus(ids,verbose):
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getFullJobStatus'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR getFullJobStatus : %s %s" % (type,value)
return EC_Failed,None
# get slimmed file info
def getSlimmedFileInfoPandaIDs(ids,verbose):
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getSlimmedFileInfoPandaIDs'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR getSlimmedFileInfoPandaIDs : %s %s" % (type,value)
return EC_Failed,None
# get input files currently in used for analysis
def getFilesInUseForAnal(outDataset,verbose):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getDisInUseForAnal'
data = {'outDataset':outDataset}
status,output = curl.post(url,data)
try:
inputDisList = pickle.loads(output)
# failed
if inputDisList == None:
print "ERROR getFilesInUseForAnal : failed to get shadow dis list from the panda server"
sys.exit(EC_Failed)
# split to small chunks to avoid timeout
retLFNs = []
nDis = 3
iDis = 0
while iDis < len(inputDisList):
# serialize
strInputDisList = pickle.dumps(inputDisList[iDis:iDis+nDis])
# get LFNs
url = baseURLSSL + '/getLFNsInUseForAnal'
data = {'inputDisList':strInputDisList}
status,output = curl.post(url,data)
tmpLFNs = pickle.loads(output)
if tmpLFNs == None:
print "ERROR getFilesInUseForAnal : failed to get LFNs in shadow dis from the panda server"
sys.exit(EC_Failed)
retLFNs += tmpLFNs
iDis += nDis
time.sleep(1)
return retLFNs
except:
type, value, traceBack = sys.exc_info()
print "ERROR getFilesInUseForAnal : %s %s" % (type,value)
sys.exit(EC_Failed)
# set debug mode
def setDebugMode(pandaID,modeOn,verbose):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/setDebugMode'
data = {'pandaID':pandaID,'modeOn':modeOn}
status,output = curl.post(url,data)
try:
return status,output
except:
type, value = sys.exc_info()[:2]
errStr = "setDebugMode failed with %s %s" % (type,value)
return EC_Failed,errStr
# set tmp dir
def setGlobalTmpDir(tmpDir):
global globalTmpDir
globalTmpDir = tmpDir
# exclude site
def excludeSite(excludedSiteList,origFullExecString='',infoList=[]):
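    # Marks Panda sites matching the --excludedSite patterns as excluded so the brokerage
    # skips them, and records a log message in infoList when the exclusion was explicitly
    # requested by the user in this submission.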
if excludedSiteList == []:
return
# decompose
excludedSite = []
for tmpItemList in excludedSiteList:
for tmpItem in tmpItemList.split(','):
if tmpItem != '' and not tmpItem in excludedSite:
excludedSite.append(tmpItem)
# get list of original excludedSites
origExcludedSite = []
if origFullExecString != '':
# extract original excludedSite
origFullExecString = urllib.unquote(origFullExecString)
matchItr = re.finditer('--excludedSite\s*=*([^ "]+)',origFullExecString)
for match in matchItr:
origExcludedSite += match.group(1).split(',')
else:
# use excludedSite since this is the first loop
origExcludedSite = excludedSite
# remove empty
if '' in origExcludedSite:
origExcludedSite.remove('')
# sites composed of long/short queues
compSites = ['CERN','LYON','BNL']
# remove sites
global PandaSites
for tmpPatt in excludedSite:
# skip empty
if tmpPatt == '':
continue
        # check if the user specified this pattern
userSpecified = False
if tmpPatt in origExcludedSite:
userSpecified = True
# check if it is a composite
for tmpComp in compSites:
if tmpComp in tmpPatt:
# use generic ID to remove all queues
tmpPatt = tmpComp
break
sites = PandaSites.keys()
for site in sites:
# look for pattern
if tmpPatt in site:
try:
# add brokerage info
if userSpecified and PandaSites[site]['status'] == 'online' and not isExcudedSite(site):
msgBody = 'action=exclude site=%s reason=useroption - excluded by user' % site
if not msgBody in infoList:
infoList.append(msgBody)
PandaSites[site]['status'] = 'excluded'
else:
# already used by previous submission cycles
PandaSites[site]['status'] = 'panda_excluded'
except:
pass
# use certain sites
def useCertainSites(sitePat):
if re.search(',',sitePat) == None:
return sitePat,[]
# remove sites
global PandaSites
sites = PandaSites.keys()
cloudsForRandom = []
for site in sites:
# look for pattern
useThisSite = False
for tmpPatt in sitePat.split(','):
if tmpPatt in site:
useThisSite = True
break
# delete
if not useThisSite:
PandaSites[site]['status'] = 'skip'
else:
if not PandaSites[site]['cloud'] in cloudsForRandom:
cloudsForRandom.append(PandaSites[site]['cloud'])
# return
return 'AUTO',cloudsForRandom
# get client version
def getPandaClientVer(verbose):
# instantiate curl
curl = _Curl()
curl.verbose = verbose
# execute
url = baseURL + '/getPandaClientVer'
status,output = curl.get(url,{})
# failed
if status != 0:
return status,output
# check format
if re.search('^\d+\.\d+\.\d+$',output) == None:
return EC_Failed,"invalid version '%s'" % output
# return
return status,output
# get list of cache prefix
def getCachePrefixes(verbose):
# instantiate curl
curl = _Curl()
curl.verbose = verbose
# execute
url = baseURL + '/getCachePrefixes'
status,output = curl.get(url,{})
# failed
if status != 0:
print output
errStr = "cannot get the list of Athena projects"
tmpLog.error(errStr)
sys.exit(EC_Failed)
# return
try:
tmpList = pickle.loads(output)
tmpList.append('AthAnalysisBase')
return tmpList
except:
print output
errType,errValue = sys.exc_info()[:2]
print "ERROR: getCachePrefixes : %s %s" % (errType,errValue)
sys.exit(EC_Failed)
# get list of cmtConfig
def getCmtConfigList(athenaVer,verbose):
# instantiate curl
curl = _Curl()
curl.verbose = verbose
# execute
url = baseURL + '/getCmtConfigList'
data = {}
data['relaseVer'] = athenaVer
status,output = curl.get(url,data)
# failed
if status != 0:
print output
errStr = "cannot get the list of cmtconfig for %s" % athenaVer
tmpLog.error(errStr)
sys.exit(EC_Failed)
# return
try:
return pickle.loads(output)
except:
print output
errType,errValue = sys.exc_info()[:2]
print "ERROR: getCmtConfigList : %s %s" % (errType,errValue)
sys.exit(EC_Failed)
# get files in dataset with filter
def getFilesInDatasetWithFilter(inDS,filter,shadowList,inputFileListName,verbose,dsStringFlag=False,isRecursive=False,
antiFilter='',notSkipLog=False):
# get logger
tmpLog = PLogger.getPandaLogger()
# query files in dataset
if not isRecursive or verbose:
tmpLog.info("query files in %s" % inDS)
if dsStringFlag:
inputFileMap,inputDsString = queryFilesInDataset(inDS,verbose,getDsString=True)
else:
inputFileMap = queryFilesInDataset(inDS,verbose)
# read list of files to be used
filesToBeUsed = []
if inputFileListName != '':
rFile = open(inputFileListName)
for line in rFile:
line = re.sub('\n','',line)
line = line.strip()
if line != '':
filesToBeUsed.append(line)
rFile.close()
# get list of filters
filters = []
if filter != '':
filters = filter.split(',')
antifilters = []
if antiFilter != '':
antifilters = antiFilter.split(',')
# remove redundant files
tmpKeys = inputFileMap.keys()
filesPassFilter = []
for tmpLFN in tmpKeys:
# remove log
if not notSkipLog:
if re.search('\.log(\.tgz)*(\.\d+)*$',tmpLFN) != None or \
re.search('\.log(\.\d+)*(\.tgz)*$',tmpLFN) != None:
del inputFileMap[tmpLFN]
continue
# filename matching
if filter != '':
matchFlag = False
for tmpFilter in filters:
if re.search(tmpFilter,tmpLFN) != None:
matchFlag = True
break
if not matchFlag:
del inputFileMap[tmpLFN]
continue
# anti matching
if antiFilter != '':
antiMatchFlag = False
for tmpFilter in antifilters:
if re.search(tmpFilter,tmpLFN) != None:
antiMatchFlag = True
break
if antiMatchFlag:
del inputFileMap[tmpLFN]
continue
# files to be used
if filesToBeUsed != []:
# check matching
matchFlag = False
for pattern in filesToBeUsed:
# normal matching
if pattern == tmpLFN:
                    matchFlag = True
break
# doesn't match
if not matchFlag:
del inputFileMap[tmpLFN]
continue
# files which pass the matching filters
filesPassFilter.append(tmpLFN)
# files in shadow
if tmpLFN in shadowList:
if inputFileMap.has_key(tmpLFN):
del inputFileMap[tmpLFN]
continue
# no files in filelist are available
if inputFileMap == {} and (filter != '' or antiFilter != '' or inputFileListName != '') and filesPassFilter == []:
if inputFileListName != '':
errStr = "Files specified in %s are unavailable in %s. " % (inputFileListName,inDS)
elif filter != '':
errStr = "Files matching with %s are unavailable in %s. " % (filters,inDS)
else:
errStr = "Files unmatching with %s are unavailable in %s. " % (antifilters,inDS)
errStr += "Make sure that you specify correct file names or matching patterns"
tmpLog.error(errStr)
sys.exit(EC_Failed)
# return
if dsStringFlag:
return inputFileMap,inputDsString
return inputFileMap
# check if DQ2-free site
def isDQ2free(site):
if PandaSites.has_key(site) and PandaSites[site]['ddm'] == 'local':
return True
return False
# check queued analysis jobs at a site
def checkQueuedAnalJobs(site,verbose=False):
# get logger
tmpLog = PLogger.getPandaLogger()
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getQueuedAnalJobs'
data = {'site':site}
status,output = curl.post(url,data)
try:
# get queued analysis
queuedMap = pickle.loads(output)
if queuedMap.has_key('running') and queuedMap.has_key('queued'):
if queuedMap['running'] > 20 and queuedMap['queued'] > 2 * queuedMap['running']:
warStr = 'Your job might be delayed since %s is busy. ' % site
warStr += 'There are %s jobs already queued by other users while %s jobs are running. ' \
% (queuedMap['queued'],queuedMap['running'])
warStr += 'Please consider replicating the input dataset to a free site '
warStr += 'or avoiding the --site/--cloud option so that the brokerage will '
warStr += 'find a free site'
tmpLog.warning(warStr)
except:
type, value, traceBack = sys.exc_info()
tmpLog.error("checkQueuedAnalJobs %s %s" % (type,value))
# request EventPicking
def requestEventPicking(eventPickEvtList,eventPickDataType,eventPickStreamName,
eventPickDS,eventPickAmiTag,fileList,fileListName,outDS,
lockedBy,params,eventPickNumSites,eventPickWithGUID,ei_api,
verbose=False):
# get logger
tmpLog = PLogger.getPandaLogger()
# list of input files
strInput = ''
for tmpInput in fileList:
if tmpInput != '':
strInput += '%s,' % tmpInput
if fileListName != '':
for tmpLine in open(fileListName):
tmpInput = re.sub('\n','',tmpLine)
if tmpInput != '':
strInput += '%s,' % tmpInput
strInput = strInput[:-1]
# make dataset name
userDatasetName = '%s.%s.%s/' % tuple(outDS.split('.')[:2]+[MiscUtils.wrappedUuidGen()])
# open run/event number list
evpFile = open(eventPickEvtList)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/putEventPickingRequest'
data = {'runEventList' : evpFile.read(),
'eventPickDataType' : eventPickDataType,
'eventPickStreamName' : eventPickStreamName,
'eventPickDS' : eventPickDS,
'eventPickAmiTag' : eventPickAmiTag,
'userDatasetName' : userDatasetName,
'lockedBy' : lockedBy,
'giveGUID' : eventPickWithGUID,
'params' : params,
'inputFileList' : strInput,
}
if eventPickNumSites > 1:
data['eventPickNumSites'] = eventPickNumSites
if ei_api:
data['ei_api'] = ei_api
evpFile.close()
status,output = curl.post(url,data)
# failed
if status != 0 or output != True:
print output
errStr = "failed to request EventPicking"
tmpLog.error(errStr)
sys.exit(EC_Failed)
# return user dataset name
return True,userDatasetName
# check if enough sites have DBR
def checkEnoughSitesHaveDBR(dq2IDs):
    # collect sites corresponding to the DQ2 IDs
sitesWithDBR = []
for tmpDQ2ID in dq2IDs:
tmpPandaSiteList = convertDQ2toPandaIDList(tmpDQ2ID)
for tmpPandaSite in tmpPandaSiteList:
if PandaSites.has_key(tmpPandaSite) and PandaSites[tmpPandaSite]['status'] == 'online':
if isExcudedSite(tmpPandaSite):
continue
sitesWithDBR.append(tmpPandaSite)
# count the number of online sites with DBR
nOnline = 0
nOnlineWithDBR = 0
nOnlineT1 = 0
nOnlineT1WithDBR = 0
for tmpPandaSite,tmpSiteStat in PandaSites.iteritems():
if tmpSiteStat['status'] == 'online':
# exclude test,long,local
if isExcudedSite(tmpPandaSite):
continue
# DQ2 free
if tmpSiteStat['ddm'] == 'local':
continue
nOnline += 1
if tmpPandaSite in PandaTier1Sites:
nOnlineT1 += 1
if tmpPandaSite in sitesWithDBR or tmpSiteStat['iscvmfs'] == True:
nOnlineWithDBR += 1
                # count T1 sites holding the DBR so it can later be required at all T1 DISKs
if tmpPandaSite in PandaTier1Sites and tmpPandaSite in sitesWithDBR:
nOnlineT1WithDBR += 1
# enough replicas
if nOnlineWithDBR < 10:
return False
# threshold 90%
if float(nOnlineWithDBR) < 0.9 * float(nOnline):
return False
# not all T1s have the DBR
if nOnlineT1 != nOnlineT1WithDBR:
return False
# all OK
return True
# get latest DBRelease
def getLatestDBRelease(verbose=False):
# get logger
tmpLog = PLogger.getPandaLogger()
tmpLog.info('trying to get the latest version number for DBRelease=LATEST')
# get ddo datasets
ddoDatasets = getDatasets('ddo.*',verbose,True,onlyNames=True)
if ddoDatasets == {}:
tmpLog.error('failed to get a list of DBRelease datasets from DQ2')
sys.exit(EC_Failed)
# reverse sort to avoid redundant lookup
ddoDatasets = ddoDatasets.keys()
ddoDatasets.sort()
ddoDatasets.reverse()
# extract version number
latestVerMajor = 0
latestVerMinor = 0
latestVerBuild = 0
latestVerRev = 0
latestDBR = ''
for tmpName in ddoDatasets:
# ignore CDRelease
if ".CDRelease." in tmpName:
continue
# ignore user
if tmpName.startswith('ddo.user'):
continue
# use Atlas.Ideal
if not ".Atlas.Ideal." in tmpName:
continue
match = re.search('\.v(\d+)(_*[^\.]*)$',tmpName)
if match == None:
tmpLog.warning('cannot extract version number from %s' % tmpName)
continue
# ignore special DBRs
if match.group(2) != '':
continue
# get major,minor,build,revision numbers
tmpVerStr = match.group(1)
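        # e.g. a hypothetical dataset name ending in ".v31070101" gives tmpVerStr == "31070101",
        # which the slices below read as major=31, minor=7, build=1, revision=1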
tmpVerMajor = 0
tmpVerMinor = 0
tmpVerBuild = 0
tmpVerRev = 0
try:
tmpVerMajor = int(tmpVerStr[0:2])
except:
pass
try:
tmpVerMinor = int(tmpVerStr[2:4])
except:
pass
try:
tmpVerBuild = int(tmpVerStr[4:6])
except:
pass
try:
tmpVerRev = int(tmpVerStr[6:])
except:
pass
# compare
if latestVerMajor > tmpVerMajor:
continue
elif latestVerMajor == tmpVerMajor:
if latestVerMinor > tmpVerMinor:
continue
elif latestVerMinor == tmpVerMinor:
if latestVerBuild > tmpVerBuild:
continue
elif latestVerBuild == tmpVerBuild:
if latestVerRev > tmpVerRev:
continue
        # check replica locations so that a well-distributed DBRelease is used, i.e. avoid a DBR that was only just created
tmpLocations = getLocations(tmpName,[],'',False,verbose,getDQ2IDs=True)
if not checkEnoughSitesHaveDBR(tmpLocations):
continue
# check contents to exclude reprocessing DBR
tmpDbrFileMap = queryFilesInDataset(tmpName,verbose)
if len(tmpDbrFileMap) != 1 or not tmpDbrFileMap.keys()[0].startswith('DBRelease'):
continue
# higher or equal version
latestVerMajor = tmpVerMajor
latestVerMinor = tmpVerMinor
latestVerBuild = tmpVerBuild
latestVerRev = tmpVerRev
latestDBR = tmpName
# failed
if latestDBR == '':
tmpLog.error('failed to get the latest version of DBRelease dataset from DQ2')
sys.exit(EC_Failed)
# get DBRelease file name
tmpList = queryFilesInDataset(latestDBR,verbose)
if len(tmpList) == 0:
tmpLog.error('DBRelease=%s is empty' % latestDBR)
sys.exit(EC_Failed)
    # return dataset:file
retVal = '%s:%s' % (latestDBR,tmpList.keys()[0])
tmpLog.info('use %s' % retVal)
return retVal
# get inconsistent datasets which are complete in DQ2 but not in LFC
def getInconsistentDS(missList,newUsedDsList):
if missList == [] or newUsedDsList == []:
return []
inconDSs = []
# loop over all datasets
for tmpDS in newUsedDsList:
# escape
if missList == []:
break
# get file list
tmpList = queryFilesInDataset(tmpDS)
newMissList = []
# look for missing files
for tmpFile in missList:
if tmpList.has_key(tmpFile):
# append
if not tmpDS in inconDSs:
inconDSs.append(tmpDS)
else:
# keep as missing
newMissList.append(tmpFile)
# use new missing list for the next dataset
missList = newMissList
# return
return inconDSs
# submit task
def insertTaskParams(taskParams,verbose,properErrorCode=False):
"""Insert task parameters
args:
taskParams: a dictionary of task parameters
returns:
status code
0: communication succeeded to the panda server
255: communication failure
tuple of return code and message from the server
0: request is processed
1: duplication in DEFT
2: duplication in JEDI
3: accepted for incremental execution
4: server error
"""
# serialize
taskParamsStr = json.dumps(taskParams)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/insertTaskParams'
data = {'taskParams':taskParamsStr,
'properErrorCode':properErrorCode}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR insertTaskParams : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
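# --- illustrative only: a minimal sketch of how insertTaskParams might be invoked.
# The task parameter keys used here ('taskName','userName','jobParameters') are
# assumptions for illustration, not an authoritative JEDI task schema.
def exampleInsertTaskParams(verbose=False):
    taskParams = {'taskName'      : 'user.somebody.example.task/',
                  'userName'      : 'somebody',
                  'jobParameters' : [],
                  }
    # returns (communication status, server return value) as documented above
    return insertTaskParams(taskParams,verbose,properErrorCode=True)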
# get history of job retry
def getRetryHistory(jediTaskID,verbose=False):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getRetryHistory'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR getRetryHistory : %s %s" % (type,value)
return EC_Failed,None
# get PanDA IDs with TaskID
def getPandaIDsWithTaskID(jediTaskID,verbose=False):
"""Get PanDA IDs with TaskID
args:
          jediTaskID: jediTaskID of the task to get the list of PanDA IDs
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the list of PanDA IDs
"""
# instantiate curl
curl = _Curl()
curl.verbose = verbose
# execute
url = baseURL + '/getPandaIDsWithTaskID'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getPandaIDsWithTaskID : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# reactivate task
def reactivateTask(jediTaskID,verbose=False):
"""Reactivate task
args:
jediTaskID: jediTaskID of the task to be reactivated
returns:
status code
0: communication succeeded to the panda server
255: communication failure
       return: a tuple of return code and message
0: unknown task
1: succeeded
None: database error
"""
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/reactivateTask'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
errtype,errvalue = sys.exc_info()[:2]
errStr = "ERROR reactivateTask : %s %s" % (errtype,errvalue)
return EC_Failed,output+'\n'+errStr
# get task status with TaskID
def getTaskStatus(jediTaskID,verbose=False):
"""Get task status
args:
          jediTaskID: jediTaskID of the task whose status is requested
returns:
status code
0: communication succeeded to the panda server
255: communication failure
the status string
"""
# instantiate curl
curl = _Curl()
curl.verbose = verbose
# execute
url = baseURL + '/getTaskStatus'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getTaskStatus : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get taskParamsMap with TaskID
def getTaskParamsMap(jediTaskID):
"""Get task status
args:
jediTaskID: jediTaskID of the task to get taskParamsMap
returns:
status code
0: communication succeeded to the panda server
255: communication failure
return: a tuple of return code and taskParamsMap
1: logical error
0: success
None: database error
"""
# instantiate curl
curl = _Curl()
# execute
url = baseURL + '/getTaskParamsMap'
data = {'jediTaskID':jediTaskID}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
errStr = "ERROR getTaskParamsMap : %s %s" % (type,value)
print errStr
return EC_Failed,output+'\n'+errStr
# get T1 sites
def getTier1sites():
global PandaTier1Sites
PandaTier1Sites = []
# FIXME : will be simplified once schedconfig has a tier field
for tmpCloud,tmpCloudVal in PandaClouds.iteritems():
for tmpDQ2ID in tmpCloudVal['tier1SE']:
# ignore NIKHEF
if tmpDQ2ID.startswith('NIKHEF'):
continue
# convert DQ2 ID to Panda Sites
tmpPandaSites = convertDQ2toPandaIDList(tmpDQ2ID)
for tmpPandaSite in tmpPandaSites:
if not tmpPandaSite in PandaTier1Sites:
PandaTier1Sites.append(tmpPandaSite)
# set X509_CERT_DIR
if os.environ.has_key('PANDA_DEBUG'):
print "DEBUG : setting X509_CERT_DIR"
if not os.environ.has_key('X509_CERT_DIR') or os.environ['X509_CERT_DIR'] == '':
tmp_x509_CApath = _x509_CApath()
if tmp_x509_CApath != '':
os.environ['X509_CERT_DIR'] = tmp_x509_CApath
else:
os.environ['X509_CERT_DIR'] = '/etc/grid-security/certificates'
if os.environ.has_key('PANDA_DEBUG'):
print "DEBUG : imported %s" % __name__
``` |
{
"source": "jonbwhite/django-timestampable",
"score": 2
} |
#### File: timestamps/drf/mixins.py
```python
import aspectlib
from django.conf import settings
from rest_framework import status, mixins
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.response import Response
from timestamps.drf.utils import is_hard_delete_request
class ListDeletedModelMixin:
def list_deleted(self, request, *args, **kwargs):
return ListModelMixin.list(self, request, *args, **kwargs)
class ListWithDeletedModelMixin:
def list_with_deleted(self, request, *args, **kwargs):
return ListModelMixin.list(self, request, *args, **kwargs)
class RetrieveDeletedModelMixin:
def retrieve_deleted(self, request, *args, **kwargs):
return RetrieveModelMixin.retrieve(self, request, *args, **kwargs)
class RetrieveWithDeletedModelMixin:
def retrieve_with_deleted(self, request, *args, **kwargs):
return RetrieveModelMixin.retrieve(self, request, *args, **kwargs)
class RestoreModelMixin:
def restore(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_restore(instance)
return Response(self.get_serializer(instance=instance).data)
def perform_restore(self, instance):
return instance.restore()
class BulkRestoreModelMixin:
def bulk_restore(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
count = self.perform_bulk_restore(queryset)
if getattr(settings, 'TIMESTAMPS__BULK_RESPONSE_CONTENT', False):
return Response(data={'count': count, }, status=status.HTTP_200_OK)
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_bulk_restore(self, qs):
return qs.restore()
class DestroyModelMixin(mixins.DestroyModelMixin):
def perform_destroy(self, instance):
return instance.delete(hard=is_hard_delete_request(self))
class BulkDestroyModelMixin:
def perform_bulk_destroy(self, qs):
return qs.delete(hard=is_hard_delete_request(self))
def bulk_destroy(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
count = self.perform_bulk_destroy(queryset)
if not getattr(settings, 'TIMESTAMPS__BULK_RESPONSE_CONTENT', False):
return Response(status=status.HTTP_204_NO_CONTENT)
# a delete operation (hard delete) returns a tuple of:
# - total rows deleted (count)
# - total rows deleted per table (per_model)
if isinstance(count, tuple):
count, count_per_model = count
return Response(data={'count': count, 'count_per_model': count_per_model, }, status=status.HTTP_200_OK)
return Response(data={'count': count}, status=status.HTTP_200_OK)
def __remove_clause_deleted_at(queryset):
from timestamps.querysets import SoftDeleteQuerySet
from django.db.models.lookups import IsNull
if not isinstance(queryset, SoftDeleteQuerySet):
return queryset
queryset = queryset.all() # clone
where = queryset.query.where
for i, child in enumerate(where.children):
if isinstance(child, IsNull) and child.lhs.field.name == 'deleted_at':
where.children.pop(i)
break
return queryset
# Using Aspect-Oriented Programming (AOP)
# to change the behavior of GenericAPIView.get_queryset(self).
# Done this way there is no need to pollute CoreModelViewSet,
# and developers can adopt only a subset of the soft-delete mixins
# without having to use all of the view mixins that CoreModelViewSet extends.
@aspectlib.Aspect
def __get_queryset(*args, **kwargs):
queryset = yield aspectlib.Proceed
view = args[0]
mixin = {
'list_with_deleted': ListWithDeletedModelMixin,
'retrieve_with_deleted': RetrieveWithDeletedModelMixin,
}
if is_hard_delete_request(view):
mixin['destroy'] = DestroyModelMixin
mixin['bulk_destroy'] = BulkDestroyModelMixin
mixin = mixin.get(view.action, None)
if mixin and isinstance(view, mixin):
queryset = __remove_clause_deleted_at(queryset)
yield aspectlib.Return(queryset)
mixin = {
'list_deleted': ListDeletedModelMixin,
'retrieve_deleted': RetrieveDeletedModelMixin,
'restore': RestoreModelMixin,
'bulk_restore': BulkRestoreModelMixin,
}.get(view.action, None)
if mixin and isinstance(view, mixin):
queryset = __remove_clause_deleted_at(queryset)
yield aspectlib.Return(queryset.only_deleted())
yield aspectlib.Return(queryset)
aspectlib.weave(target=GenericAPIView.get_queryset, aspects=__get_queryset)
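# Illustrative sketch only (not part of this module): a hypothetical ViewSet that
# combines a subset of the mixins above; "Post" and "PostSerializer" are invented
# names used purely for illustration.
#
#   from rest_framework.viewsets import GenericViewSet
#
#   class PostViewSet(ListDeletedModelMixin, RestoreModelMixin,
#                     DestroyModelMixin, BulkDestroyModelMixin, GenericViewSet):
#       queryset = Post.objects.all()
#       serializer_class = PostSerializer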
```
#### File: timestamps/drf/utils.py
```python
from rest_framework.serializers import BooleanField
from rest_framework.views import View
from .permissions import can_hard_delete
def is_hard_delete_request(view: View) -> bool:
permanent = view.request.query_params.get('permanent')
is_hard_delete = BooleanField(required=False, allow_null=True).run_validation(permanent)
if is_hard_delete:
can_hard_delete(view)
return is_hard_delete
```
#### File: django-timestampable/timestamps/querysets.py
```python
from django.db.models import QuerySet
from django.utils import timezone
class SoftDeleteQuerySet(QuerySet):
def only_deleted(self):
return self.filter(deleted_at__isnull=False)
def without_deleted(self):
return self.filter(deleted_at__isnull=True)
# bulk deleting
def delete(self, hard: bool = False):
if hard:
return super(SoftDeleteQuerySet, self).delete()
return super(SoftDeleteQuerySet, self).update(deleted_at=timezone.now())
# bulk restore
def restore(self):
return super(SoftDeleteQuerySet, self).update(deleted_at=None)
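# Illustrative usage sketch, assuming a model whose default manager returns a
# SoftDeleteQuerySet (the model name "Post" is hypothetical):
#   Post.objects.without_deleted()               # rows where deleted_at IS NULL
#   Post.objects.only_deleted()                  # rows where deleted_at IS NOT NULL
#   Post.objects.filter(...).delete()            # bulk soft delete (sets deleted_at)
#   Post.objects.filter(...).delete(hard=True)   # bulk hard delete (real DELETE)
#   Post.objects.only_deleted().restore()        # clears deleted_at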
``` |
{
"source": "joncalhoun/cmd",
"score": 3
} |
#### File: joncalhoun/cmd/cmd.py
```python
import subprocess
def must(cmd, stdin=None):
out, err = run(cmd, stdin)
if err:
raise err
return out
def run(cmd, stdin=None):
p = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
# out, err = p.communicate()
if stdin:
out, err = p.communicate(input=stdin.encode())
else:
out, err = p.communicate()
out = out.decode("utf-8")
if err:
msg = "Error while running an external command.\nCommand: %s\nError: %s" % (" ".join(cmd), err.decode("utf-8"))
return out, Error(msg)
return out, None
class Error(Exception):
"""cmd errors are always of this type"""
pass
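# Illustrative usage sketch (assumes a POSIX environment providing "echo" and "cat"):
#   out = must(["echo", "hello"])        # returns "hello\n", raises Error on failure
#   out, err = run(["cat"], stdin="hi")  # feeds "hi" to cat via stdin; err is None on success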
``` |
{
"source": "joncalhoun/gocode",
"score": 3
} |
#### File: gocode/tests/test_gocode.py
```python
import unittest
import gocode
def info(func, input):
return "%s(%s)" % (func, input)
class TestGocode(unittest.TestCase):
def test_is_named(self):
def false(input):
self.assertFalse(gocode.is_named(input), info("is_named", input))
def true(input):
self.assertTrue(gocode.is_named(input), info("is_named", input))
false("error")
false("a")
false("chan int")
false("func(a, b int) error")
true("a int")
true("err error")
true("c chan int")
true("f func(a, b int) error")
def test_parse_param_parts(self):
def test(input, exp):
res = gocode.parse_param_parts(input)
self.assertEqual(res, exp, info("parse_param_parts", input))
test("error", ["error"])
test("(int, error)", ["int", "error"])
test("(a, b int, err error)", ["a", "b int", "err error"])
test("(a int, f func(c, d int) error, err error)", ["a int", "f func(c, d int) error", "err error"])
def test_parse_params(self):
def test(input, exp):
res = gocode.parse_params(input)
self.assertEqual(res, exp, info("parse_params", input))
test("error", [
(None, "error")
])
test("(int, error)", [
(None, "int"),
(None, "error")
])
test("(a, b int, err error)", [
("a", "int"),
("b", "int"),
("err", "error")
])
test("(a int, f func(c, d int) error, err error)", [
("a", "int"),
("f", "func(c, d int) error"),
("err", "error")
])
test("(a, b func(c, d int) error)", [
("a", "func(c, d int) error"),
("b", "func(c, d int) error")
])
test("(a int, b []chan int)", [
("a", "int"),
("b", "[]chan int")
])
test("(int, chan int)", [
(None, "int"),
(None, "chan int")
])
def test_parse_func(self):
def test(input, exp):
res = gocode.parse_func(input)
self.assertEqual(res, exp, info("parse_func", input))
test("func(a int) error", (
[("a", "int")],
[(None, "error")]
))
test("func(a, b int, c chan int, d func(e error) error) (int, error)", (
[
("a", "int"),
("b", "int"),
("c", "chan int"),
("d", "func(e error) error")
], [
(None, "int"),
(None, "error")
]
))
test("func(a int) (num int, den int)", (
[
("a", "int")
], [
("num", "int"),
("den", "int")
]
))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joncampbell123/a-pile-of-documentation",
"score": 3
} |
#### File: joncampbell123/a-pile-of-documentation/book_reader_module.py
```python
import os
import glob
import json
import pathlib
import common_json_help_module
# dictionary: book id -> book object
book_by_id = { }
# book object
class Book:
id = None
url = None
type = None
title = None
author = None
publisher = None
copyright_year = None
copyright_by = None
hierarchy = [ ]
hierarchy_root = None
hierarchy_search = None
base_json = None
def serialize_to_compiled_json(self):
f = { }
if not self.url == None:
f["url"] = self.url
if not self.type == None:
f["type"] = self.type
if not self.title == None:
f["title"] = self.title
if not self.author == None:
f["author"] = self.author
if not self.publisher == None:
f["publisher"] = self.publisher
f["copyright"] = { }
if not self.copyright_year == None:
f["copyright"]["year"] = self.copyright_year
if not self.copyright_by == None:
f["copyright"]["by"] = self.copyright_by
if not f["copyright"]:
del f["copyright"]
if not self.hierarchy == None:
if len(self.hierarchy) > 0:
f["hierarchy"] = self.hierarchy
if not self.hierarchy_search == None:
if self.hierarchy_search:
f["hierarchy search"] = self.hierarchy_search
return f;
def add_sectionobj(self,obj,json):
if not json == None:
if isinstance(json, dict):
for name in json.keys():
if not name in obj:
obj[name] = [ ]
obj[name].append(json[name])
def __init__(self,json,file_path):
self.base_json = json
file_path = pathlib.Path(file_path)
if len(file_path.parts) > 0:
self.id = file_path.parts[len(file_path.parts)-1]
if self.id[-5:] == ".json":
self.id = self.id[:-5]
if self.id == None:
raise Exception("Book with unknown id given path "+str(file_path))
if not self.id == json.get("id"):
raise Exception("Book id does not match file "+str(file_path))
self.url = json.get("url")
self.type = json.get("type")
self.title = json.get("title")
self.author = json.get("author")
self.publisher = json.get("publisher")
if "copyright" in json:
obj = json["copyright"]
self.copyright_year = obj.get("year")
self.copyright_by = obj.get("by")
#
        self.hierarchy_search = { } # NOTE: a dict created at class level would be shared by every Book instance; assigning a fresh dict here in __init__ gives each instance its own mapping
if "hierarchy" in json:
if isinstance(json["hierarchy"], list):
self.hierarchy = json["hierarchy"]
if len(self.hierarchy) > 0:
self.hierarchy_root = json[self.hierarchy[0]] # blow up if it does not exist
search = [ self.hierarchy_root ]
newsearch = None
prev_what = None
for what in self.hierarchy:
if what in self.hierarchy_search:
raise Exception("Hierarchy name specified more than once: "+what)
self.hierarchy_search[what] = [ ]
#
if not newsearch == None:
for i in range(len(newsearch)):
hobj = newsearch[i]
if isinstance(hobj, dict):
if what in hobj:
parent_name = hobj["name lookup"]
newsearch[i] = hobj[what]
del hobj[what]
for hobjname in newsearch[i]:
newsearch[i][hobjname]["parent lookup"] = { "name": parent_name, "type": prev_what }
continue
#
newsearch[i] = { }
#
search = newsearch
#
newsearch = [ ]
for searchobj in search:
for hobjname in searchobj:
hobj = searchobj[hobjname]
hobj["name lookup"] = hobjname
self.hierarchy_search[what].append(hobj)
newsearch.append(hobj)
#
prev_what = what
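# Illustrative only: a hypothetical sources/<id>.json entry of the shape this class
# accepts; every field value below is invented for illustration.
#
#   {
#     "id": "some-book",
#     "type": "book",
#     "title": "Some Title",
#     "copyright": { "year": 1990, "by": "Somebody" },
#     "hierarchy": [ "chapter", "section" ],
#     "chapter": { "1": { "title": "Intro",
#                         "section": { "1.1": { "title": "Overview" } } } }
#   }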
def load_books():
g = glob.glob("sources/*.json",recursive=True)
for path in g:
json = common_json_help_module.load_json(path)
book = Book(json,path)
if not book.id == None:
if book.id in book_by_id:
raise Exception("Book "+book.id+" already defined")
book_by_id[book.id] = book
```
#### File: joncampbell123/a-pile-of-documentation/common_json_help_module.py
```python
import os
import glob
import json
import pathlib
def load_json(path):
f = open(path,"r",encoding='utf-8')
j = json.load(f)
f.close()
return j
```
#### File: joncampbell123/a-pile-of-documentation/gen-reference.py
```python
import os
import re
import glob
import json
import zlib
import math
import struct
import pathlib
import ttf_module
import pdf_module
import html_module
import common_json_help_module
import table_presentation_module
def text_tchr_classify(c):
if c == '\n' or c == '\t' or c == ' ':
return "w"
return "c"
def text_split_text(text):
e = [ ]
w = ""
cls = None
for c in text:
ncls = text_tchr_classify(c)
if not cls == ncls or c == '\n':
cls = ncls
if not w == "":
e.append(w)
w = ""
#
w = w + c
if not w == "":
e.append(w)
return e
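# For example, text_split_text("foo  bar\nbaz") yields ["foo", "  ", "bar", "\n", "baz"]:
# runs of whitespace stay together as a single token, except that each newline becomes
# its own token so the word-wrapping code below can detect forced line breaks.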
def emit_table_as_text(f,table_id,tp):
f.write(("="*len(tp.display.header))+"\n")
f.write(tp.display.header+"\n")
f.write(("="*len(tp.display.header))+"\n")
f.write("\n")
#
if not tp.description == None:
f.write(tp.description+"\n")
f.write("\n")
#
if not tp.display.disptable == None:
desci = 0
for ci in range(len(tp.display.colsiz)):
if not tp.display.colhdr[ci] == None and not tp.display.coldesc[ci] == None:
x = tp.display.colhdr[ci] + ": " + tp.display.coldesc[ci]
f.write(" - "+x+"\n")
desci = desci + 1
if desci > 0:
f.write("\n")
#
for ci in range(len(tp.display.colsiz)):
if ci > 0:
f.write(" | ")
x = ""
if not tp.display.colhdr[ci] == None:
x = tp.display.colhdr[ci]
if not ci == len(tp.display.colsiz)-1:
x = (x + (" "*tp.display.colsiz[ci]))[0:tp.display.colsiz[ci]]
f.write(x)
f.write("\n")
#
for ci in range(len(tp.display.colsiz)):
if ci > 0:
f.write("=|=")
x = "="*tp.display.colsiz[ci]
f.write(x)
f.write("\n")
#
if len(tp.display.disptable) > 0:
for row in tp.display.disptable:
columns = row.get("columns")
if not columns == None:
# NTS: array of arrays, because column values can have newlines
coltext = [ ]
collines = 1
# first pass: grab each column value, split by newline, stuff into coltext
for coli in range(len(columns)):
col = columns[coli]
val = col.get("value")
if val == None:
val = ""
#
vallines = val.split('\n')
coltext.append(vallines)
# how many vertical lines will this column need?
# to render correctly, all columns will be printed with this many vertical lines.
if collines < len(vallines):
collines = len(vallines)
# second pass: draw the columns, multiple lines if needed
for collc in range(collines):
show_sources = False
if collc == 0 and row.get("same key") == True:
show_sources = True
#
for coli in range(len(columns)):
if coli > 0:
f.write(" | ")
#
val = ""
cola = coltext[coli]
if collc < len(cola):
val = cola[collc]
if val == None:
val = ""
#
x = (val + (" "*tp.display.colsiz[coli]))[0:tp.display.colsiz[coli]]
#
if not show_sources == True and coli == len(columns) - 1:
x = x.rstrip()
#
f.write(x)
#
if show_sources == True:
sia = row.get("source index")
if not sia == None:
for si in sia:
f.write(" [*"+str(si)+"]")
#
f.write("\n")
# Problem: If any column has multiple lines, the per-line text format becomes confusing, and lines are needed to visually separate them
if tp.display.columns_have_newlines == True:
for ci in range(len(tp.display.colsiz)):
if ci > 0:
f.write("-+-")
x = "-"*tp.display.colsiz[ci]
f.write(x)
f.write("\n")
#
f.write("\n")
#
if not tp.sources == None:
f.write("Sources\n")
f.write("-------\n")
for sii in range(len(tp.sources)):
sobj = tp.sources[sii]
if not int(sobj.get("source index")) == sii:
raise Exception("source index is wrong")
head = " [*"+str(sii)+"] "
f.write(head)
if "book" in sobj:
book = sobj["book"]
elif "website" in sobj:
book = sobj["website"]
else:
book = None
if not book == None:
where = sobj.get("where")
citation = sobj.get("citation")
if not citation == None:
x = ""
title = citation.get("title")
if not title == None:
if not x == "":
x = x + ", "
x = x + title
author = citation.get("author")
if not author == None:
if not x == "":
x = x + ", "
x = x + author
publisher = citation.get("publisher")
if not publisher == None:
if not x == "":
x = x + ", "
x = x + publisher
year = citation.get("year")
if not year == None:
if not x == "":
x = x + ", "
x = x + str(year)
if not x == "":
f.write(x+"\n")
#
url = citation.get("url")
if not url == None:
f.write(" "*len(head))
f.write("URL: "+url+"\n")
if not where == None:
x = ""
for whi in where:
y = ""
if "path" in whi:
if not y == "":
y = y + ", "
y = y + whi["path"]
if "title" in whi:
if not y == "":
y = y + ", "
y = y + whi["title"]
if not y == "":
if not x == "":
x = x + " => "
x = x + y
if not x == "":
f.write(" "*len(head))
f.write(x+"\n")
#
f.write("\n")
#
if not tp.notes == None and len(tp.notes) > 0:
f.write("Notes\n")
f.write("-----\n")
yline = 0
for note in tp.notes:
            # If the previous note spanned multiple lines, add an extra blank line
if yline > 0:
f.write("\n")
#
words = text_split_text(note)
count = 0
xstart = 3
xend = 132
yline = 0
x = xstart
for word in words:
if len(word) == 0:
continue
# collapse spaces
if word[0] == ' ' or word[0] == "\t":
word = " "
# print and count it
if word == "\n" or (x+len(word)) > xend:
yline = yline + 1
f.write("\n")
x = xstart
if not word == "\n":
if x == xstart and (word[0] == " " or word[0] == "\t"):
continue
if x == xstart:
if yline == 0:
f.write(" * ")
else:
f.write(" ")
f.write(word)
x = x + len(word)
#
if x > xstart:
f.write("\n")
f.write("\n")
#
f.write("\n\n")
def html_out_begin(f):
    f.write("<!DOCTYPE html>")
    f.write("<html>")
    f.write("<head>")
    f.write("<meta charset=\"utf-8\">")
    f.write("<title>Tables</title>")
    f.write("</head>")
    f.write("<body>");
def html_out_end(f):
f.write("</body>");
f.write("</html>")
def emit_table_as_html(f,table_id,tp):
#
title = tp.display.header
#
f.write("<h2><span style=\"border-bottom: double;\">"+html_module.html_escape(title)+"</span></h2>")
#
if not tp.description == None:
f.write("<p>"+html_module.html_escape(tp.description)+"</p>")
#
if not tp.display.disptable == None:
desci = 0
for ci in range(len(tp.display.colsiz)):
if not tp.display.colhdr[ci] == None and not tp.display.coldesc[ci] == None:
if desci == 0:
f.write("<table>")
f.write("<tr style=\"font-size: 0.9em;\">")
f.write("<td style=\"font-weight: 700; padding-right: 1em; white-space: pre; text-align: left;\">"+html_module.html_escape(tp.display.colhdr[ci])+":</td>")
f.write("<td style=\"text-align: left;\">"+html_module.html_escape(tp.display.coldesc[ci])+"</td>")
f.write("</tr>")
desci = desci + 1
if desci > 0:
f.write("</table><br>")
#
f.write("<table style=\"border: 1px solid black; border-spacing: 0px;\">")
#
f.write("<tr style=\"background-color: rgb(224,224,224);\">")
for ci in range(len(tp.display.colsiz)):
x = ""
if not tp.display.colhdr[ci] == None:
x = tp.display.colhdr[ci]
#
if ci == len(tp.display.colsiz)-1:
style = ""
else:
style = "border-right: 1px solid black;"
#
style = style + " padding: 0.2em; padding-right: 1em;"
style = style + " border-bottom: 1px solid black; font-size: 0.9em; text-align: left;"
#
f.write("<th style=\""+style.strip()+"\">"+html_module.html_escape(x)+"</th>")
f.write("</tr>")
#
if len(tp.display.disptable) > 0:
for rowidx in range(len(tp.display.disptable)):
row = tp.display.disptable[rowidx]
columns = row.get("columns")
if not columns == None:
f.write("<tr valign=\"top\">")
show_sources = False
# HTML can handle multi-line just fine for us
for coli in range(len(columns)):
#
col = columns[coli]
val = col.get("value")
if val == None:
val = ""
#
if row.get("same key") == True:
show_sources = True
#
f.write("<td style=\"")
f.write("white-space: pre; padding: 0.2em; padding-right: 1em; font-size: 0.9em; text-align: left;")
if not coli == len(columns)-1:
f.write(" border-right: 1px solid black;")
if not rowidx == len(tp.display.disptable)-1:
f.write(" border-bottom: 1px solid black;")
f.write("\">")
#
f.write(html_module.html_escape(val))
#
if coli == len(columns)-1 and show_sources == True:
sia = row.get("source index")
if not sia == None:
for si in sia:
f.write(" <a style=\"text-decoration: none;\" href=\"#"+table_id+"_source_"+str(si)+"\"><span style=\"font-size: 0.6em; color: rgb(0,0,192); position: relative; top: -0.5em;\"><i>[*"+str(si)+"]</i></span></a>")
#
f.write("</td>")
#
f.write("</tr>")
#
f.write("\n")
#
f.write("</table>")
#
if not tp.sources == None:
f.write("<p><span style=\"border-bottom: solid; border-bottom-width: thin;\">Sources</span></p>\n")
f.write("<table style=\"font-size: 0.8em;\">")
for sii in range(len(tp.sources)):
f.write("<tr valign=\"top\">")
sobj = tp.sources[sii]
if not int(sobj.get("source index")) == sii:
raise Exception("source index is wrong")
#
f.write("<td style=\"padding-left: 2em; padding-bottom: 1em;\" id=\""+table_id+"_source_"+str(sii)+"\"><sup style=\"color: rgb(0,0,192);\"><i>[*"+str(sii)+"]</i></sup></td>")
#
if "book" in sobj:
book = sobj["book"]
elif "website" in sobj:
book = sobj["website"]
else:
book = None
f.write("<td style=\"padding-bottom: 1em;\">")
if not book == None:
where = sobj.get("where")
citation = sobj.get("citation")
if not citation == None:
x = ""
title = citation.get("title")
if not title == None:
if not x == "":
x = x + ", "
x = x + title
author = citation.get("author")
if not author == None:
if not x == "":
x = x + ", "
x = x + author
publisher = citation.get("publisher")
if not publisher == None:
if not x == "":
x = x + ", "
x = x + publisher
year = citation.get("year")
if not year == None:
if not x == "":
x = x + ", "
x = x + str(year)
if not x == "":
f.write(html_module.html_escape(x)+"<br>")
#
url = citation.get("url")
if not url == None:
f.write("URL: <a href=\""+url+"\" target=\"_blank\">"+html_module.html_escape(url)+"</a><br>")
if not where == None:
x = ""
for whi in where:
y = ""
if "path" in whi:
if not y == "":
y = y + ", "
y = y + whi["path"]
if "title" in whi:
if not y == "":
y = y + ", "
y = y + whi["title"]
if not y == "":
if not x == "":
x = x + " => "
x = x + y
if not x == "":
f.write(html_module.html_escape(x)+"<br>")
f.write("</td>")
#
f.write("</tr>")
#
f.write("</table>")
#
if not tp.notes == None and len(tp.notes) > 0:
f.write("<p><span style=\"border-bottom: solid; border-bottom-width: thin;\">Notes</span></p>")
f.write("<ul style=\"font-size: 0.8em; white-space: pre-line;\">")
for note in tp.notes:
f.write("<li>"+html_module.html_escape(note)+"</li>")
f.write("</ul>")
#
f.write("<br><br>")
class XY:
x = None
y = None
def __init__(self,x=0,y=0):
if type(x) == XY:
self.x = x.x
self.y = x.y
else:
self.x = x
self.y = y
def __str__(self):
return "["+str(self.x)+","+str(self.y)+"]"
def __sub__(self,other):
return XY(self.x-other.x,self.y-other.y)
def __add__(self,other):
return XY(self.x+other.x,self.y+other.y)
class WH:
w = None
h = None
def __init__(self,w=0,h=0):
if type(w) == WH:
self.w = w.w
self.h = w.h
else:
self.w = w
self.h = h
def __str__(self):
return "["+str(self.w)+"x"+str(self.h)+"]"
def __sub__(self,other):
return XY(self.w-other.w,self.h-other.h)
def __add__(self,other):
return XY(self.w+other.w,self.h+other.h)
class RectRegion:
xy = None
wh = None
def __init__(self,xy=None,wh=None):
if type(xy) == RectRegion:
self.xy = XY(xy.xy)
self.wh = WH(xy.wh)
else:
self.xy = XY(xy)
if type(wh) == WH:
self.wh = wh
elif type(wh) == XY:
self.wh = WH(wh.x,wh.y)
else:
raise Exception("RectRegion wh param invalid")
def __str__(self):
return "[xy="+str(self.xy)+",wh="+str(self.wh)+"]"
class EmitPDF:
class Font:
reg = None
bold = None
italic = None
#
font1 = None
contentRegion = None
pageTitleRegion = None
pageNumberRegion = None
#
contentRegionStack = None
#
currentTitle = None
currentPage = None
pagestream = None
pageHeight = None
currentPos = None
currentDPI = None
pdfhl = None
#
def __init__(self):
self.font1 = EmitPDF.Font()
#
ll = XY(0.25,0.5)
ur = XY(8 - 0.25,11 - 0.25)
self.contentRegion = RectRegion(ll,ur-ll)
#
self.contentRegionStack = [ ]
#
ll = XY(0.25,0.25)
ur = XY(8 - 0.25,0.45)
self.pageTitleRegion = RectRegion(ll,ur-ll)
# y coord is ignored
ll = XY(0.25,0)
ur = XY(8 - 0.25,0)
self.pageTitleLine = RectRegion(ll,ur-ll)
#
ll = XY(8 - 0.5,11 - (10/72) - 0.10)
ur = XY(8 - 0.10,11 - 0.05)
self.pageNumberRegion = RectRegion(ll,ur-ll)
#
self.currentPage = None
self.pagestream = None
#
self.pdfhl = None
def pushcontentregion(self,region):
self.contentRegionStack.append(RectRegion(self.contentRegion.xy,self.contentRegion.wh))
self.contentRegion = region
def popcontentregion(self):
self.contentRegion = self.contentRegionStack.pop()
def setpdfhl(self,pdfhl):
self.pdfhl = pdfhl
def end_page(self):
if not self.pagestream == None and not self.currentPage == None:
self.pdfhl.make_page_content_stream(self.currentPage,data=self.pagestream.data())
#
if not self.pagestream == None:
if self.pagestream.intxt == True:
self.pagestream.end_text()
del self.pagestream
self.pagestream = None
#
if not self.currentPage == None:
del self.currentPage
self.currentPage = None
def new_page(self):
self.end_page()
#
self.currentPage = page = self.pdfhl.new_page()
#
self.pdfhl.add_page_font_ref(page,self.font1.reg)
self.pdfhl.add_page_font_ref(page,self.font1.bold)
self.pdfhl.add_page_font_ref(page,self.font1.italic)
#
ps = self.pagestream = pdf_module.PDFPageContentWriter(self.pdfhl)
#
self.currentDPI = self.pdfhl.page_dpi
self.pageHeight = self.pdfhl.page_size[1]
# DEBUG: Draw a dark red box around the content region-----------------------------
if False:
ps.stroke_color(0.5,0,0)
p = self.coordxlate(XY(self.contentRegion.xy))
ps.moveto(p.x,p.y)
p = self.coordxlate(XY(self.contentRegion.xy.x+self.contentRegion.wh.w,self.contentRegion.xy.y))
ps.lineto(p.x,p.y)
p = self.coordxlate(XY(self.contentRegion.xy.x+self.contentRegion.wh.w,self.contentRegion.xy.y+self.contentRegion.wh.h))
ps.lineto(p.x,p.y)
p = self.coordxlate(XY(self.contentRegion.xy.x,self.contentRegion.xy.y+self.contentRegion.wh.h))
ps.lineto(p.x,p.y)
ps.close_subpath()
ps.stroke()
# END DEBUG------------------------------------------------------------------------
# title
self.move_to(self.pageTitleRegion.xy)
self.layout_text_begin()
ps.set_text_font(self.font1.italic,10)
ps.fill_color(0,0,0)
self.layout_text(self.currentTitle,overflow="stop")
self.newline(y=(self.layoutVadj.y*5)/4) # from baseline to below text
self.layout_text_end()
vadj = XY(0,self.currentPos.y)
#
p = self.coordxlate(self.pageTitleLine.xy + vadj)
p2 = self.coordxlate(self.pageTitleLine.xy + vadj + XY(self.pageTitleLine.wh.w,0))
ps.stroke_color(0,0,0)
ps.linewidth(0.5)
ps.moveto(p.x,p.y)
ps.lineto(p2.x,p2.y)
ps.stroke()
# page number (top)
vadj = XY(0,10/self.currentDPI) # remember that text is rendered from a baseline, not from the top
ps.begin_text()
ps.set_text_font(self.font1.italic,10)
ptxt = str(self.currentPage.index)
pw = ps.text_width(ptxt) # get text width to right-justify
ps.fill_color(0,0,0)
p = self.coordxlate(XY(self.pageTitleRegion.xy.x+self.pageTitleRegion.wh.w-pw,self.pageTitleRegion.xy.y)+vadj)
ps.text_move_to(p.x,p.y) # right justify
ps.text(ptxt)
ps.end_text()
# page number (bottom)
vadj = XY(0,10/self.currentDPI) # remember that text is rendered from a baseline, not from the top
ps.begin_text()
ps.set_text_font(self.font1.italic,10)
ptxt = str(self.currentPage.index)
pw = ps.text_width(ptxt) # get text width to right-justify
ps.fill_color(0,0,0)
p = self.coordxlate(XY(self.pageNumberRegion.xy.x+self.pageNumberRegion.wh.w-pw,self.pageNumberRegion.xy.y)+vadj)
ps.text_move_to(p.x,p.y) # right justify
ps.text(ptxt)
ps.end_text()
#
self.move_to(self.contentRegion.xy)
#
return page
def coordxlateunscaled(self,xy):
# PDF coordinate system is bottom-up, we think top down
return XY(xy.x,self.pageHeight-xy.y)
def coordxlate(self,xy):
tx = self.coordxlateunscaled(xy)
return XY(tx.x*self.currentDPI,tx.y*self.currentDPI)
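    # For example, assuming pdf_module reports page_dpi == 72 and an 11-inch page
    # height (matching the 8x11-inch layout used above), the top-down document point
    # (1.0, 1.0) maps to the bottom-up PDF user-space point (72.0, 720.0).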
def ps(self):
return self.pagestream
def dpi(self):
return self.currentDPI
def set_title(self,title):
self.currentTitle = title
def content_end(self):
return XY(self.contentRegion.xy.x + self.contentRegion.wh.w,self.contentRegion.xy.y + self.contentRegion.wh.h)
def newline(self,*,x=0,y=0):
self.currentPos.x = self.contentRegion.xy.x + x
self.currentPos.y = self.currentPos.y + y
def tchr_classify(self,c):
if c == '\n' or c == '\t' or c == ' ':
return "w"
return "c"
def split_text(self,text):
e = [ ]
w = ""
cls = None
for c in text:
ncls = self.tchr_classify(c)
if not cls == ncls or c == '\n':
cls = ncls
if not w == "":
e.append(w)
w = ""
#
w = w + c
if not w == "":
e.append(w)
return e
def layout_text_begin(self):
if not self.pagestream.intxt:
self.pagestream.begin_text()
self.layoutStarted = True
self.layoutWritten = 0
self.layoutLineTextBuf = ""
self.layoutStartedAt = XY(self.currentPos)
self.layoutMaxEnd = XY(self.currentPos)
self.layoutVadj = XY(0,0)
def layout_text_end(self):
if len(self.layoutLineTextBuf) > 0:
self.pagestream.text(self.layoutLineTextBuf)
self.layoutLineTextBuf = ""
self.pagestream.end_text()
self.layoutStarted = False
def move_to(self,xy=None,*,x=None,y=None):
if not xy == None:
if not type(xy) == XY:
raise Exception("move_to() without XY object");
self.currentPos = XY(xy)
if not x == None:
self.currentPos.x = x
if not y == None:
self.currentPos.y = y
def layout_span_page(self,*,noEmitLeading=False,noEmitPosition=False):
savedFont = self.pagestream.currentFont
savedFontSize = self.pagestream.currentFontSize
self.layout_text_flush()
self.end_page()
self.new_page()
if not savedFont == None and not savedFontSize == None:
self.layout_text_begin()
self.pagestream.set_text_font(savedFont,savedFontSize)
if not noEmitLeading == True:
self.pagestream.text_leading(self.pagestream.currentFontSize)
self.pagestream.fill_color(0,0,0)
self.layoutVadj = XY(0,self.pagestream.currentFontSize/self.currentDPI)
#
if not noEmitPosition == True:
tp = self.coordxlate(self.currentPos+self.layoutVadj)
self.pagestream.text_move_to(tp.x,tp.y)
def layout_text_flush(self):
if len(self.layoutLineTextBuf) > 0:
self.pagestream.text(self.layoutLineTextBuf)
self.layoutLineTextBuf = ""
def layout_text(self,text,*,overflow="wrap",pagespan=False):
stop_xy = self.content_end()
elements = self.split_text(text)
#
if self.layoutWritten == 0:
self.layoutVadj = XY(0,self.pagestream.currentFontSize/self.currentDPI)
#
if pagespan == True:
# NTS: PDF parsers like pdf.js in Firefox stop parsing a line if more than one TL occurs in a BT..ET block
if (self.currentPos+self.layoutVadj).y > (self.contentRegion.xy.y+self.contentRegion.wh.h):
self.layout_span_page(noEmitLeading=True,noEmitPosition=True)
#
self.pagestream.text_leading(self.pagestream.currentFontSize)
self.layoutWritten = 1
#
tp = self.coordxlate(self.currentPos+self.layoutVadj)
self.pagestream.text_move_to(tp.x,tp.y)
#
for elem in elements:
ew = self.pagestream.text_width(elem)
fx = self.currentPos.x + ew
if fx > stop_xy.x or elem == "\n":
if overflow == "stop" and not elem == "\n":
break
#
if len(self.layoutLineTextBuf) > 0:
self.pagestream.text(self.layoutLineTextBuf)
self.layoutLineTextBuf = ""
#
if self.layoutMaxEnd.x < self.currentPos.x:
self.layoutMaxEnd.x = self.currentPos.x
#
self.pagestream.text_next_line()
self.newline(y=(self.pagestream.currentFontSize/self.currentDPI))
#
if self.layoutMaxEnd.y < self.currentPos.y:
self.layoutMaxEnd.y = self.currentPos.y
#
if not elem == "\n":
if pagespan == True:
if (self.currentPos+self.layoutVadj).y > (self.contentRegion.xy.y+self.contentRegion.wh.h):
self.layout_span_page()
#
self.layoutLineTextBuf = self.layoutLineTextBuf + elem
self.currentPos.x = self.currentPos.x + ew
#
if self.layoutMaxEnd.x < self.currentPos.x:
self.layoutMaxEnd.x = self.currentPos.x
def emit_table_as_pdf(emitpdf,pdf,pdfhl,table_id,tp):
emitpdf.set_title(tp.display.header)
if not emitpdf.currentPage == None:
page1 = emitpdf.currentPage
else:
page1 = emitpdf.new_page()
#
if (emitpdf.currentPos.y+(10*9)/emitpdf.currentDPI) > (emitpdf.contentRegion.xy.y+emitpdf.contentRegion.wh.h):
page1 = emitpdf.new_page()
#
ps = emitpdf.ps()
# header
emitpdf.newline(y=16/emitpdf.currentDPI)
#
vadj = XY(0,16/emitpdf.currentDPI) # remember that text is rendered from a baseline, not from the top
emitpdf.layout_text_begin()
ps.set_text_font(emitpdf.font1.bold,16)
ps.fill_color(0,0,0)
emitpdf.layout_text(tp.display.header,overflow="stop")
emitpdf.layout_text_end()
ps = emitpdf.ps()
emitpdf.newline(y=emitpdf.layoutVadj.y)
emitpdf.newline(y=16/emitpdf.currentDPI/5) # 1/5th the font size
hdrlinew = emitpdf.layoutMaxEnd.x - emitpdf.layoutStartedAt.x
#
p = emitpdf.coordxlate(emitpdf.currentPos)
ps.stroke_color(0,0,0)
ps.linewidth(0.5)
ps.moveto(p.x,p.y)
lt = emitpdf.contentRegion.wh.w
l = hdrlinew
if l > lt:
l = lt
p2 = emitpdf.coordxlate(emitpdf.currentPos+XY(l,0))
ps.lineto(p2.x,p2.y)
ps.stroke()
#
emitpdf.newline(y=10/emitpdf.currentDPI)
#
if not tp.description == None:
emitpdf.layout_text_begin()
ps.set_text_font(emitpdf.font1.reg,10)
emitpdf.layout_text(tp.description,pagespan=True)
emitpdf.layout_text("\n\n")
emitpdf.layout_text_end()
ps = emitpdf.ps()
#
if not tp.display.disptable == None:
desci = 0
for ci in range(len(tp.display.colsiz)):
if not tp.display.colhdr[ci] == None and not tp.display.coldesc[ci] == None:
ps = emitpdf.ps()
emitpdf.layout_text_begin()
#
ps.set_text_font(emitpdf.font1.bold,8)
emitpdf.layout_text(tp.display.colhdr[ci],pagespan=True)
emitpdf.layout_text_flush()
ps = emitpdf.ps()
#
ps.set_text_font(emitpdf.font1.reg,8)
emitpdf.layout_text(": "+tp.display.coldesc[ci],pagespan=True)
emitpdf.layout_text("\n",pagespan=True)
#
emitpdf.layout_text_end()
ps = emitpdf.ps()
emitpdf.newline(y=2/emitpdf.currentDPI)
#
desci = desci + 1
ps = emitpdf.ps()
if desci > 0:
emitpdf.newline(y=10/emitpdf.currentDPI)
#
fontSize = 10
while True:
dpiwidths = [ ]
dpiposx = [ ]
dpitexx = [ ]
dpiposw = [ ]
dpitexw = [ ]
for ci in range(len(tp.display.colsiz)):
x = ""
if not tp.display.colhdr[ci] == None:
x = tp.display.colhdr[ci]
lines = x.split('\n')
mw = 0
for line in lines:
lw = pdfhl.fontwidth(emitpdf.font1.reg,fontSize,line)
if mw < lw:
mw = lw
dpiwidths.append(mw)
dpiposx.append(None)
dpitexx.append(None)
dpiposw.append(None)
dpitexw.append(None)
#
for rowidx in range(len(tp.display.disptable)):
row = tp.display.disptable[rowidx]
columns = row.get("columns")
if not columns == None:
for coli in range(len(columns)):
col = columns[coli]
val = col.get("value")
if val == None:
val = ""
lines = val.split('\n')
for line in lines:
lw = pdfhl.fontwidth(emitpdf.font1.reg,fontSize,line)
if dpiwidths[coli] < lw:
dpiwidths[coli] = lw
# decide where to layout the tables
hcols = 1
hcolw = 0
hxpad = 0.05
hipad = 0.4
hx = 0
for ci in range(len(tp.display.colsiz)):
dpiposx[ci] = hx
dpitexx[ci] = hxpad + hx
dpiposw[ci] = hxpad + dpiwidths[ci] + hxpad
dpitexw[ci] = dpiwidths[ci]
hx = dpiposx[ci] + dpiposw[ci]
#
maxw = emitpdf.contentRegion.wh.w
if hx > maxw:
fontSize = fontSize - 1
if fontSize <= 4:
raise Exception("Cannot fit tables")
continue
else:
hcols = math.floor(maxw / (hx + hipad))
if hcols < 1:
hcols = 1
hcolw = hx + hipad
hcoltw = (hx + hipad) * hcols
break
# determine table pos, make new page to ensure enough room
tablepos = XY(emitpdf.contentRegion.xy.x,emitpdf.currentPos.y)
if (tablepos.y+((fontSize*6)/emitpdf.currentDPI)) > (emitpdf.contentRegion.xy.y+emitpdf.contentRegion.wh.h):
page1 = emitpdf.new_page()
ps = emitpdf.ps()
tablepos = XY(emitpdf.contentRegion.xy.x,emitpdf.currentPos.y)
# draw table row by row
drawcol = 0
drawrowtop = 0
drawrowcount = 0
rowh = ((5.0/4.0)*fontSize)/emitpdf.currentDPI
largestY = 0 # for multi column rendering, need to remember the largest Y coordinate
rowidx = 0
while rowidx < len(tp.display.disptable):
row = tp.display.disptable[rowidx]
columns = row.get("columns")
#
if drawrowcount == 0:
emitpdf.move_to(XY(tablepos.x+(hcolw*drawcol),tablepos.y))
drawrowtop = emitpdf.currentPos.y
ps.fill_color(0.8,0.8,0.8)
p = emitpdf.coordxlate(emitpdf.currentPos)
ps.moveto(p.x,p.y)
p = emitpdf.coordxlate(XY(emitpdf.currentPos.x+hx,emitpdf.currentPos.y))
ps.lineto(p.x,p.y)
p = emitpdf.coordxlate(XY(emitpdf.currentPos.x+hx,emitpdf.currentPos.y+rowh))
ps.lineto(p.x,p.y)
p = emitpdf.coordxlate(XY(emitpdf.currentPos.x,emitpdf.currentPos.y+rowh))
ps.lineto(p.x,p.y)
ps.close_subpath()
ps.fill()
#
ps.stroke_color(0,0,0)
ps.linewidth(0.5)
p = emitpdf.coordxlate(emitpdf.currentPos)
ps.moveto(p.x,p.y)
p = emitpdf.coordxlate(XY(emitpdf.currentPos.x+hx,emitpdf.currentPos.y))
ps.lineto(p.x,p.y)
ps.stroke()
p = emitpdf.coordxlate(emitpdf.currentPos+XY(0,rowh))
ps.moveto(p.x,p.y)
p = emitpdf.coordxlate(XY(emitpdf.currentPos.x+hx,emitpdf.currentPos.y+rowh))
ps.lineto(p.x,p.y)
ps.stroke()
#
for coli in range(len(columns)):
val = tp.display.colhdr[coli]
if val == None:
val = ""
#
ps.fill_color(0,0,0)
emitpdf.currentPos.x = tablepos.x+(hcolw*drawcol) + dpiposx[coli] + hxpad
emitpdf.layout_text_begin()
ps.set_text_font(emitpdf.font1.reg,fontSize)
emitpdf.layout_text(val)
emitpdf.layout_text_end()
ps = emitpdf.ps()
#
emitpdf.newline(y=rowh)
#
if largestY < emitpdf.currentPos.y:
largestY = emitpdf.currentPos.y
#
maxlines = 1
colvals = [ ]
for coli in range(len(columns)):
col = columns[coli]
val = col.get("value")
if val == None:
val = ""
#
vallines = val.split('\n')
if maxlines < len(vallines):
maxlines = len(vallines)
#
colvals.append(vallines)
#
if (emitpdf.currentPos.y+(rowh*maxlines)) > (emitpdf.contentRegion.xy.y+emitpdf.contentRegion.wh.h):
if drawrowcount > 0:
for coli in range(len(columns)):
x = tablepos.x+(hcolw*drawcol) + dpiposx[coli]
ps.stroke_color(0,0,0)
ps.linewidth(0.5)
p = emitpdf.coordxlate(XY(x,drawrowtop))
ps.moveto(p.x,p.y)
p = emitpdf.coordxlate(XY(x,emitpdf.currentPos.y))
ps.lineto(p.x,p.y)
ps.stroke()
#
x = tablepos.x+(hcolw*drawcol)+hx
ps.stroke_color(0,0,0)
ps.linewidth(0.5)
p = emitpdf.coordxlate(XY(x,drawrowtop))
ps.moveto(p.x,p.y)
p = emitpdf.coordxlate(XY(x,emitpdf.currentPos.y))
ps.lineto(p.x,p.y)
ps.stroke()
#
drawrowcount = 0
drawcol = drawcol + 1
if drawcol >= hcols:
drawcol = 0
largestY = 0
page1 = emitpdf.new_page()
ps = emitpdf.ps()
tablepos = XY(emitpdf.contentRegion.xy.x,emitpdf.currentPos.y)
#
pdf.currentPos = XY(tablepos.x,tablepos.y)
continue
#
show_sources = False
if row.get("same key") == True:
show_sources = True
#
coltop = XY(emitpdf.currentPos.x,emitpdf.currentPos.y)
for coli in range(len(columns)):
colv = colvals[coli]
#
ps.fill_color(0,0,0)
tx = emitpdf.currentPos.x = tablepos.x+(hcolw*drawcol) + dpiposx[coli] + hxpad
emitpdf.currentPos.y = coltop.y
emitpdf.layout_text_begin()
ps.set_text_font(emitpdf.font1.reg,fontSize)
for line in colv:
emitpdf.currentPos.x = tx
emitpdf.layout_text(line)
emitpdf.layout_text_flush()
ps = emitpdf.ps()
ps.text_next_line()
emitpdf.layout_text_end()
ps = emitpdf.ps()
#
if show_sources == True and coli == len(columns)-1:
sia = row.get("source index")
if not sia == None and len(sia) > 0:
ps.fill_color(0,0,0.75)
emitpdf.currentPos.x = coltop.x+(hcolw*drawcol) + dpiposx[coli] + hxpad + pdfhl.fontwidth(emitpdf.font1.reg,fontSize,colvals[coli][0])
emitpdf.currentPos.y = coltop.y
emitpdf.layout_text_begin()
ps.set_text_font(emitpdf.font1.italic,5)
for si in sia:
refmark = " [*"+str(si)+"]"
emitpdf.layout_text(refmark)
emitpdf.layout_text_end()
ps = emitpdf.ps()
ps.fill_color(0,0,0)
#
emitpdf.currentPos.x = coltop.x
emitpdf.currentPos.y = coltop.y + rowh
if maxlines > 1:
emitpdf.currentPos.y = emitpdf.currentPos.y + ((maxlines - 1) * (fontSize/emitpdf.currentDPI))
#
ps.stroke_color(0,0,0)
ps.linewidth(0.5)
p = emitpdf.coordxlate(emitpdf.currentPos+XY(hcolw*drawcol,0))
ps.moveto(p.x,p.y)
p = emitpdf.coordxlate(XY(emitpdf.currentPos.x+(hcolw*drawcol)+hx,emitpdf.currentPos.y))
ps.lineto(p.x,p.y)
ps.stroke()
#
if largestY < emitpdf.currentPos.y:
largestY = emitpdf.currentPos.y
#
drawrowcount = drawrowcount + 1
rowidx = rowidx + 1
#
if drawrowcount > 0:
for coli in range(len(columns)):
x = tablepos.x+(hcolw*drawcol) + dpiposx[coli]
ps.stroke_color(0,0,0)
ps.linewidth(0.5)
p = emitpdf.coordxlate(XY(x,drawrowtop))
ps.moveto(p.x,p.y)
p = emitpdf.coordxlate(XY(x,emitpdf.currentPos.y))
ps.lineto(p.x,p.y)
ps.stroke()
#
x = tablepos.x+(hcolw*drawcol)+hx
ps.stroke_color(0,0,0)
ps.linewidth(0.5)
p = emitpdf.coordxlate(XY(x,drawrowtop))
ps.moveto(p.x,p.y)
p = emitpdf.coordxlate(XY(x,emitpdf.currentPos.y))
ps.lineto(p.x,p.y)
ps.stroke()
#
emitpdf.newline(y=(8+2)/emitpdf.currentDPI)
if emitpdf.currentPos.y < largestY:
emitpdf.currentPos.y = largestY
#
if not tp.sources == None:
ps.fill_color(0,0,0)
emitpdf.layout_text_begin()
ps.set_text_font(emitpdf.font1.reg,10)
emitpdf.layout_text("Sources\n",pagespan=True)
emitpdf.layout_text_end()
ps = emitpdf.ps()
emitpdf.newline(y=10/emitpdf.currentDPI/5) # 1/5th the font size
hdrlinew = emitpdf.layoutMaxEnd.x - emitpdf.layoutStartedAt.x
#
p = emitpdf.coordxlate(emitpdf.currentPos)
ps.stroke_color(0,0,0)
ps.linewidth(0.5)
ps.moveto(p.x,p.y)
lt = emitpdf.contentRegion.wh.w
l = hdrlinew
if l > lt:
l = lt
p2 = emitpdf.coordxlate(emitpdf.currentPos+XY(l,0))
ps.lineto(p2.x,p2.y)
ps.stroke()
#
emitpdf.newline(y=5/emitpdf.currentDPI)
#
for sii in range(len(tp.sources)):
sobj = tp.sources[sii]
if not int(sobj.get("source index")) == sii:
raise Exception("source index is wrong")
#
emitpdf.currentPos.x = emitpdf.currentPos.x + 0.1
nPos = emitpdf.currentPos.x + 0.3
emitpdf.layout_text_begin()
#
refmark = "[*"+str(sii)+"]"
ps.set_text_font(emitpdf.font1.italic,8)
ps.fill_color(0,0,0.75)
emitpdf.layout_text(refmark,pagespan=True)
emitpdf.layout_text_end()
ps = emitpdf.ps()
ps.fill_color(0,0,0)
#
if "book" in sobj:
book = sobj["book"]
elif "website" in sobj:
book = sobj["website"]
else:
book = None
emit = False
if not book == None:
where = sobj.get("where")
citation = sobj.get("citation")
if not citation == None:
x = ""
title = citation.get("title")
if not title == None:
if not x == "":
x = x + ", "
x = x + title
author = citation.get("author")
if not author == None:
if not x == "":
x = x + ", "
x = x + author
publisher = citation.get("publisher")
if not publisher == None:
if not x == "":
x = x + ", "
x = x + publisher
year = citation.get("year")
if not year == None:
if not x == "":
x = x + ", "
x = x + str(year)
if not x == "":
#
if emit == False:
emit = True
else:
emitpdf.newline(y=4/emitpdf.currentDPI)
#
emitpdf.currentPos.x = nPos
emitpdf.layout_text_begin()
ps.set_text_font(emitpdf.font1.reg,8)
ps.fill_color(0,0,0)
emitpdf.layout_text(x+"\n",pagespan=True)
emitpdf.layout_text_end()
ps = emitpdf.ps()
emit = True
#
url = citation.get("url")
if not url == None: # TODO: I know PDF allows this... how do you make it clickable so that it loads the web address?
#
if emit == False:
emit = True
else:
emitpdf.newline(y=4/emitpdf.currentDPI)
#
emitpdf.currentPos.x = nPos
emitpdf.layout_text_begin()
ps.set_text_font(emitpdf.font1.reg,8)
ps.fill_color(0,0,0)
emitpdf.layout_text("URL: ",pagespan=True)
emitpdf.layout_text(url+"\n",pagespan=True)
emitpdf.layout_text_end()
ps = emitpdf.ps()
if not where == None:
x = ""
for whi in where:
y = ""
if "path" in whi:
if not y == "":
y = y + ", "
y = y + whi["path"]
if "title" in whi:
if not y == "":
y = y + ", "
y = y + whi["title"]
if not y == "":
if not x == "":
x = x + " => "
x = x + y
if not x == "":
#
if emit == False:
emit = True
else:
emitpdf.newline(y=4/emitpdf.currentDPI)
#
emitpdf.currentPos.x = nPos
emitpdf.layout_text_begin()
ps.set_text_font(emitpdf.font1.italic,8)
ps.fill_color(0,0,0)
emitpdf.layout_text(x+"\n",pagespan=True)
emitpdf.layout_text_end()
ps = emitpdf.ps()
#
emitpdf.newline(y=(8+2)/emitpdf.currentDPI)
#
emitpdf.newline(y=2/emitpdf.currentDPI)
#
if not tp.notes == None and len(tp.notes) > 0:
emitpdf.layout_text_begin()
ps.set_text_font(emitpdf.font1.reg,10)
emitpdf.layout_text("Notes\n",pagespan=True)
emitpdf.layout_text_end()
ps = emitpdf.ps()
emitpdf.newline(y=10/emitpdf.currentDPI/5) # 1/5th the font size
hdrlinew = emitpdf.layoutMaxEnd.x - emitpdf.layoutStartedAt.x
#
p = emitpdf.coordxlate(emitpdf.currentPos)
ps.stroke_color(0,0,0)
ps.linewidth(0.5)
ps.moveto(p.x,p.y)
lt = emitpdf.contentRegion.wh.w
l = hdrlinew
if l > lt:
l = lt
p2 = emitpdf.coordxlate(emitpdf.currentPos+XY(l,0))
ps.lineto(p2.x,p2.y)
ps.stroke()
#
emitpdf.newline(y=5/emitpdf.currentDPI)
lx = emitpdf.currentPos.x
lmargin = 0.2
lbulletx = 0.1
for note in tp.notes:
emitpdf.currentPos.x = lx
#
nregion = RectRegion(emitpdf.contentRegion)
nregion.xy = nregion.xy + XY(lmargin,0)
nregion.wh.w = nregion.wh.w - lmargin
#
emitpdf.currentPos.x = lx + lbulletx
ps.fill_color(0.25,0.25,0.25)
cx = 0.0
cy = (8/emitpdf.currentDPI)*0.5*(5.0/4.0)
cw = (8/emitpdf.currentDPI)*0.4
ch = (8/emitpdf.currentDPI)*0.4
#
p = emitpdf.coordxlate(XY(emitpdf.currentPos.x+cx-(cw/2.0),emitpdf.currentPos.y+cy-(cw/2.0)))
ps.moveto(p.x,p.y)
p = emitpdf.coordxlate(XY(emitpdf.currentPos.x+cx+(cw/2.0),emitpdf.currentPos.y+cy-(cw/2.0)))
ps.lineto(p.x,p.y)
p = emitpdf.coordxlate(XY(emitpdf.currentPos.x+cx+(cw/2.0),emitpdf.currentPos.y+cy+(cw/2.0)))
ps.lineto(p.x,p.y)
p = emitpdf.coordxlate(XY(emitpdf.currentPos.x+cx-(cw/2.0),emitpdf.currentPos.y+cy+(cw/2.0)))
ps.lineto(p.x,p.y)
#
ps.close_subpath()
ps.fill()
#
emitpdf.currentPos.x = lx + lmargin
#
emitpdf.pushcontentregion(nregion)
emitpdf.layout_text_begin()
ps.set_text_font(emitpdf.font1.reg,8)
emitpdf.layout_text(note+"\n",pagespan=True)
emitpdf.layout_text_end()
ps = emitpdf.ps()
emitpdf.popcontentregion()
#
emitpdf.currentPos.x = lx
#
emitpdf.newline(y=5/emitpdf.currentDPI)
os.system("rm -Rf reference/text; mkdir -p reference/text")
os.system("rm -Rf reference/html; mkdir -p reference/html")
os.system("rm -Rf reference/pdf; mkdir -p reference/pdf")
tables_json = common_json_help_module.load_json("compiled/tables.json")
ftxt = open("reference/text/tables.txt","w",encoding="UTF-8")
fhtml = open("reference/html/tables.htm","w",encoding="UTF-8")
fpdf = open("reference/pdf/tables.pdf","wb")
html_out_begin(fhtml)
if True:
emitpdf = EmitPDF()
#
pdf = pdf_module.PDFGen()
pdfhl = pdf_module.PDFGenHL(pdf)
emitpdf.setpdfhl(pdfhl)
# -- font 1: regular
emitpdf.font1.reg = pdfhl.add_font({
pdf_module.PDFName("Subtype"): pdf_module.PDFName("TrueType"),
pdf_module.PDFName("Name"): pdf_module.PDFName("F1"),
pdf_module.PDFName("Encoding"): pdf_module.PDFName("WinAnsiEncoding"),
pdf_module.PDFName("BaseFont"): pdf_module.PDFName("ABCDEE+Ubuntu")
},
desc={
},
ttffile="ttf/Ubuntu-R.ttf")
# -- font 1: bold
emitpdf.font1.bold = pdfhl.add_font({
pdf_module.PDFName("Subtype"): pdf_module.PDFName("TrueType"),
pdf_module.PDFName("Name"): pdf_module.PDFName("F2"),
pdf_module.PDFName("Encoding"): pdf_module.PDFName("WinAnsiEncoding"),
pdf_module.PDFName("BaseFont"): pdf_module.PDFName("ABCDEE+Ubuntu")
},
desc={
},
ttffile="ttf/Ubuntu-B.ttf")
# -- font 1: italic
emitpdf.font1.italic = pdfhl.add_font({
pdf_module.PDFName("Subtype"): pdf_module.PDFName("TrueType"),
pdf_module.PDFName("Name"): pdf_module.PDFName("F3"),
pdf_module.PDFName("Encoding"): pdf_module.PDFName("WinAnsiEncoding"),
pdf_module.PDFName("BaseFont"): pdf_module.PDFName("ABCDEE+Ubuntu")
},
desc={
},
ttffile="ttf/Ubuntu-RI.ttf")
# -------------- END FONTS
def table_sort_by_title(a): # a is the key from the table
r = a
t = tables.get(a)
if not t == None:
if "name" in t:
n = t["name"]
if not n == None:
r = re.sub(r"[^a-zA-Z0-9]+"," ",n).strip()
#
return r.lower()
tables = tables_json.get("tables")
if not tables == None:
torder = sorted(tables,key=table_sort_by_title)
for table_id in torder:
tp = table_presentation_module.TablePresentation(tables[table_id])
emit_table_as_text(ftxt,table_id,tp)
emit_table_as_html(fhtml,table_id,tp)
emit_table_as_pdf(emitpdf,pdf,pdfhl,table_id,tp)
if True:
emitpdf.end_page()
pdfhl.finish()
pdf.write_file(fpdf)
fpdf.close()
html_out_end(fhtml)
fhtml.close()
ftxt.close()
```
#### File: joncampbell123/a-pile-of-documentation/pdf_module.py
```python
import os
import glob
import json
import zlib
import struct
import pathlib
import ttf_module
import pdf_module
import html_module
import common_json_help_module
import freetype # some things are super complicated and are better left to the professionals (pip3 install freetype-py)
def pdf_str_escape(v):
r = ""
for c in v:
if c == '\n':
r = r + "\\n"
elif c == '\r':
r = r + "\\r"
elif c == '\t':
r = r + "\\t"
elif c == '\b':
r = r + "\\b"
elif c == '\f':
r = r + "\\f"
elif c == '(':
r = r + "\\("
elif c == ')':
r = r + "\\)"
elif c == '\\':
r = r + "\\\\"
else:
r = r + c
#
return r
# the world's simplest PDF generator class
class PDFName:
name = None
def __init__(self,name):
self.name = name
def __str__(self):
return self.name
def __eq__(self,other):
if other == None:
return False
return self.name.__eq__(other.name)
def __hash__(self):
return self.name.__hash__()
class PDFIndirect:
id = None
def __init__(self,id):
if type(id) == PDFObject or type(id) == PDFStream:
id = id.id
elif not type(id) == int:
raise Exception("PDFIndirect id must be integer")
self.id = id
class PDFStream:
id = None
data = None
header = None
def __init__(self,data):
self.header = PDFObject({})
self.data = data
class PDFObject:
id = None
index = None
value = None
type = None # boolean, integer, real, text string, hex string, name, array, dict, stream, null, indirect
def __init__(self,value=None,*,vtype=None):
self.type = None
self.set(value,vtype=vtype)
def setkey(self,key,value):
if type(self.value) == dict:
self.value[key] = value
else:
raise Exception("Data type does not accept key value mapping ("+str(type(self.value)))
def set(self,value=None,*,vtype=None):
if vtype == None:
if type(value) == bool:
vtype = bool
elif type(value) == int:
vtype = int
elif type(value) == float:
vtype = float
elif type(value) == str:
vtype = str
elif type(value) == bytes:
vtype = bytes
elif type(value) == PDFName:
vtype = PDFName
elif type(value) == list:
vtype = list
elif type(value) == dict:
vtype = dict
elif type(value) == PDFStream:
vtype = PDFStream
elif value == None:
vtype = None
elif type(value) == PDFIndirect:
vtype = PDFIndirect
else:
raise Exception("Unable to determine type")
#
self.type = vtype
if vtype == None:
self.value = None
elif vtype == bool:
self.value = (value == True)
elif vtype == int:
if type(value) == int:
self.value = value
else:
self.value = int(str(value),0)
elif vtype == float:
if type(value) == float:
self.value = value
else:
self.value = float(str(value))
elif vtype == str:
if type(value) == str:
self.value = value
else:
self.value = str(value)
elif vtype == bytes:
if type(value) == bytes:
self.value = value
else:
raise Exception("bytes must be bytes")
elif vtype == PDFName:
if type(value) == str:
self.value = PDFName(value)
elif type(value) == PDFName:
self.value = value
else:
raise Exception("PDFName must be PDFName")
elif vtype == list:
if type(value) == list:
self.value = value
else:
raise Exception("list must be list")
elif vtype == dict:
if type(value) == dict:
self.value = value
else:
raise Exception("dict must be dict")
elif vtype == PDFStream:
if type(value) == bytes:
self.value = PDFStream(value)
elif type(value) == PDFStream:
self.value = value
else:
raise Exception("PDFStream must be PDFStream")
elif vtype == None:
if value == None:
self.value = value
else:
raise Exception("None must be none")
elif vtype == PDFIndirect:
if type(value) == int:
self.value = PDFIndirect(value)
elif type(value) == PDFIndirect:
self.value = value
elif type(value) == PDFObject:
self.value = value.id
else:
raise Exception("PDFIndirect must be PDFIndirect")
else:
raise Exception("Don't know how to handle type "+str(vtype)+" value "+str(value))
class PDFGen:
pdfver = None
objects = None
root_id = None
zlib_compress_streams = None
#
def __init__(self,optobj=None):
self.root_id = None
self.pdfver = [ 1, 4 ]
self.objects = [ None ] # object 0 is always NULL because most PDFs seem to count from 1
self.zlib_compress_streams = True
#
def new_stream_object(self,value=None):
id = len(self.objects)
obj = PDFStream(value)
self.objects.append(obj)
obj.id = id
return obj
def new_object(self,value=None,*,vtype=None):
id = len(self.objects)
obj = PDFObject(value,vtype=vtype)
self.objects.append(obj)
obj.id = id
return obj
def set_root_object(self,obj):
if type(obj) == int:
self.root_id = obj
elif type(obj) == PDFObject:
self.root_id = obj.id
else:
raise Exception("Set root node given invalid object")
#
def serialize(self,obj):
if not type(obj) == PDFObject:
obj = PDFObject(obj)
#
if obj.type == bool:
if obj.value == True:
return "true"
else:
return "false"
elif obj.type == int:
return str(obj.value)
elif obj.type == float:
return str(obj.value)
elif obj.type == str:
return "("+pdf_str_escape(obj.value)+")"
elif obj.type == bytes:
r = ""
for c in obj.value:
h = hex(c)[2:].upper() # strip off 0x, also Adobe says it can be upper or lower case, I choose upper
if len(h) < 2:
h = '0'+h
r = r + h
return "<"+r+">"
elif obj.type == PDFName:
return "/" + str(obj.value.name)
elif obj.type == list:
r = ""
for ent in obj.value:
if not r == "":
r = r + " "
r = r + self.serialize(ent)
return "["+r+"]"
elif obj.type == dict:
r = ""
for key in obj.value:
objval = obj.value[key]
if not type(key) == PDFName:
raise Exception("dict keys must be PDFName not "+type(key))
if type(key) == PDFName and (type(objval) == PDFName or type(objval) == str or type(objval) == bytes or type(objval) == list or type(objval) == PDFObject):
r = r + self.serialize(key) + self.serialize(objval)
else:
r = r + self.serialize(key) + " " + self.serialize(objval)
return "<<"+r+">>"
elif obj.type == PDFStream:
raise Exception("PDFStream serialize directly")
elif obj.type == None:
return "null"
elif obj.type == PDFIndirect:
return str(obj.value.id)+" 0 R"
else:
raise Exception("Unknown type on serialize")
#
def write_file(self,f):
objofs = [ ]
if self.root_id == None:
raise Exception("PDFGen root node not specified")
if self.root_id < 0 or self.root_id >= len(self.objects):
raise Exception("PDFGen root node out of range")
f.seek(0)
f.write(("%PDF-"+str(self.pdfver[0])+"."+str(self.pdfver[1])+"\n").encode())
f.write("%".encode()+bytes([0xC2,0xB5,0xC2,0xB6])+"\n\n".encode()) # non-ASCII chars to convince other programs this is not text
for objid in range(len(self.objects)):
obj = self.objects[objid]
if not obj == None:
if not obj.id == objid:
raise Exception("Object has wrong id")
#
if obj == None:
if len(objofs) == objid:
objofs.append(None)
else:
raise Exception("objid count error")
continue
#
if len(objofs) == objid:
objofs.append(f.tell())
else:
raise Exception("objid count error")
#
f.write((str(objid)+" 0 obj\n").encode())
if type(obj) == PDFObject:
f.write(self.serialize(obj).encode())
elif type(obj) == PDFStream:
if self.zlib_compress_streams == True and len(obj.data) > 0:
cmp = zlib.compressobj(level=9,method=zlib.DEFLATED,wbits=15,memLevel=9)
z = cmp.compress(obj.data)
z += cmp.flush(zlib.Z_FINISH)
if len(obj.data) > len(z):
obj.header.value[PDFName("Filter")] = PDFName("FlateDecode")
obj.data = z
del cmp
#
obj.header.value[PDFName("Length")] = len(obj.data)
f.write(self.serialize(obj.header).encode())
f.write("\n".encode())
f.write("stream\n".encode())
f.write(obj.data)
f.write("\n".encode())
f.write("endstream".encode())
else:
raise Exception("unsupported object")
#
f.write("\n".encode())
f.write("endobj\n\n".encode())
#
xrefofs = f.tell()
f.write("xref\n".encode())
f.write(("0 "+str(len(self.objects))+"\n").encode())
# NTS: Each xref entry must be 20 bytes each
for objid in range(len(self.objects)):
ofs = objofs[objid]
if not ofs == None:
s = str(ofs)[0:10]
if len(s) < 10:
s = ("0"*(10-len(s)))+s
f.write((s+" 00000 n \n").encode())
else:
f.write(("0000000000 00000 f \n").encode())
f.write("\n".encode())
#
f.write("trailer\n".encode())
f.write(self.serialize({
PDFName("Size"): len(self.objects),
PDFName("Root"): PDFIndirect(self.root_id)
}).encode())
f.write("\n".encode())
#
f.write("startxref\n".encode())
f.write((str(xrefofs)+"\n").encode())
f.write("%%EOF\n".encode())
class PDFGenHL:
pdf = None
root_node = None
pages_node = None
pages = None
page_size = None
page_dpi = None
def __init__(self,pdf):
self.pdf = pdf
self.page_size = [ 8, 11 ]
self.page_dpi = 72
self.pages = [ None ] # count from 1, fill slot 0, array of PDFIndirect
#
self.root_node = self.pdf.new_object({
PDFName("Type"): PDFName("Catalog"),
PDFName("Lang"): "en-US"
})
self.pdf.set_root_object(self.root_node)
#
self.pages_node = self.pdf.new_object({
PDFName("Type"): PDFName("Pages")
})
self.root_node.setkey(PDFName("Pages"), PDFIndirect(self.pages_node))
def finish(self):
self.pages_node.setkey(PDFName("Count"), len(self.pages) - 1) # slot 0 does not count
self.pages_node.setkey(PDFName("Kids"), self.pages[1:])
def new_page(self):
pagedir = self.pdf.new_object({
PDFName("Type"): PDFName("Page"),
PDFName("Parent"): PDFIndirect(self.pages_node),
PDFName("MediaBox"): [ 0, 0, self.page_size[0]*self.page_dpi, self.page_size[1]*self.page_dpi ]
})
pagedir.index = len(self.pages)
pageindir = PDFIndirect(pagedir)
self.pages.append(pageindir)
return pagedir
def get_page(self,page):
po = self.pages[page]
if po == None:
return None
return self.pdf.objects[po.id]
def make_page_content_stream(self,pageobj,*,data=bytes()):
if PDFName("Contents") in pageobj.value:
return
page_content = self.pdf.new_stream_object(data)
pageobj.setkey(PDFName("Contents"), PDFIndirect(page_content))
def add_page_font_ref(self,pageobj,info):
if not PDFName("Resources") in pageobj.value:
res_obj = PDFObject({})
pageobj.setkey(PDFName("Resources"), res_obj)
res_obj.setkey(PDFName("ProcSet"), [ PDFName("PDF"), PDFName("Text"), PDFName("ImageB"), PDFName("ImageC"), PDFName("ImageI") ])
else:
res_obj = pageobj.value[PDFName("Resources")]
#
fontname = info.value.get(PDFName("Name"))
if fontname == None:
raise Exception("Font without a name")
#
if not PDFName("Font") in res_obj.value:
font_obj = PDFObject({})
res_obj.setkey(PDFName("Font"), font_obj)
else:
font_obj = res_obj.value[PDFName("Font")]
#
if not PDFName(fontname) in font_obj.value:
font_obj.setkey(PDFName(fontname), PDFIndirect(info))
else:
raise Exception("Font already added")
def add_font(self,fontdict,*,desc=None,ttffile=None):
fontdict[PDFName("Type")] = PDFName("Font")
if not desc == None:
fdo = {
PDFName("Type"): PDFName("FontDescriptor")
}
#
if PDFName("BaseFont") in fontdict:
fdo[PDFName("FontName")] = fontdict[PDFName("BaseFont")]
#
fontpdfobj = self.pdf.new_object(fontdict)
#
if not ttffile == None:
f = open(ttffile,"rb")
ttfdata = f.read()
ttf = ttf_module.TTFFile(ttfdata)
f.close()
pdfinfo = ttf.get_info_for_pdf()
if not pdfinfo.italicAngle == None:
fdo[PDFName("ItalicAngle")] = pdfinfo.italicAngle
if not pdfinfo.fontWeight == None:
fdo[PDFName("FontWeight")] = pdfinfo.fontWeight
if not pdfinfo.xMin == None:
fdo[PDFName("FontBBox")] = [ pdfinfo.xMin, pdfinfo.yMin, pdfinfo.xMax, pdfinfo.yMax ]
if not pdfinfo.Ascent == None:
fdo[PDFName("Ascent")] = pdfinfo.Ascent
if not pdfinfo.Descent == None:
fdo[PDFName("Descent")] = pdfinfo.Descent
if not pdfinfo.firstChar == None:
fontpdfobj.value[PDFName("FirstChar")] = pdfinfo.firstChar
if not pdfinfo.lastChar == None:
fontpdfobj.value[PDFName("LastChar")] = pdfinfo.lastChar
#
# CAREFUL! Adobe documents LSB as bit 1 and MSB as bit 32
#
# bit 0: FixedPitch
# bit 1: Serif
# bit 2: Symbolic
# bit 3: Script
# bit 5: Nonsymbolic
# bit 6: Italic
# bit 16: AllCap
flags = 0
if pdfinfo.isFixedPitch:
flags = flags | (1 << 0)
#
fdo[PDFName("Flags")] = flags
# I don't know how to get this from the TTF so just guess
fdo[PDFName("StemV")] = 52
# now the complicated part: reading glyph widths, and mapping chars to glyphs, to build the Width table.
# Not only does PDF expect /Widths but it will also help the PDF export know how to lay out text properly.
ft = freetype.Face(ttffile)
char2glyph = ft.get_chars() # pair (uchar,glyph)
ft.set_char_size(1) # NTS: Not sure why this makes proper spacing with linearHoriAdvance?
widths = [ ]
while len(widths) < (pdfinfo.lastChar + 1 - pdfinfo.firstChar):
widths.append(0)
for [char,glyphidx] in char2glyph:
if char >= pdfinfo.firstChar and char <= pdfinfo.lastChar:
widx = char - pdfinfo.firstChar
ft.load_glyph(glyphidx)
widths[widx] = int(ft.glyph.linearHoriAdvance / 65.536)
#
fontdict[PDFName("Widths")] = widths
del ft
#
fontdict[PDFName("FontDescriptor")] = PDFIndirect(self.pdf.new_object(fdo))
#
if not ttffile == None:
# finally, make a stream_object for the TTF file and point to it from the font descriptor
fontstream = self.pdf.new_stream_object(ttfdata)
# PDF stream objects always list their length as /Length
# Except Font TTF streams, which require the same size specified as /Length1 (facepalm)
fontstream.header.value[PDFName("Length1")] = len(ttfdata)
fdo[PDFName("FontFile2")] = PDFIndirect(fontstream)
#
return fontpdfobj
def fontwidth(self,font,size,text):
sz = 0
fc = font.value.get(PDFName("FirstChar"))
if fc == None:
fc = 0
fw = font.value.get(PDFName("Widths"))
if not fw == None:
for c in text:
ci = ord(c) - fc
if ci >= 0 and ci < len(fw):
sz = sz + (fw[ci] * size) / 1000.0
#
return sz / self.page_dpi
class PDFPageContentWriter:
wd = None
intxt = None
pdfhl = None
currentFont = None
currentFontSize = None
def data(self):
return self.wd
def __init__(self,pdfhl):
self.wd = bytes()
self.intxt = False
self.pdfhl = pdfhl
self.currentFont = None
self.currentFontSize = None
def rstrip(self):
i = len(self.wd) - 1
while i >= 0 and self.wd[i:i+1] == b' ':
i = i - 1
i = i + 1
if not len(self.wd) == i:
self.wd = self.wd[0:i]
def begin_text(self):
if self.intxt == True:
raise Exception("Already in text")
self.intxt = True
self.wd += "BT ".encode()
def end_text(self):
if not self.intxt == True:
raise Exception("Not in text, cannot end")
self.intxt = False
self.wd += "ET ".encode()
def text_width(self,text):
if self.currentFont == None or self.currentFontSize == None:
raise Exception("No current font")
return self.pdfhl.fontwidth(self.currentFont,self.currentFontSize,text)
def set_text_font(self,font_id,size):
if not self.intxt == True:
raise Exception("Not in text")
self.currentFont = font_id
self.currentFontSize = size
if not type(font_id) == PDFObject:
raise Exception("set_text_font must specify font object")
if not PDFName("Name") in font_id.value:
raise Exception("PDFObject as font id with no Name")
font_id = font_id.value[PDFName("Name")]
if not type(font_id) == PDFName:
raise Exception("PDFObject as font id with value that is not PDFName")
font_id = font_id.name
if not type(font_id) == str:
raise Exception("PDFObject as font id with value that is PDFName not a str")
#
if font_id[0] == 'F':
font_id = font_id[1:]
#
self.rstrip()
self.wd += ("/F"+str(font_id)+" "+str(size)+" Tf ").encode()
def text_move_to(self,x,y):
if not self.intxt == True:
raise Exception("Not in text")
self.wd += (str(x)+" "+str(y)+" Td ").encode()
def text(self,text):
if not self.intxt == True:
raise Exception("Not in text")
self.rstrip()
self.wd += ("("+pdf_str_escape(text)+") Tj ").encode()
def text_leading(self,l):
if not self.intxt == True:
raise Exception("Not in text")
self.wd += (str(l)+" TL ").encode()
def text_next_line(self):
if not self.intxt == True:
raise Exception("Not in text")
self.wd += "T* ".encode()
def fill_color(self,r,g,b):
self.wd += (str(r)+" "+str(g)+" "+str(b)+" rg ").encode()
def stroke_color(self,r,g,b):
self.wd += (str(r)+" "+str(g)+" "+str(b)+" RG ").encode()
def linewidth(self,l):
self.wd += (str(l)+" w ").encode()
def moveto(self,x,y):
self.wd += (str(x)+" "+str(y)+" m ").encode()
def lineto(self,x,y):
self.wd += (str(x)+" "+str(y)+" l ").encode()
def close_subpath(self):
self.wd += ("h ").encode()
def stroke(self):
self.wd += ("S ").encode()
def fill(self):
self.wd += ("f ").encode()
def stroke_and_fill(self):
self.wd += ("B ").encode()
def finish(self):
if self.intxt == True:
self.end_text()
```
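For orientation, the sketch below shows how these classes compose into a minimal document. It is not part of the repository: the output filename is arbitrary, and it only strokes a line so that no font object (and therefore no TTF file or freetype) is needed.
```python
# Hedged usage sketch for PDFGen / PDFGenHL / PDFPageContentWriter (not repo code).
import pdf_module
pdf = pdf_module.PDFGen()
pdfhl = pdf_module.PDFGenHL(pdf)
page = pdfhl.new_page()                        # adds a /Page under the /Pages node
content = pdf_module.PDFPageContentWriter(pdfhl)
content.stroke_color(0, 0, 0)
content.linewidth(1)
content.moveto(72, 72)                         # PDF user space: 72 units per inch
content.lineto(300, 400)
content.stroke()
content.finish()                               # would close a text block if one were open
pdfhl.make_page_content_stream(page, data=content.data())
pdfhl.finish()                                 # fills in /Count and /Kids on /Pages
with open("example.pdf", "wb") as f:           # output path is an assumption
    pdf.write_file(f)
```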
#### File: joncampbell123/a-pile-of-documentation/ttf_module.py
```python
import os
import glob
import json
import zlib
import struct
import pathlib
class TTFFileTable:
tag = None
offset = None
length = None
checksum = None
data = None
def __init__(self,tag,checksum,offset,length):
self.tag = tag
self.checksum = checksum
self.offset = offset
self.length = length
def __str__(self):
return "{ tag="+self.tag.decode()+" chk="+hex(self.checksum)+" offset="+str(self.offset)+" size="+str(self.length)+" }"
class TTFInfoForPDF:
Ascent = None
Descent = None
isFixedPitch = None
fontWeight = None
italicAngle = None
unitsPerEm = None
firstChar = None
lastChar = None
xMin = None
yMin = None
xMax = None
yMax = None
class TTFFile:
tables = None
version = None
def __init__(self,data):
[self.version,numTables,searchRange,entrySelector,rangeShift] = struct.unpack(">LHHHH",data[0:12])
self.tables = [ ]
for ti in range(numTables):
ofs = 12+(ti*16)
#
tag = data[ofs:ofs+4]
[checkSum,offset,length] = struct.unpack(">LLL",data[ofs+4:ofs+16])
#
te = TTFFileTable(tag,checkSum,offset,length)
te.data = data[offset:offset+length]
#
self.tables.append(te)
def lookup(self,id):
for ti in self.tables:
if ti.tag.decode().strip() == id:
return ti
return None
def get_info_for_pdf(self):
r = TTFInfoForPDF()
#
post = self.lookup("post")
if not post == None:
# FIXED: 32-bit fixed pt (L)
# FWORD: 16-bit signed int (h)
# ULONG: 32-bit unsigned long (L)
[FormatType,r.italicAngle,underlinePosition,underlineThickness,r.isFixedPitch] = struct.unpack(">LLhhL",post.data[0:16])
del post
#
head = self.lookup("head")
if not head == None:
# FIXED: 32-bit fixed pt (L)
# FWORD: 16-bit signed int (h)
# USHORT: 16-bit unsigned int (H)
# ULONG: 32-bit unsigned long (L)
[tableversion,fontRevision,checkSumAdjustment,magicNumber,flags,r.unitsPerEm] = struct.unpack(">LLLLHH",head.data[0:20])
# skip the two created/modified timestamps, each 8 bytes long
[r.xMin,r.yMin,r.xMax,r.yMax] = struct.unpack(">hhhh",head.data[36:36+8])
del head
#
os2 = self.lookup("OS/2")
if not os2 == None:
[version,xAvgCharWidth,r.fontWeight] = struct.unpack(">HhH",os2.data[0:6])
[r.firstChar,r.lastChar] = struct.unpack(">HH",os2.data[64:64+4])
del os2
#
hhea = self.lookup("hhea")
if not hhea == None:
[tableVersion,r.Ascent,r.Descent] = struct.unpack(">Lhh",hhea.data[0:8])
del hhea
#
return r
``` |
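To see what `TTFFile` actually pulls out of a font, a short inspection script is enough. This is a sketch rather than repository code; the font path is a placeholder.
```python
# Hedged sketch: dump a TrueType file's table directory and PDF-relevant metrics.
import ttf_module
with open("ttf/Ubuntu-R.ttf", "rb") as f:   # path is an assumption
    data = f.read()
ttf = ttf_module.TTFFile(data)
for table in ttf.tables:
    print(table)                            # TTFFileTable.__str__: tag, checksum, offset, size
info = ttf.get_info_for_pdf()
print("unitsPerEm:", info.unitsPerEm)
print("ascent/descent:", info.Ascent, info.Descent)
print("bbox:", info.xMin, info.yMin, info.xMax, info.yMax)
print("char range:", info.firstChar, "-", info.lastChar)
```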
{
"source": "joncamp/jetbot_ros",
"score": 2
} |
#### File: jetbot_ros/gazebo/pygazebo_classifier.py
```python
import os
import time
import signal
import argparse
import asyncio
import pygazebo
import numpy as np
from PIL import Image
from pynput import keyboard
from datetime import datetime
from navigation_model import NavigationModel
'''
gz topic -l
gz joint -m 'simple_diff' -j right_wheel_hinge --vel-t 0
gz world --reset-all
renice -n 15 $(pgrep gzclient)
'''
parser = argparse.ArgumentParser()
parser.add_argument('--host', default='localhost', type=str)
parser.add_argument('--port', default=11346, type=int)
parser.add_argument('--retry', default=30, type=int)
parser.add_argument('--robot', default='simple_diff', type=str)
parser.add_argument('--camera', default='camera_link', type=str)
parser.add_argument('--left-wheel', default='left_wheel_hinge', type=str)
parser.add_argument('--right-wheel', default='right_wheel_hinge', type=str)
parser.add_argument('--max-speed', default=2.0, type=float)
parser.add_argument('--model', default='resnet18', type=str)
parser.add_argument('--dataset', default='data/dataset', type=str)
parser.add_argument('--epochs', default=10, type=int)
parser.add_argument('--batch-size', default=1, type=int)
args = parser.parse_args()
print(args)
#
# program modes:
# 'collect' (V) save data while driving with WASD/arrow keys
# 'train' (T) train model on collected data
# 'infer' (I) run the model in autonomous inference mode
# 'reset' (R) reset the simulation
#
mode = None
# current robot wheel velocities
wheel_speed = {'left': None, 'right': None}
drive_dir = 'stop'
# load navigation model
nav_model = NavigationModel(args.model)
# setup dataset
data_classes = [
#'backward',
'forward',
'left',
'right',
#'stop'
]
for cls in data_classes:
os.makedirs(os.path.join(args.dataset, cls), exist_ok=True)
#
# gazebo connection
#
def gazebo_connect(host='localhost', port=11346, retry=30):
async def _connect(host, port, retry):
connected = False
for i in range(retry):
try:
print(f'waiting for gazebo connection {host}:{port} (attempt={i+1})')
manager = await pygazebo.connect((host, port))
connected = True
print(f'connected to gazebo server {host}:{port}')
break
except Exception as e:
print(e)
pass
await asyncio.sleep(1)
if not connected:
raise Exception("Timeout connecting to Gazebo.")
return manager
return asyncio.get_event_loop().run_until_complete(
_connect(host, port, retry))
def gazebo_advertise(manager, topic_name, msg_type):
async def _advertise(manager, topic_name, msg_type):
print(f'advertising {topic_name} ({msg_type})')
return await manager.advertise(topic_name, msg_type)
return asyncio.get_event_loop().run_until_complete(
_advertise(manager, topic_name, msg_type))
def gazebo_subscribe(manager, topic_name, msg_type, callback):
async def _subscribe(manager, topic_name, msg_type, callback):
print(f'subscribing to {topic_name} ({msg_type})')
subscriber = manager.subscribe(topic_name, msg_type, callback)
await subscriber.wait_for_connection()
return subscriber
return asyncio.get_event_loop().run_until_complete(
_subscribe(manager, topic_name, msg_type, callback))
# connect to gazebo server
manager = gazebo_connect(args.host, args.port, args.retry)
print('namespaces')
print(' ', manager.namespaces())
print('publications')
for topic in manager.publications():
print(' ', topic)
# subscribe to topics
last_img = None
def on_image(data):
if mode != 'collect' and mode != 'infer':
return
msg = pygazebo.msg.image_stamped_pb2.ImageStamped()
msg.ParseFromString(data)
img = np.frombuffer(msg.image.data, dtype=np.uint8)
img = np.reshape(img, (msg.image.height, msg.image.width, 3))
#print(msg.time)
#print(f'width={msg.image.width} height={msg.image.height} pixel_format={msg.image.pixel_format} step={msg.image.step}')
#print(img.shape)
#print('')
if mode == 'collect':
if drive_dir in data_classes:
img_path = os.path.join(args.dataset, drive_dir, f"{datetime.now().strftime('%Y%m%d_%H%M%S_%f')}.jpg")
Image.fromarray(img).save(img_path)
print(f"saved {msg.image.width}x{msg.image.height} image to '{img_path}'")
elif mode == 'infer':
global last_img
last_img = Image.fromarray(img)
image_subscriber = gazebo_subscribe(manager, f'/gazebo/default/{args.robot}/{args.camera}/camera/image', 'gazebo.msgs.ImageStamped', on_image)
# advertise topics
joint_publisher = gazebo_advertise(manager, f'/gazebo/default/{args.robot}/joint_cmd', 'gazebo.msgs.JointCmd')
#
# keyboard handler
#
key_states = {}
def on_press(key):
global key_states
try:
key_states[key.char] = True
except AttributeError:
key_states[key] = True
def on_release(key):
global key_states
global mode
try:
key_states[key.char] = False
if key.char == 'v':
mode = 'collect' if mode != 'collect' else None
elif key.char == 't':
mode = 'train' if mode != 'train' else None
elif key.char == 'i':
mode = 'infer' if mode != 'infer' else None
except AttributeError:
key_states[key] = False
keyboard_listener = keyboard.Listener(on_press=on_press, on_release=on_release)
keyboard_listener.start()
#
# robot control
#
def set_wheel_speed(left, right):
global wheel_speed
changed_speed = False
if wheel_speed['left'] != left:
left_msg = pygazebo.msg.joint_cmd_pb2.JointCmd()
left_msg.name = f'{args.robot}::{args.left_wheel}'
left_msg.velocity.target = left
joint_publisher.publish(left_msg)
wheel_speed['left'] = left
changed_speed = True
if wheel_speed['right'] != right:
right_msg = pygazebo.msg.joint_cmd_pb2.JointCmd()
right_msg.name = f'{args.robot}::{args.right_wheel}'
right_msg.velocity.target = right
joint_publisher.publish(right_msg)
wheel_speed['right'] = right
changed_speed = True
if changed_speed:
print(f"set_wheel_speed({left}, {right})")
def set_drive_direction(dir, speed=1.0):
global drive_dir
if not isinstance(dir, str):
dir = data_classes[dir]
if dir == 'stop':
set_wheel_speed(0,0)
elif dir == 'forward':
set_wheel_speed(speed, speed)
elif dir == 'backward':
set_wheel_speed(-speed, -speed)
elif dir == 'left':
set_wheel_speed(-speed, speed)
elif dir == 'right':
set_wheel_speed(speed, -speed)
else:
raise ValueError(f"invalid drive direction: {dir}")
drive_dir = dir
def teleop(speed=1.0):
dir = 'stop'
if key_states.get(keyboard.Key.left) or key_states.get('a'):
dir = 'left'
elif key_states.get(keyboard.Key.right) or key_states.get('d'):
dir = 'right'
elif key_states.get(keyboard.Key.up) or key_states.get('w'):
dir = 'forward'
elif key_states.get(keyboard.Key.down) or key_states.get('s'):
dir = 'backward'
set_drive_direction(dir, speed)
#
# main loop
#
run_signal = True
def signal_handler(sig, frame):
global run_signal
run_signal = False
print('pressed Ctrl+C, exiting...')
signal.signal(signal.SIGINT, signal_handler)
# main loop
async def run():
global mode
global last_img
while run_signal:
if mode == 'train':
nav_model.train(args.dataset, epochs=args.epochs, batch_size=args.batch_size)
mode = None
elif mode == 'infer':
if last_img is not None:
dir, prob = nav_model.infer(last_img)
print(f'dir={dir} prob={prob:.8f}')
set_drive_direction(dir, args.max_speed)
last_img = None
else:
teleop(args.max_speed)
await asyncio.sleep(0.1) # TODO should this be run faster?
print('shutting down, stopping robot...')
set_wheel_speed(0,0)
await asyncio.sleep(1.0)
asyncio.get_event_loop().run_until_complete(run())
```
#### File: jetbot_ros/dnn/reshape_model.py
```python
import torch
import torch.nn
#
# reshape the model for N classes
#
def reshape_model(model, arch, num_classes):
"""Reshape a model's output layers for the given number of classes"""
# reshape output layers for the dataset
if arch.startswith("resnet"):
model.fc = torch.nn.Linear(model.fc.in_features, num_classes)
print("=> reshaped ResNet fully-connected layer with: " + str(model.fc))
elif arch.startswith("alexnet"):
model.classifier[6] = torch.nn.Linear(model.classifier[6].in_features, num_classes)
print("=> reshaped AlexNet classifier layer with: " + str(model.classifier[6]))
elif arch.startswith("vgg"):
model.classifier[6] = torch.nn.Linear(model.classifier[6].in_features, num_classes)
print("=> reshaped VGG classifier layer with: " + str(model.classifier[6]))
elif arch.startswith("squeezenet"):
model.classifier[1] = torch.nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
model.num_classes = num_classes
print("=> reshaped SqueezeNet classifier layer with: " + str(model.classifier[1]))
elif arch.startswith("densenet"):
model.classifier = torch.nn.Linear(model.classifier.in_features, num_classes)
print("=> reshaped DenseNet classifier layer with: " + str(model.classifier))
elif arch.startswith("inception"):
model.AuxLogits.fc = torch.nn.Linear(model.AuxLogits.fc.in_features, num_classes)
model.fc = torch.nn.Linear(model.fc.in_features, num_classes)
print("=> reshaped Inception aux-logits layer with: " + str(model.AuxLogits.fc))
print("=> reshaped Inception fully-connected layer with: " + str(model.fc))
elif arch.startswith("googlenet"):
if model.aux_logits:
from torchvision.models.googlenet import InceptionAux
model.aux1 = InceptionAux(512, num_classes)
model.aux2 = InceptionAux(528, num_classes)
print("=> reshaped GoogleNet aux-logits layers with: ")
print(" " + str(model.aux1))
print(" " + str(model.aux2))
model.fc = torch.nn.Linear(model.fc.in_features, num_classes)
print("=> reshaped GoogleNet fully-connected layer with: " + str(model.fc))
else:
print("classifier reshaping not supported for " + args.arch)
print("model will retain default of 1000 output classes")
return model
```
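A typical call site looks like the sketch below (not from the repository): load a torchvision backbone and shrink its head to the three drive-direction classes used by the Gazebo teleop script. It assumes a torchvision version that still accepts `pretrained=True`.
```python
# Hedged usage sketch for reshape_model (torchvision version and class count assumed).
import torchvision.models as models
from reshape_model import reshape_model
model = models.resnet18(pretrained=True)
model = reshape_model(model, "resnet18", num_classes=3)   # e.g. forward / left / right
print(model.fc)   # Linear(in_features=512, out_features=3, bias=True)
```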
#### File: jetbot_ros/dnn/xy_dataset.py
```python
import os
import glob
import PIL.Image
import numpy as np
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
def get_x(path, width):
"""Gets the x value from the image filename"""
return (float(int(path.split("_")[1])) - width/2) / (width/2)
def get_y(path, height):
"""Gets the y value from the image filename"""
return (float(int(path.split("_")[2])) - height/2) / (height/2)
class XYDataset(torch.utils.data.Dataset):
def __init__(self, directory, transform=None, random_hflips=False):
self.directory = directory
self.transform = transform
self.image_paths = glob.glob(os.path.join(self.directory, '*.jpg'))
self.random_hflips = random_hflips
#self.color_jitter = transforms.ColorJitter(0.3, 0.3, 0.3, 0.3)
def __len__(self):
return len(self.image_paths)
def __getitem__(self, idx):
image_path = self.image_paths[idx]
image = PIL.Image.open(image_path)
width, height = image.size
x = float(get_x(os.path.basename(image_path), width))
y = float(get_y(os.path.basename(image_path), height))
if self.random_hflips and float(np.random.rand(1)) > 0.5:
image = transforms.functional.hflip(image)
x = -x
#image = self.color_jitter(image)
if self.transform is not None:
image = self.transform(image)
return image, torch.tensor([x, y]).float()
```
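The targets come straight out of the filename, so the convention matters: a name like `xy_<x>_<y>_<uuid>.jpg` yields pixel coordinates that are normalized to `[-1, 1]`. The sketch below is not repository code; the directory, image size, and normalization constants are assumptions.
```python
# Hedged sketch of decoding targets and wrapping XYDataset in a DataLoader.
import torch
import torchvision.transforms as transforms
from xy_dataset import XYDataset, get_x, get_y
print(get_x("xy_150_100_abc123.jpg", 224))   # (150 - 112) / 112 ~= 0.339
print(get_y("xy_150_100_abc123.jpg", 224))   # (100 - 112) / 112 ~= -0.107
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
dataset = XYDataset("data/xy", transform=transform, random_hflips=True)
loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)
images, targets = next(iter(loader))
print(images.shape, targets.shape)           # torch.Size([8, 3, 224, 224]) torch.Size([8, 2])
```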
#### File: jetbot_ros/jetbot_ros/motors_waveshare.py
```python
import rclpy
from rclpy.node import Node
from jetbot_ros.motors import MotorController
from Adafruit_MotorHAT import Adafruit_MotorHAT
class MotorControllerWaveshare(MotorController):
"""
Motor controller node that supports the Waveshare JetBot.
@see motors.py for the base class to implement different controllers.
"""
MOTOR_LEFT = 1 # left motor ID
MOTOR_RIGHT = 2 # right motor ID
def __init__(self):
super().__init__()
# open Adafruit MotorHAT driver
self.driver = Adafruit_MotorHAT(i2c_bus=1)
# get motor objects from driver
self.motors = {
self.MOTOR_LEFT : self.driver.getMotor(self.MOTOR_LEFT),
self.MOTOR_RIGHT : self.driver.getMotor(self.MOTOR_RIGHT)
}
self.pwm_channels = {
self.MOTOR_LEFT : (1, 0),
self.MOTOR_RIGHT : (2, 3)
}
def set_speed(self, left, right):
"""
Sets the motor speeds between [-1.0, 1.0]
"""
self._set_pwm(self.MOTOR_LEFT, left, self.left_trim)
self._set_pwm(self.MOTOR_RIGHT, right, self.right_trim)
def _set_pwm(self, motor, value, trim):
# apply trim and convert [-1,1] to PWM value
pwm = int(min(max((abs(value) + trim) * self.max_pwm, 0), self.max_pwm))
self.motors[motor].setSpeed(pwm)
# set the motor direction
ina, inb = self.pwm_channels[motor]
if value > 0:
self.motors[motor].run(Adafruit_MotorHAT.FORWARD)
self.driver._pwm.setPWM(ina, 0, pwm * 16)
self.driver._pwm.setPWM(inb, 0, 0)
elif value < 0:
self.motors[motor].run(Adafruit_MotorHAT.BACKWARD)
self.driver._pwm.setPWM(ina, 0, 0)
self.driver._pwm.setPWM(inb, 0, pwm * 16)
else:
self.motors[motor].run(Adafruit_MotorHAT.RELEASE)
self.driver._pwm.setPWM(ina, 0, 0)
self.driver._pwm.setPWM(inb, 0, 0)
def main(args=None):
rclpy.init(args=args)
node = MotorControllerWaveshare()
node.get_logger().info("listening for velocity messages...")
rclpy.spin(node)
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
```
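The `[-1, 1]` to PWM conversion in `_set_pwm` is easiest to follow with concrete numbers. `max_pwm` and the trims come from the `MotorController` base class, which is not shown here, so the values below are assumptions for illustration only.
```python
# Worked example of the _set_pwm mapping (max_pwm and trim values are assumed).
value, trim, max_pwm = 0.5, 0.0, 255
pwm = int(min(max((abs(value) + trim) * max_pwm, 0), max_pwm))
print(pwm)        # 127 -> passed to the MotorHAT's setSpeed (0..255 scale)
print(pwm * 16)   # 2032 -> written to the PWM channel's 0..4095 duty range
```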
#### File: jetbot_ros/launch/gazebo_world.launch.py
```python
import os
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription, DeclareLaunchArgument
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import ThisLaunchFileDir, LaunchConfiguration
from launch_ros.actions import Node
from launch.actions import ExecuteProcess
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
use_sim_time = LaunchConfiguration('use_sim_time', default='True')
robot_name = DeclareLaunchArgument('robot_name', default_value='jetbot')
robot_model = DeclareLaunchArgument('robot_model', default_value='simple_diff_ros') # jetbot_ros
robot_x = DeclareLaunchArgument('x', default_value='-0.3')
robot_y = DeclareLaunchArgument('y', default_value='-2.65')
robot_z = DeclareLaunchArgument('z', default_value='0.0')
world_file_name = 'dirt_path_curves.world'
pkg_dir = get_package_share_directory('jetbot_ros')
os.environ["GAZEBO_MODEL_PATH"] = os.path.join(pkg_dir, 'models')
world = os.path.join(pkg_dir, 'worlds', world_file_name)
launch_file_dir = os.path.join(pkg_dir, 'launch')
gazebo = ExecuteProcess(
cmd=['gazebo', '--verbose', world,
'-s', 'libgazebo_ros_init.so',
'-s', 'libgazebo_ros_factory.so',
'-g', 'libgazebo_user_camera_control_system.so'],
output='screen', emulate_tty=True)
spawn_entity = Node(package='jetbot_ros', node_executable='gazebo_spawn', # FYI 'node_executable' is renamed to 'executable' in Foxy
parameters=[
{'name': LaunchConfiguration('robot_name')},
{'model': LaunchConfiguration('robot_model')},
{'x': LaunchConfiguration('x')},
{'y': LaunchConfiguration('y')},
{'z': LaunchConfiguration('z')},
],
output='screen', emulate_tty=True)
return LaunchDescription([
robot_name,
robot_model,
robot_x,
robot_y,
robot_z,
gazebo,
spawn_entity,
])
``` |
{
"source": "joncar/ha-fpa",
"score": 2
} |
#### File: joncar/ha-fpa/sensor.py
```python
import logging
import voluptuous as vol
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT, SensorEntity
from homeassistant.const import DEVICE_CLASS_TIMESTAMP, SERVICE_TURN_ON
from homeassistant.helpers import config_validation as cv, entity_platform
import pybabyfpa
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN, ATTR_BOTTLE_ID
_LOGGER = logging.getLogger(__name__)
STATE_TO_ICON = {
"requesting_bottle": "mdi:transfer-down",
"making_bottle": "mdi:transfer-down",
"full_bottle": "mdi:cup",
"funnel_cleaning_needed": "mdi:liquid-spot",
"funnel_out": "mdi:filter-off-outline",
"lid_open": "mdi:projector-screen-variant-off-outline",
"low_water": "mdi:water-off",
"bottle_missing": "mdi:cup-off-outline",
"ready": "mdi:cup-outline"
}
async def async_setup_entry(
hass, config_entry, async_add_entities, discovery_info=None
):
"""Set up the sensor platform."""
api = hass.data[DOMAIN][config_entry.entry_id]
if not api.has_me:
await api.get_me()
for device in api.devices:
await api.connect_to_device(device.device_id)
async_add_entities([FpaMainSensor(api, device) for device in api.devices])
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_TURN_ON,
{vol.Required(ATTR_BOTTLE_ID): cv.positive_int},
"turn_on",
)
class FpaSensor(SensorEntity):
"""Representation of a Fpa sensor."""
_api: pybabyfpa.Fpa
_device: pybabyfpa.FpaDevice
_making_bottle_requested: bool # between start API call and making_bottle shadow update
_full_bottle: bool # between making_bottle shadow update and bottle_missing shadow update
_old_making_bottle: bool
_old_bottle_missing: bool
def __init__(self, api, device):
"""Initialize the sensor."""
self._api = api
self._device = device
self._making_bottle_requested = False
self._full_bottle = False
async def async_added_to_hass(self):
"""Run when this Entity has been added to HA."""
self._old_making_bottle = False
self._old_bottle_missing = False
def updated_callback(device: pybabyfpa.FpaDevice):
if device.device_id != self._device.device_id:
return
if not self._old_making_bottle and device.shadow.making_bottle:
self._making_bottle_requested = False
if self._old_making_bottle and not device.shadow.making_bottle and not device.shadow.bottle_missing:
self._full_bottle = True
if not self._old_bottle_missing and device.shadow.bottle_missing:
self._making_bottle_requested = False
self._full_bottle = False
self._old_making_bottle = device.shadow.making_bottle
self._old_bottle_missing = device.shadow.bottle_missing
self._device = device
self.schedule_update_ha_state()
self.async_on_remove(self._api.add_listener(updated_callback))
@property
def available(self):
"""Return if data is available."""
return self._device.connected
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self._device.device_id)},
"manufacturer": "Baby Brezza",
"name": self._device.title,
}
@property
def icon(self):
"""Return the icon of the sensor."""
return "mdi:baby-bottle"
@property
def should_poll(self):
"""No polling needed."""
return False
class FpaMainSensor(FpaSensor):
"""Sensor for the Fpa's main state."""
_attr_device_class = "fpa__state"
@property
def assumed_state(self):
"""Return if data is from assumed state."""
return self._making_bottle_requested
@property
def unique_id(self):
"""Return the unique id of the sensor."""
return self._device.device_id
@property
def name(self):
"""Return the name of the sensor."""
return self._device.title
@property
def icon(self):
"""Return the icon of the sensor."""
return STATE_TO_ICON[self.state]
@property
def state(self):
"""Return the state of the sensor."""
if self._making_bottle_requested: # only useful for testing?
return "requesting_bottle"
if self._making_bottle_requested or self._device.shadow.making_bottle:
return "making_bottle"
if self._full_bottle:
return "full_bottle"
if self._device.shadow.funnel_cleaning_needed:
return "funnel_cleaning_needed"
if self._device.shadow.funnel_out:
return "funnel_out"
if self._device.shadow.lid_open:
return "lid_open"
if self._device.shadow.low_water:
return "low_water"
if self._device.shadow.bottle_missing:
return "bottle_missing"
return "ready"
@property
def extra_state_attributes(self):
"""Return the extra state attributes."""
attr = {
"temperature": self._device.shadow.temperature,
"powder": self._device.shadow.powder,
"volume": self._device.shadow.volume,
"volume_unit": self._device.shadow.volume_unit,
"making_bottle": self._device.shadow.making_bottle,
"water_only": self._device.shadow.water_only,
"bottle_missing": self._device.shadow.bottle_missing,
"funnel_cleaning_needed": self._device.shadow.funnel_cleaning_needed,
"funnel_out": self._device.shadow.funnel_out,
"lid_open": self._device.shadow.lid_open,
"low_water": self._device.shadow.low_water,
}
for bottle in self._device.bottles:
attr[
f"bottle_{bottle.id}"
] = f"{bottle.volume}{bottle.volume_unit} of {str(bottle.formula)}"
return attr
async def turn_on(self, **kwargs):
"""Service call to start making a bottle."""
bottle_id = kwargs.get(ATTR_BOTTLE_ID)
# Cloud API will ignore a start for all cases where the attributes
# track a disallowed state like lid open or bottle missing.
# However it does not attempt to track if the bottle has been
# filled and has not been removed, so guard against that
# (and all other) disallowed states here.
if self.state != "ready":
_LOGGER.error(f"Cannot start bottle when in state {self.state}")
return
_LOGGER.info(f"Starting bottle {bottle_id}!")
self._making_bottle_requested = True
await self._api.start_bottle(bottle_id)
``` |
{
"source": "joncar/ha-snoo",
"score": 2
} |
#### File: joncar/ha-snoo/__init__.py
```python
from __future__ import annotations
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
import pysnoo
from .const import DOMAIN
PLATFORMS = ["sensor"]
_LOGGER = logging.getLogger(__name__)
class SnooHub:
"""Hub containing of the Snoo API objects."""
def __init__(self, auth, snoo, device, baby, pubnub):
"""Initialize the hub."""
self.auth = auth
self.snoo = snoo
self.device = device
self.baby = baby
self.pubnub = pubnub
self.is_unloading = False
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Snoo from a config entry."""
auth = pysnoo.SnooAuthSession()
await auth.fetch_token(entry.data["username"], entry.data["password"])
snoo = pysnoo.Snoo(auth)
devices = await snoo.get_devices()
# Snoo's app only allows one device per account...
if len(devices) != 1:
return True
device = devices[0]
# ... because who would have multiple devices and only one baby.
baby = await snoo.get_baby()
pubnub = pysnoo.SnooPubNub(
auth,
device.serial_number,
f"pn-homeassistant-{device.serial_number}",
)
await pubnub.subscribe_and_await_connect()
hub = SnooHub(auth, snoo, device, baby, pubnub)
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = hub
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hub = hass.data[DOMAIN].pop(entry.entry_id)
if hub:
hub.is_unloading = True
await hub.pubnub.unsubscribe_and_await_disconnect()
return unload_ok
``` |
{
"source": "joncarlson/intro-to-docker",
"score": 3
} |
#### File: intro-to-docker/2-volume-mapping/getcollection.py
```python
import os
import requests
import json
import sys
DIRECTORY = './data'
MODEL = 'Collection Metadata'
APIURL = os.environ.get('MEDITOR_API_URL')
EXTENSIONS = ('.txt')
def main():
collections = getCollectionsFromDataFiles()
if len(collections) == 0:
sys.exit('No collections found!')
for collection in collections:
params = {
'model': MODEL,
'title': collection,
}
print('Retrieving ' + collection + ' from mEditor')
request = requests.get(url = APIURL + '/getDocument', params = params)
filename = DIRECTORY + '/' + collection + '.json'
with open(filename, 'w') as file:
json.dump(request.json(), file, indent=2)
print('Created file: ' + filename)
def getCollectionsFromDataFiles():
collections = []
for subdir, dirs, files in os.walk(DIRECTORY):
for file in files:
ext = os.path.splitext(file)[-1].lower()
if ext != '' and ext in EXTENSIONS:
file = open(os.path.join(subdir, file), 'r')
collections.extend(file.read().splitlines())
return collections
if __name__ == "__main__":
main()
``` |
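The script assumes `MEDITOR_API_URL` is set and that at least one `.txt` file under `./data` lists collection titles, one per line; it then writes a `<collection>.json` file per title into the same directory. The sketch below just prepares that layout with placeholder names.
```python
# Hedged setup sketch for getcollection.py (names and URL are placeholders).
import os
os.makedirs("data", exist_ok=True)
with open("data/collections.txt", "w") as f:
    f.write("ExampleCollection_A\n")
    f.write("ExampleCollection_B\n")
os.environ["MEDITOR_API_URL"] = "https://example.org/meditor/api"
# Running getcollection.py would then create data/ExampleCollection_A.json, etc.
```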
{
"source": "joncar/pylitejet",
"score": 3
} |
#### File: pylitejet/test_handlers/protocol_ljtest.py
```python
from serial.serialutil import *
import logging
import threading
_LOGGER = logging.getLogger(__name__)
class Serial(SerialBase):
def open(self):
self._read_buffer = bytes()
self._read_ready = threading.Event()
self._read_ready.clear()
self._load_levels = {}
self._switch_pressed = {}
self.is_open = True
def close(self):
self.is_open = False
def from_url(self, url):
_LOGGER.error("url is %s", url)
@property
def in_waiting(self):
return len(self._read_buffer)
@property
def out_waiting(self):
return 0
def cancel_read(self):
self._read_buffer = bytes()
self._read_ready.set()
def read(self, size=1):
self._read_ready.wait()
next_bytes = self._read_buffer[0:size]
self._read_buffer = self._read_buffer[size:]
if len(self._read_buffer) == 0:
self._read_ready.clear()
return next_bytes
def _reconfigure_port(self):
pass
def _set_load(self, number, level):
self._load_levels[number] = level
self._respond("^K{0:03d}{1:02d}\r".format(number, level))
def _set_switch(self, number, pressed):
if self._switch_pressed.get(number, False) == pressed:
return
self._switch_pressed[number] = pressed
if pressed:
event = "P"
else:
event = "R"
self._respond("{0}{1:03d}\r".format(event, number))
def write(self, data):
str = data.decode("utf-8")
assert str[0] == "^"
command = str[1]
if command != "G" and command != "H":
number = int(str[2:5])
if command == "A":
self._set_load(number, 99)
elif command == "B":
self._set_load(number, 0)
elif command == "C" or command == "D":
_LOGGER.warning("Scenes not supported")
elif command == "E":
level = int(str[5:7])
rate = int(str[7:9])
self._set_load(number, level)
elif command == "F":
self._respond("{0:02d}\r".format(self._load_levels.get(number, 0)))
elif command == "G":
_LOGGER.warning("Instant status not supported")
self._respond("000000000000000000000000000000000000000000000000\r")
elif command == "H":
_LOGGER.warning("Instant status not supported")
self._respond(
"00000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000\r"
)
elif command == "I":
self._set_switch(number, True)
elif command == "J":
self._set_switch(number, False)
elif command == "K":
self._respond("Switch #{}\r".format(number))
elif command == "L":
self._respond("Load #{}\r".format(number))
elif command == "M":
self._respond("Scene #{}\r".format(number))
return len(data)
def _respond(self, str):
self._read_buffer = str.encode("utf-8")
self._read_ready.set()
``` |
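The handler emulates just enough of the LiteJet serial protocol for tests: `^A`/`^B` turn a load on or off, `^E` sets a level, `^F` reads one back, and level changes come back as `^Knnnll` reports. A direct sketch of driving it, not taken from the test suite:
```python
# Hedged sketch: poke the fake LiteJet handler directly (bypassing serial_for_url).
from test_handlers.protocol_ljtest import Serial
s = Serial()          # port=None, so pyserial's SerialBase does not auto-open
s.open()
s.write(b"^A005")     # activate load 5 at full brightness
print(s.read(8))      # b'^K00599\r' -- the level-changed report
s.write(b"^F005")     # query the current level of load 5
print(s.read(3))      # b'99\r'
```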
{
"source": "joncar/pysnoo",
"score": 3
} |
#### File: pysnoo/tests/helpers.py
```python
import os
import json
def load_fixture(folder, filename, mode='r'):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__),
'fixtures', folder, filename)
with open(path, mode) as fdp:
return fdp.read()
# Login and Refresh Token response are the same.
def get_token(expires_in=None):
"""Get an OAuth2 Token with variable expires_in"""
token_string = load_fixture('', 'us_login__post_200.json')
token = json.loads(token_string)
if expires_in is not None:
token['expires_in'] = expires_in
# scope is an array in the parsed representation.
token['scope'] = token['scope'].split(' ')
return token, token_string
```
#### File: pysnoo/tests/test_snoo_auth_session.py
```python
import json
from asynctest import TestCase, patch, CoroutineMock, ANY, MagicMock
from callee import Contains
from oauthlib.oauth2 import OAuth2Error
from pysnoo.const import (OAUTH_LOGIN_ENDPOINT,
OAUTH_TOKEN_REFRESH_ENDPOINT,
SNOO_API_URI,
BASE_HEADERS)
from pysnoo.auth_session import SnooAuthSession
from tests.helpers import load_fixture, get_token
class TestSnooAuthSession(TestCase):
"""SnooAuthSession Test class"""
@patch('aiohttp.client.ClientSession._request')
async def test_login_success(self, mocked_request):
"""Test the successful fetch of an initial token"""
# Setup
_, token_response = get_token()
mocked_request.return_value.text = CoroutineMock(side_effect=[token_response])
async with SnooAuthSession() as session:
# Test
await session.fetch_token('USER', 'PASSWORD')
# Check
mocked_request.assert_called_once_with(
'POST', OAUTH_LOGIN_ENDPOINT,
data=json.dumps({'grant_type': 'password', 'username': 'USER', 'password': 'PASSWORD'}),
timeout=None,
# Base Headers are only added in _request, which is mocked.
headers={'Accept': 'application/json', 'Content-Type': 'application/json;charset=UTF-8'},
auth=ANY,
verify_ssl=True)
self.assertEqual(session.headers, BASE_HEADERS)
self.assertTrue(session.authorized)
@patch('aiohttp.client.ClientSession._request')
async def test_login_failure(self, mocked_request):
"""Test the failed fetch of an initial token"""
token_response = load_fixture('', 'us_login__post_400.json')
mocked_request.return_value.text = CoroutineMock(side_effect=[token_response])
async with SnooAuthSession() as session:
with self.assertRaises(OAuth2Error):
await session.fetch_token('USER', '<PASSWORD>')
@patch('aiohttp.client.ClientSession._request')
async def test_refresh_expired_token(self, mocked_request):
"""Test the automatic refresh of an expired token"""
token, token_response = get_token(-10)
mocked_tocken_updater = MagicMock()
# Token Refresh POST
mocked_request.return_value.text = CoroutineMock(side_effect=[token_response, "test"])
async with SnooAuthSession(token=token, token_updater=mocked_tocken_updater) as session:
async with session.get(SNOO_API_URI) as resp:
response_body = await resp.text()
self.assertEqual('test', response_body)
# Just make sure REFRESH CALL has the correct updated data and header attributes.
mocked_request.assert_any_call(
'POST', OAUTH_TOKEN_REFRESH_ENDPOINT,
data=json.dumps({'grant_type': 'refresh_token',
'refresh_token': token['refresh_token'],
'allow_redirects': 'True'}),
timeout=None,
headers={'Accept': 'application/json', 'Content-Type': 'application/json;charset=UTF-8'},
auth=None,
verify_ssl=True)
# Check that token_updater function was called with new TOKEN
mocked_tocken_updater.assert_called_once_with(Contains('access_token'))
``` |
{
"source": "joncar/streamdeck2mqtt",
"score": 2
} |
#### File: joncar/streamdeck2mqtt/streamdeck2mqtt.py
```python
import os
import threading
import logging
import re
import json
import signal
from PIL import Image, ImageDraw, ImageFont
from StreamDeck.DeviceManager import DeviceManager
from StreamDeck.ImageHelpers import PILHelper
import paho.mqtt.client as mqtt
class StreamDeck2MQTT:
def render_key_image(self, key):
icon_text = key.get('icon')
icon_font_name = key.get('icon_font', 'mdi')
label_text = key.get('text')
image = PILHelper.create_image(self.deck)
draw = ImageDraw.Draw(image)
if icon_text and icon_font_name == 'emoji':
icon_size = 150
icon = Image.new("RGB", (icon_size, icon_size), 'black')
icon_draw = ImageDraw.Draw(icon)
icon_draw.text((int(icon_size/2), int(icon_size/2)), text=icon_text, font=self.icon_emoji_font, anchor="mm", embedded_color=True)
icon.thumbnail((image.width - 20, image.width - 20), Image.LANCZOS)
image.paste(icon, (10, 10))
elif icon_text and icon_font_name == 'mdi':
v = (image.height - 20) if label_text else image.height
draw.text((int(image.width / 2), int(v / 2)), text=icon_text, font=self.icon_mdi_font, anchor="mm", fill="white", embedded_color=True)
if label_text:
v = (image.height - 5) if icon_text else (image.height / 2)
draw.text((image.width / 2, v), text=label_text, font=self.label_font, anchor="ms", fill="white")
return PILHelper.to_native_format(self.deck, image)
def on_connect(self, client, userdata, flags, rc):
self.client.subscribe(f"streamdeck/{self.deck_sn}/#")
self.client.publish(f'streamdeck/{self.deck_sn}/availability', 'online', retain=True)
for key_id in range(self.deck.key_count()):
config = json.dumps({
"unique_id": f"streamdeck_{self.deck_sn}_{key_id}",
"name": f"StreamDeck Key {key_id}",
"state_topic": f"streamdeck/{self.deck_sn}/{key_id}/state",
"availability_topic": f"streamdeck/{self.deck_sn}/availability",
"json_attributes_topic": f"streamdeck/{self.deck_sn}/{key_id}/attributes",
"icon": "mdi:keyboard",
"device": {
"identifiers": [self.deck_sn],
"name": "StreamDeck"
}
})
self.client.publish(f'homeassistant/binary_sensor/streamdeck_{self.deck_sn}_{key_id}/config', config, retain=True)
self.client.publish(f"streamdeck/{self.deck_sn}/{key_id}/attributes", json.dumps({
"number": key_id
}), retain=True)
def on_message(self, client, userdata, msg):
p = re.compile(r'streamdeck/([^/]+)/(\d+)/(text|icon|set)')
m = p.match(msg.topic)
if m:
deck_sn = m.group(1)
key_id = int(m.group(2))
prop = m.group(3)
value = msg.payload.decode('utf-8')
key = self.keys.setdefault(key_id, {})
if prop == 'set':
self.keys[key_id] = key = json.loads(value)
else:
key[prop] = value
image = self.render_key_image(key)
with self.deck:
self.deck.set_key_image(key_id, image)
def on_key_change(self, deck, key, state):
self.client.publish(f'streamdeck/{self.deck_sn}/{key}/state', 'ON' if state else 'OFF', retain=True)
def __init__(self, deck):
self.deck = deck
self.keys = {}
def start(self, config):
self.deck.open()
self.deck.reset()
self.deck.set_brightness(30)
self.deck.set_key_callback(self.on_key_change)
self.deck_sn = self.deck.get_serial_number().replace('\0', '').replace('\x01', '')
key_width, key_height = self.deck.key_image_format()['size']
self.icon_mdi_font = ImageFont.truetype('materialdesignicons-webfont.ttf', key_height)
self.icon_emoji_font = ImageFont.truetype('NotoColorEmoji.ttf', size=109, layout_engine=ImageFont.LAYOUT_RAQM)
self.label_font = ImageFont.truetype('Roboto-Regular.ttf', 14)
client_id = f"streamdeck2mqtt_{self.deck_sn}"
self.client = mqtt.Client(client_id=client_id, clean_session=False)
self.client.username_pw_set(config['mqtt_username'], config['mqtt_password'])
self.client.will_set(f'streamdeck/{self.deck_sn}/availability', 'offline')
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
if config.get('debug'):
self.client.enable_logger()
self.client.connect(config['mqtt_server'])
self.client.loop_start()
with open("config.json") as json_data_file:
config = json.load(json_data_file)
if config.get('debug'):
logging.basicConfig(level=logging.DEBUG)
for deck in DeviceManager().enumerate():
worker = StreamDeck2MQTT(deck)
worker.start(config)
signal.pause()
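# Example interaction (illustrative; <SERIAL> is the deck serial number derived above):
#   publish "Hello"         to streamdeck/<SERIAL>/3/text  -> key 3 is re-rendered with that label
#   publish '{"icon": "X"}' to streamdeck/<SERIAL>/3/set   -> key 3's whole definition is replaced
#   key presses are reported back on streamdeck/<SERIAL>/3/state as ON / OFF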
``` |
{
"source": "joncasdam/django-celery-fulldbresult",
"score": 2
} |
#### File: django-celery-fulldbresult/django_celery_fulldbresult/errors.py
```python
from __future__ import absolute_import, unicode_literals
class SchedulingStopPublishing(Exception):
"""Raised before publishing a scheduled task to prevent Celery from sending
the task to the broker.
"""
def __init__(self, task_id):
super(SchedulingStopPublishing, self).__init__()
self.task_id = task_id
```
#### File: django-celery-fulldbresult/django_celery_fulldbresult/__init__.py
```python
from __future__ import absolute_import, unicode_literals
from celery import current_app
from celery.states import PENDING
from celery.app.task import Context, Task
from celery.signals import before_task_publish
from django_celery_fulldbresult.errors import SchedulingStopPublishing
from django.conf import settings
from django.utils.timezone import now
schedule_eta = getattr(
settings, "DJANGO_CELERY_FULLDBRESULT_SCHEDULE_ETA", False)
track_publish = getattr(
settings, "DJANGO_CELERY_FULLDBRESULT_TRACK_PUBLISH", False)
monkey_patch_async = getattr(
settings, "DJANGO_CELERY_FULLDBRESULT_MONKEY_PATCH_ASYNC", False)
old_apply_async = Task.apply_async
def new_apply_async(self, *args, **kwargs):
try:
return old_apply_async(self, *args, **kwargs)
except SchedulingStopPublishing as exc:
# There was an ETA and the task was not sent to the broker.
# A scheduled task was created instead.
return self.AsyncResult(exc.task_id)
def apply_async_monkey_patch():
Task.apply_async = new_apply_async
def unapply_async_monkey_patch():
Task.apply_async = old_apply_async
if monkey_patch_async:
apply_async_monkey_patch()
if track_publish or schedule_eta:
@before_task_publish.connect
def update_sent_state(sender=None, body=None, exchange=None,
routing_key=None, **kwargs):
# App may not be loaded on init
from django_celery_fulldbresult.models import SCHEDULED
task = current_app.tasks.get(sender)
save = False
status = None
schedule_eta = getattr(
settings, "DJANGO_CELERY_FULLDBRESULT_SCHEDULE_ETA", False)
track_publish = getattr(
settings, "DJANGO_CELERY_FULLDBRESULT_TRACK_PUBLISH", False)
ignore_result = getattr(task, "ignore_result", False) or\
getattr(settings, "CELERY_IGNORE_RESULT", False)
if schedule_eta and body.get("eta") and not body.get("chord")\
and not body.get("taskset"):
status = SCHEDULED
save = True
elif track_publish and not ignore_result:
status = PENDING
save = True
if save:
backend = task.backend if task else current_app.backend
request = Context()
request.update(**body)
request.date_submitted = now()
request.delivery_info = {
"exchange": exchange,
"routing_key": routing_key
}
backend.store_result(
body["id"], None, status, traceback=None, request=request)
if status == SCHEDULED:
raise SchedulingStopPublishing(task_id=body["id"])
```
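The signal handler and monkey patch above are switched on entirely through Django settings. A minimal, illustrative `settings.py` fragment using the setting names read by the `getattr()` calls above (the values are assumptions, not project defaults):

```python
# settings.py -- illustrative values; the setting names match the getattr() calls above
DJANGO_CELERY_FULLDBRESULT_TRACK_PUBLISH = True       # store a PENDING result as soon as a task is published
DJANGO_CELERY_FULLDBRESULT_SCHEDULE_ETA = True        # store ETA tasks as SCHEDULED instead of sending them to the broker
DJANGO_CELERY_FULLDBRESULT_MONKEY_PATCH_ASYNC = True  # patch Task.apply_async so interception still returns an AsyncResult
```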
#### File: management/commands/find_stale_scheduled_tasks.py
```python
from datetime import timedelta
from django.core.management import BaseCommand
from django_celery_fulldbresult.models import TaskResultMeta
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
"--days",
action="store",
dest="days",
type=int,
default=0,
help="max days before a scheduled task is stale")
parser.add_argument(
"--seconds",
action="store",
dest="seconds",
type=int,
default=0,
help="max seconds before a task is stale")
parser.add_argument(
"--microseconds",
action="store",
dest="microseconds",
type=int,
default=0,
help="max microseconds before a task is stale")
parser.add_argument(
"--minutes",
action="store",
dest="minutes",
type=int,
default=0,
help="max minutes before a task is stale")
parser.add_argument(
"--hours",
action="store",
dest="hours",
type=int,
default=0,
help="max hours before a task is stale")
parser.add_argument(
"--weeks",
action="store",
dest="weeks",
type=int,
default=0,
help="max weeks before a task is stale")
def handle(self, *args, **options):
delta = timedelta(
days=options["days"], seconds=options["seconds"],
microseconds=options["microseconds"], minutes=options["minutes"],
hours=options["hours"], weeks=options["weeks"])
tasks = TaskResultMeta.objects.get_stale_scheduled_tasks(
delta)
print("Stale scheduled tasks:")
for task in tasks:
print(" {0} - {1}: {2}".format(
task.date_done, task.task_id, task.task))
```
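A hypothetical way to invoke the command above from code, using Django's `call_command` with the option names defined in `add_arguments` (the thresholds are illustrative):

```python
# Illustrative usage of the management command defined above
from django.core.management import call_command

# Report scheduled tasks that have been sitting for more than 2 days and 12 hours
call_command("find_stale_scheduled_tasks", days=2, hours=12)
```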
#### File: management/commands/find_stale_tasks.py
```python
from datetime import timedelta
from django.core.management import BaseCommand
from django_celery_fulldbresult.managers import TERMINAL_STATES
from django_celery_fulldbresult.models import TaskResultMeta
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
"--days",
action="store",
dest="days",
type=int,
default=0,
help="max days before a scheduled task is stale")
parser.add_argument(
"--seconds",
action="store",
dest="seconds",
type=int,
default=0,
help="max seconds before a task is stale")
parser.add_argument(
"--microseconds",
action="store",
dest="microseconds",
type=int,
default=0,
help="max microseconds before a task is stale")
parser.add_argument(
"--minutes",
action="store",
dest="minutes",
type=int,
default=0,
help="max minutes before a task is stale")
parser.add_argument(
"--hours",
action="store",
dest="hours",
type=int,
default=0,
help="max hours before a task is stale")
parser.add_argument(
"--weeks",
action="store",
dest="weeks",
type=int,
default=0,
help="max weeks before a task is stale")
parser.add_argument(
"--terminal-state",
action="append",
dest="terminal_states",
help="State considered terminal (non-stale). can be repeated.")
def handle(self, *args, **options):
delta = timedelta(
days=options["days"], seconds=options["seconds"],
microseconds=options["microseconds"], minutes=options["minutes"],
hours=options["hours"], weeks=options["weeks"])
acceptable_states = options["terminal_states"]
if not acceptable_states:
acceptable_states = TERMINAL_STATES
tasks = TaskResultMeta.objects.get_stale_tasks(
delta, acceptable_states).order_by("date_done")
print("Stale tasks:")
for task in tasks:
print(" {0} - {1}: {2}".format(
task.date_done, task.task_id, task.task))
```
#### File: django-celery-fulldbresult/django_celery_fulldbresult/result_backends.py
```python
from __future__ import absolute_import, unicode_literals
from djcelery.backends.database import DatabaseBackend
from django_celery_fulldbresult.models import TaskResultMeta
from django_celery_fulldbresult import serialization
class DatabaseResultBackend(DatabaseBackend):
"""Database backend that stores enough task metadata to retry the task.
"""
TaskModel = TaskResultMeta
def _store_result(self, task_id, result, status, traceback, request=None):
if request:
args = serialization.dumps(request.args)
kwargs = serialization.dumps(request.kwargs)
# Sometimes, request.task is an object and not a str, so the right
# solution is:
# 1. if request.name exists (this is a Request object), use it.
# 2. Otherwise, use request.task (this will be a string from the
# Context object)
task = getattr(request, "name", None) or request.task
expires = request.expires
delivery_info = request.delivery_info or {}
routing_key = delivery_info.get("routing_key")
exchange = delivery_info.get("exchange")
hostname = request.hostname
date_submitted = getattr(request, "date_submitted", None)
eta = request.eta
else:
args = []
kwargs = {}
task = ""
expires = None
routing_key = None
exchange = None
hostname = None
date_submitted = None
eta = None
self.TaskModel._default_manager.store_result(
task_id, result, status,
traceback=traceback, children=self.current_task_children(request),
task=task, args=args, kwargs=kwargs, expires=expires,
routing_key=routing_key, exchange=exchange, hostname=hostname,
date_submitted=date_submitted, eta=eta)
return result
```
#### File: django-celery-fulldbresult/django_celery_fulldbresult/serialization.py
```python
from kombu.serialization import registry as k_registry
from kombu.exceptions import (EncodeError, DecodeError)
def dumps(data):
"""Serializes data using Kombu serializer.
"""
try:
# dumps will convert any strings into json-compatible strings.
content_type, encoding, data = k_registry.dumps(
data, serializer="json")
except EncodeError as e:
raise TypeError(e)
return data
def loads(data, content_type="application/json", encoding="utf-8"):
"""Deserializes data using Kombu deserializer, default format is JSON.
"""
try:
return k_registry.loads(data, content_type, encoding)
except DecodeError as e:
raise TypeError(e)
``` |
{
"source": "joncastro/contrail-ansible-deployer",
"score": 2
} |
#### File: playbooks/filter_plugins/ctrl_data_intf_dict.py
```python
from ansible.errors import AnsibleFilterError
import ipaddress
class FilterModule(object):
def filters(self):
return {
'ctrl_data_intf_dict': self.ctrl_data_intf_dict,
'mgmt_intf_dict': self.mgmt_intf_dict
}
@staticmethod
def get_host_ctrl_data_nw_if(my_ip, my_vars, cidr):
ctrl_data_nw = ipaddress.ip_network(cidr)
for iface in my_vars.get('ansible_interfaces',[]):
if_str = 'ansible_' + iface
if_ipv4 = my_vars.get(if_str, {}).get('ipv4', None)
if if_ipv4 and \
ipaddress.ip_address(if_ipv4['network']) == \
ctrl_data_nw.network_address:
return iface
return None
# Tries to detect control-data-intf for each host in the instances
# definition. The host specific 'network_interface' takes the highest
# precedence. Next, CONTROL_DATA_NET_LIST if defined (could be a list of
# comma separated CIDR subnet definitions) will be used to pick the
# interface corresponding to the IP address on each host that falls within
# this subnet range.
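    # e.g. CONTROL_DATA_NET_LIST: "192.168.10.0/24,192.168.20.0/24" (illustrative CIDRs)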
def ctrl_data_intf_dict(self, instances, contrail_config,
kolla_config, hostvars):
host_intf = {}
kolla_globals = kolla_config.get('kolla_globals', {})
for k,v in instances.iteritems():
tmp_intf = kolla_globals.get('network_interface', None)
if tmp_intf != None:
host_intf[v['ip']] = tmp_intf
subnet_list = None
subnet_list_str = contrail_config.get('CONTROL_DATA_NET_LIST', None)
if subnet_list_str:
subnet_list = subnet_list_str.split(',')
if subnet_list:
for subnet in subnet_list:
tmp_intf = FilterModule.get_host_ctrl_data_nw_if(v['ip'],
hostvars.get(v['ip'], {}), subnet)
if tmp_intf != None:
host_intf[v['ip']] = tmp_intf
break
for i,j in v.get('roles', {}).iteritems():
if j is not None:
tmp_intf = j.get('network_interface', None)
if tmp_intf != None:
host_intf[v['ip']] = tmp_intf
return host_intf
def mgmt_intf_dict(self, instances, contrail_config,
kolla_config, hostvars):
host_intf = {}
kolla_globals = kolla_config.get('kolla_globals', {})
for k,v in instances.iteritems():
for i in hostvars.get(v['ip'], {}).get('ansible_interfaces', []):
if_str = 'ansible_' + i
if_ipv4 = hostvars[v['ip']].get(if_str, {}).get('ipv4', None)
if if_ipv4 and if_ipv4.get('address', None) == v['ip']:
host_intf[v['ip']] = i
tmp_intf = kolla_globals.get('kolla_external_vip_interface', None)
if tmp_intf != None:
host_intf[v['ip']] = tmp_intf
for i,j in v.get('roles', {}).iteritems():
if j is not None:
tmp_intf = j.get('kolla_external_vip_interface', None)
if tmp_intf != None:
host_intf[v['ip']] = tmp_intf
return host_intf
``` |
{
"source": "joncatanio/cannoli",
"score": 4
} |
#### File: suite/test04/test.py
```python
class Test:
var = 2
def __init__(self, x, y):
self.x = x
self.y = y
def method(self):
print("method: self.x = " + str(self.x))
print("method: self.y = " + str(self.y))
print("Updating self.x")
self.x = "`method` updated my value"
def swapxy(self):
temp = self.x
self.x = self.y
self.y = temp
int_val = 5
obj1 = Test("x value", "y value")
obj2 = obj1
print("Updating obj1.var ...")
obj1.var = 4
print("obj1.var: " + str(obj1.var))
print("obj2.var: " + str(obj2.var))
print("Updating obj2.x ...")
print("PRE obj1.x: " + str(obj1.x))
print("PRE obj2.x: " + str(obj2.x))
obj2.x = "changed string"
print("POST obj1.x: " + str(obj1.x))
print("POST obj2.x: " + str(obj2.x))
print("Assign obj2.var to variable 'a'")
a = obj2.var
print("a: " + str(a))
print("Modify 'a' to show that obj2.var won't be effected")
a = 15
print("a: " + str(a))
print("obj2.var: " + str(obj2.var))
print("Calling obj1.method() ...")
obj1.method()
print("State of obj1 & obj2 after call")
print("obj1.x: " + str(obj1.x) + " obj1.y: " + str(obj1.y) + " obj1.var: " + str(obj1.var))
print("obj2.x: " + str(obj2.x) + " obj2.y: " + str(obj2.y) + " obj2.var: " + str(obj2.var))
print("Calling obj1.swapxy() ...")
obj1.swapxy()
print("obj1.x: " + str(obj1.x) + " obj1.y: " + str(obj1.y) + " obj1.var: " + str(obj1.var))
print("obj2.x: " + str(obj2.x) + " obj2.y: " + str(obj2.y) + " obj2.var: " + str(obj2.var))
```
#### File: suite/test07/test.py
```python
class Node:
def __init__(self, val, next):
self.val = val
self.next = next
lst = Node(1, Node(5, Node(8, Node(-1, None))))
temp = lst
while temp:
print(temp.val)
temp = temp.next
```
#### File: suite/test09/other_mod.py
```python
import some_mod
def functione(b):
a = some_mod.some_class()
print(b)
print("othermod calling in " + str(a.hello))
```
#### File: suite/test13/test.py
```python
def get_int(x):
return x
a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
b = a[:]
c = 4
b.append(-1)
print(a)
print(b)
print(a[1:5])
print(a[1:12])
print(a[0:11])
print(a[1:24])
print(a[-5:10])
print(a[-1])
print(a[1:-2])
print(a[1:0])
print(a[1:])
print(a[:5])
print(a[c:])
print(a[:get_int(9)])
print(a[-111111:-222222222])
print(a[5:1])
print(a[5:1])
print(a[5:1])
print(a[:])
print(a[1:4])
print(a[-1:-4])
print(a[-1:-4])
print(a[-1:])
print(a[-1:-4])
print(a[-1:])
print(a[1:-5])
print(a[:-5])
print(a[10:-5])
print(a[:])
print(a[0:-2])
``` |
{
"source": "joncave/CLI",
"score": 2
} |
#### File: factioncli/commands/credentials.py
```python
from cliff.lister import Lister
from factioncli.processing.config import get_passwords
class Credentials(Lister):
"Returns a list of the default credentials for this instance of Faction"
def take_action(self, parsed_args):
passwords = get_passwords()
return ("Type", "Username", "Password"), passwords
```
#### File: processing/docker/container.py
```python
import logging
import docker
from docker.models.containers import Container
from factioncli.processing.cli import log
from factioncli.processing.cli.printing import error_out
client = docker.from_env()
class container_status:
name = ""
status = ""
ip_address = ""
message = ""
created = ""
def get_container(container_name):
log.debug("Searching for container named: {0}".format(container_name))
containers = client.containers.list()
for container in containers:
if container.attrs['Name'] == "/{0}".format(container_name):
return container
log.debug("Could not find container named: {0}".format(container_name))
return None
def get_container_ip_address(container_name, network_name='faction_default'):
log.debug("Getting IP for container named {0} on network {1}".format(container_name, network_name))
container = get_container(container_name)
if container:
return container.attrs["NetworkSettings"]["Networks"][network_name]['IPAddress']
else:
return None
def start_container(container):
log.debug("Stopping container: {0}".format(container.attrs["Name"]))
if isinstance(container, Container):
if container.status == 'running':
log.debug("Container {0} is not running. No need to stop it")
else:
container.start()
else:
error_out("{0} is not a container object".format(container))
def stop_container(container):
log.debug("Stopping container: {0}".format(container.attrs["Name"]))
if isinstance(container, Container):
if container.status == 'running':
container.stop()
else:
log.debug("Container {0} is not running. No need to stop it")
else:
error_out("{0} is not a container object".format(container))
def restart_container(container):
log.debug("Stopping container: {0}".format(container.attrs["Name"]))
if isinstance(container, Container):
if container.status == 'running':
container.restart()
else:
log.debug("Container {0} is not running. No need to stop it")
else:
error_out("{0} is not a container object".format(container))
def remove_container(container):
log.debug("Stopping container: {0}".format(container.attrs["Name"]))
if isinstance(container, Container):
if container.status == 'running':
container.stop()
else:
log.debug("Container {0} is not running. No need to stop it")
else:
error_out("{0} is not a container object".format(container))
def execute_container_command(container, command):
log.debug("Executing {0} against container: {1}".format(command, container.attrs["Name"]))
if isinstance(container, Container):
if container.status == 'running':
return container.exec_run(command)
else:
error_out("Container {0} is not running. Can not execute commands against it")
error_out("{0} is not a container object".format(container))
def get_container_status(container_name, network_name='faction_default'):
container = get_container(container_name)
if container:
status = container_status
container_name = container.attrs["Name"]
if container_name[0] == "/":
container_name = container_name[1:]
status.name = container_name
status.status = container.status
status.ip_address = container.attrs["NetworkSettings"]["Networks"][network_name]['IPAddress']
status.created = container.attrs["Created"]
return status
```
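A minimal sketch of how these helpers compose; the container name `faction_api` is an assumption for illustration:

```python
# Illustrative only: the container name is an assumption
status = get_container_status("faction_api")
if status:
    print("{0} is {1} at {2} (created {3})".format(
        status.name, status.status, status.ip_address, status.created))

api = get_container("faction_api")
if api:
    restart_container(api)
```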
#### File: processing/setup/networking.py
```python
import fcntl # fcntl is unix only
import socket
import struct
def get_nics():
return socket.if_nameindex()
def get_hw_addr(ifname):
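    # Note: SIOCGIFADDR (0x8915) returns the interface's IPv4 address, not its MAC,
    # so despite the name this helper yields an IP; get_ip_addresses() below relies on that.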
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', bytes(ifname[:15],'utf-8'))
)[20:24])
def get_ip_addresses():
ip_addresses = {}
res = get_nics()
for r in res:
ip_addresses[r[1]] = get_hw_addr(r[1])
return ip_addresses
``` |
{
"source": "joncave/Trac-Cookie-Revocation",
"score": 2
} |
#### File: Trac-Cookie-Revocation/cookierevocation/revoke.py
```python
from pkg_resources import resource_filename
from trac.core import *
from trac.admin import IAdminPanelProvider
from trac.web.chrome import ITemplateProvider, add_notice, add_warning
from trac.util.text import exception_to_unicode
class CookieRevocation(Component):
implements(IAdminPanelProvider, ITemplateProvider)
# IAdminPanelProvider methods
def get_admin_panels(self, req):
"""Add the CookieRevocation admin panel"""
if 'TRAC_ADMIN' in req.perm:
yield ('general', 'General', 'revoke-cookie', 'Revoke A Cookie')
def render_admin_panel(self, req, cat, page, version):
"""Render the CookieRevocation admin panel and handle POST requests to
delete a user's trac_auth cookie from the auth_cookie table.
"""
req.perm.require('TRAC_ADMIN')
if req.method == 'POST':
user = req.args.get('user')
if user:
@self.env.with_transaction()
def revoke_auth_cookie(db):
cursor = db.cursor()
cursor.execute("DELETE FROM auth_cookie WHERE name = %s", (user,))
self.log.info('Cookie revoked for user %s', user)
add_notice(req, 'Cookie revoked!')
else:
add_warning(req, 'You did not provide a username')
return 'revoke.html', {}
# ITemplateProvider methods
def get_htdocs_dirs(self):
"""Return the absolute path of a directory containing additional
static resources (such as images, style sheets, etc).
"""
return []
def get_templates_dirs(self):
"""Return the absolute path of the directory containing the provided
Genshi templates.
"""
return [resource_filename(__name__, 'templates')]
``` |
{
"source": "joncdavid/pymol-extras",
"score": 4
} |
#### File: pymol-extras/3point-planar-alignment/3point_planar_align.py
```python
import numpy as np
import sys
USAGE_STR = """
ERROR: Not enough input arguments.
USAGE:
python3 3point_planar_align.py <fp_name> <fm_name> <of_name>
where,
<fp_name> is the csv of three 3d points to align with XZ-plane, y=0;
<fm_name> is the csv of all atom 3d points;
<of_name> is the output csv of all the rotated 3d points;
"""
def read(fp_name, fm_name):
"""Reads fp_name, csv file of three 3d points; and fm_name, csv file of all atom 3d points. Returns (P,M), where P is a matrix of three 3d points, and M is a matrix of all atom 3d points."""
P = np.loadtxt(fp_name, delimiter=',')
M = np.loadtxt(fm_name, delimiter=',')
return (P,M)
def rotate_all_points(T,X):
"""Rotates all points in X using transformation matrix T."""
#return T.dot(X)
#return X.dot(T)
return T.dot(X.transpose())
def find_angle_between_vectors(v,w):
"""Finds the angle between vectors v,w."""
v_magnitude = np.linalg.norm(v)
w_magnitude = np.linalg.norm(w)
print("--------------------")
print("(DEBUG) v: {}, type(w): {}, shape: {}".format(v, type(v), v.shape))
print("(DEBUG) w: {}, type(w): {}, shape: {}".format(w, type(w), w.shape))
print("(DEBUG) v.dot(w): {}, type: {}".format(v.dot(w), type(v.dot(w))))
print("(DEBUG) v_magnitude: {}, type: {}".format(v_magnitude, type(v_magnitude)))
print("(DEBUG) w_magnitude: {}, type: {}".format(w_magnitude, type(w_magnitude)))
theta = np.arccos( v.dot(w) / (v_magnitude*w_magnitude) )
print("theta = {}".format( theta ))
return theta
def align_vec1_to_XY_plane(v):
"""Align v to the XY-plane, z=0 by rotating about the y-axis"""
    v_proj_onto_XY_plane = np.array([v[0],v[1],0]) ## proj_XY(v), where z=0
x_std_vec = np.array([1,0,0])
#theta = find_angle_between_vectors(v,x_std_vec)
theta = find_angle_between_vectors(v_proj_onto_XY_plane, x_std_vec)
Ry = np.array([[np.cos(theta), 0, np.sin(theta)],
[0,1,0],
[-1*np.sin(theta), 0, np.cos(theta)]])
return Ry
def align_vec1_to_x_axis(v):
"""Aligns v to the X-axis."""
x_std_vec = np.array([1,0,0])
theta = find_angle_between_vectors(v, x_std_vec)
Rz = np.array([[np.cos(theta), -1*np.sin(theta),0],
[np.sin(theta), np.cos(theta),0],
[0,0,1]])
return Rz
def align_vec2_to_YZ_plane(v):
"""Aligns v to the YZ-plane, y=0."""
#z_std_vec = np.array([0,0,1])
y_std_vec = np.array([0,1,0])
v_proj_onto_YZ_plane = np.array([0,v[1],v[2]]) ## proj_YZ(v), where x=0
#theta = find_angle_between_vectors(v_proj_onto_YZ_plane, z_std_vec)
theta = find_angle_between_vectors(v_proj_onto_YZ_plane, y_std_vec)
#theta = find_angle_between_vectors(v, z_std_vec)
#theta = -1.0*theta
Rx = np.array([[1,0,0],
[0,np.cos(theta),-1*np.sin(theta)],
[0,np.sin(theta),np.cos(theta)]])
return Rx
##===== main ====
if len(sys.argv) < 4:
print(USAGE_STR)
exit()
fp_name = sys.argv[1]
fm_name = sys.argv[2]
of_name = sys.argv[3]
(P,M) = read(fp_name, fm_name)
p1 = P[0,:]
p2 = P[1,:]
p3 = P[2,:]
p1p2 = p2 - p1 #p1p2 and p1p3 lie on the same plane
p1p3 = p3 - p1
p1p2_orig = p1p2 - p1 #p1p2_orig, p1p3_orig have their base at origin
p1p3_orig = p1p3 - p1
print("p1p2: {}".format(p1p2))
print("p1p3: {}".format(p1p3))
print("p1p2_orig: {}".format(p1p2_orig))
print("p1p3_orig: {}".format(p1p3_orig))
##--- find series of transformations
T1 = align_vec1_to_XY_plane(p1p2_orig) # rotate p1p2_orig about y-axis
T2 = align_vec1_to_x_axis( T1.dot(p1p2_orig) ) # now rotate about z-axis
T3 = align_vec2_to_YZ_plane( T2.dot(T1.dot(p1p3_orig)) ) # rotate p1p3_orig about x-axis
T = T3.dot(T2.dot(T1))
print("---- begin validation of transformations ----")
print("p1p2 as aligned to XY-plane: {}".format(T1.dot(p1p2_orig)))
print("p1p2 as aligned to X-plane: {}".format(T2.dot(T1.dot(p1p2_orig))))
print("p1p3 as aligned to XZ-plane: {}".format(T.dot(p1p3_orig)))
##--- rotate all points using those transformations
numPoints = max(M.shape)
X_orig = M - p1*np.ones(numPoints) # move points towards origin
X_orig_rot = rotate_all_points(T,X_orig) # rotate
X_rot = X_orig_rot + p1*np.ones(numPoints) # move away from origin
print("\n---- begin debug points -----")
print("\n---- M ----")
print("{}".format(M))
print("\n---- M moved towards origin ----")
print("{}".format(X_orig))
print("\n---- M rotated ----")
print("{}".format(X_orig_rot))
print("\n---- M rotated, moved away from origin ----")
print("{}".format(X_rot))
np.savetxt(of_name, X_rot, delimiter=',')
``` |
{
"source": "jon-chai/ga-drone-pathing",
"score": 3
} |
#### File: jon-chai/ga-drone-pathing/Drone_GA.pyde
```python
import random as rand
class Vehicle:
mr = 0.2
# Food weight, Poison weight, Food percep, Poison percep, FOV, Drone weight, Drone percep
feature_creation = [lambda x: random(0, 2), lambda x:random(-2, 0), lambda x:random(20, 100),
lambda x:random(20, 100), lambda x:random(PI/6, PI/2), lambda x:random(-2, 0),
lambda x:random(20, 100)]
def __init__(self, x=None, y=None, dna=None):
if x is None:
x = random(width)
if y is None:
y = random(height)
self.position = PVector(x, y)
self.velocity = PVector(0, -2)
self.acceleration = PVector(0, 0)
self.r = 4
self.maxspeed = 5
self.maxforce = 0.5
self.points = 0
self.alive = True
if dna is None:
self.dna = [feature(0) for feature in self.feature_creation]
else:
self.dna = dna[:]
def update(self):
# self.points += 0.01
self.velocity.add(self.acceleration)
self.velocity.limit(self.maxspeed)
self.position.add(self.velocity)
self.acceleration.mult(0)
def applyForce(self, force):
self.acceleration.add(force)
def seek(self, target):
desired = PVector.sub(target, self.position)
desired.setMag(self.maxspeed)
steer = PVector.sub(desired, self.velocity)
steer.limit(self.maxforce)
return steer
def behaviors(self, good, bad, drones, idx):
steerG = self.eat(good, 1, self.dna[2])
steerB = self.eat(bad, -1, self.dna[3])
steerD = self.crash(drones, idx, self.dna[6])
steerG.mult(self.dna[0])
steerB.mult(self.dna[1])
steerD.mult(self.dna[5])
self.applyForce(steerG)
self.applyForce(steerB)
self.applyForce(steerD)
# def clone(self):
# if random(1) < 0.005:
# return Vehicle(self.position.x, self.position.y, self.dna)
def mutate(self):
for idx, gene in enumerate(self.dna):
if random(1) < self.mr:
self.dna[idx] = self.feature_creation[idx](0)
def eat(self, things, nutrition, perception):
record = width+height
closest = None
theta = self.velocity.heading2D();
a = theta - self.dna[4]/2
b = theta + self.dna[4]/2
for _, item in enumerate(things):
d = self.position.dist(item)
if d < 3:
if nutrition < 0:
self.alive = False
else:
things.remove(item)
self.points += nutrition
elif d < record and d < perception:
if nutrition < 0:
dir = PVector.sub(item, self.position)
if a < atan(dir.y/(dir.x + 0.0001)) < b:
record = d
closest = item
else:
record = d
closest = item
if closest is not None:
return self.seek(closest)
return PVector(0, 0)
def crash(self, things, idx, perception):
record = width + height
closest = None
for thing in things[idx+1:]:
d = self.position.dist(thing.position)
if d < 5:
self.alive = False
thing.alive = False
elif d < record and d < perception:
record = d
closest = thing
if closest is not None:
return self.seek(closest.position)
return PVector(0, 0)
# def dead(self):
# return self.health <= 0
def boundaries(self, d):
desired = None
if self.position.x < d:
desired = PVector(self.maxspeed, self.velocity.y)
elif self.position.x > width - d:
desired = PVector(-self.maxspeed, self.velocity.y)
if self.position.y < d:
desired = PVector(self.velocity.x, self.maxspeed)
elif self.position.y > height - d:
desired = PVector(self.velocity.x, -self.maxspeed)
if desired is not None:
desired.normalize()
desired.mult(self.maxspeed)
steer = PVector.sub(desired, self.velocity)
steer.limit(self.maxforce)
self.applyForce(steer)
def display(self):
theta = self.velocity.heading2D() + PI/2;
pushMatrix();
translate(self.position.x, self.position.y);
rotate(theta);
if debug:
noFill()
strokeWeight(3)
stroke(0, 255, 0)
# Green ring
line(0, 0, 0, -self.dna[0] * 20)
strokeWeight(2)
circle(0, 0, self.dna[2] * 2)
stroke(255, 0, 0)
# Red cone
line(0, 0, 0, -self.dna[1] * 20)
fill(255,0,0,75)
noStroke()
arc(0, 0, self.dna[3] * 2, self.dna[3] * 2, -self.dna[4]/2 - PI/2, self.dna[4]/2 - PI/2)
noFill()
stroke(0, 100, 200)
strokeWeight(3)
circle(0, 0, self.dna[6] * 2)
line(0, 0, 0, -self.dna[5] * 20)
# col = lerpColor(color(0, 255, 0), color(255, 0, 0), self.points)
col = color(255, 255, 255)
fill(col);
stroke(col);
strokeWeight(1);
beginShape();
vertex(0, -self.r*2);
vertex(-self.r, self.r*2);
vertex(self.r, self.r*2);
endShape(CLOSE);
popMatrix();
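# Next-generation helper: crossover_mid() performs single-point crossover at the midpoint of
# two randomly sampled parents' DNA, mutates each child, and then copies the surviving parents
# back into the population unchanged (elitism).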
def crossover_mid(mating_pool):
global num_vehicles
children = []
crossover_point = int(len(mating_pool[0].dna) / 2)
for i in range(num_vehicles - len(mating_pool)):
parents = rand.sample(mating_pool, 2)
par0 = parents[0].dna[:crossover_point]
par1 = parents[1].dna[crossover_point:]
child_genome = par0 + par1
children.append(Vehicle(dna=child_genome))
children[-1].mutate()
for parent in mating_pool:
children.append(Vehicle(random(width), random(height), parent.dna[:]))
return children
def create_board(num_food, num_poison, bound):
food = [PVector(random(bound, width-bound), random(bound, height-bound)) for i in range(num_food)]
poison = [PVector(random(bound, width-bound), random(bound, height-bound)) for i in range(num_poison)]
# for y in range(200, 400, 5):
# poison.append(PVector(200, y))
return food, poison
def create_wall(x1, y1, x2, y2):
global poison
xs = []
for i in range(x1, x2, 5):
xs.append(i)
ys = []
# for i in range(
def setup():
global vehicles, food, poison, debug, gen, counter, num_vehicles, bound, best_drones
size(1200, 800)
num_vehicles = 20
bound = 25
vehicles = [Vehicle(random(width), random(height)) for i in range(num_vehicles)]
food, poison = create_board(120, 20, bound)
debug = True
gen = 0
counter = 0
best_drones = []
# with open('results.txt', 'w') as f:
# f.write('')
def mouseClicked():
global debug
debug = not debug
# def mouseDragged():
# poison.append(PVector(mouseX, mouseY))
def keyPressed():
if key == 'q':
saveStrings('results.txt', best_drones)
exit()
def asort(seq):
return sorted(range(len(seq)), key=seq.__getitem__)
def draw():
global gen, counter, vehicles, food, poison, bound, best_drones
counter += 1
if counter > 400 or len(vehicles) == 0 or len(food) == 0:
gen += 1
print('Generation: {}'.format(gen))
counter = 0
sorted_vehicles = sorted(vehicles, key=lambda x: x.points, reverse=True)
while len(sorted_vehicles) < 8:
sorted_vehicles.append(Vehicle())
best_vehicles = sorted_vehicles[:8]
print([v.points for v in best_vehicles])
best_drones.append('{};{}'.format(best_vehicles[0].dna, best_vehicles[0].points))
vehicles = crossover_mid(best_vehicles)
food, poison = create_board(120 + 10 * gen, 20 + 5 * gen, bound)
background(51)
# if random(1) < 0.1:
# food.append(PVector(random(bound, width-bound), random(bound, height-bound))
# if random(1) < 0.02:
# poison.append(PVector(random(width), random(height)))
for item in food:
fill(0, 255, 0)
noStroke()
ellipse(item.x, item.y, 8, 8)
for item in poison:
fill(255, 0, 0)
noStroke()
ellipse(item.x, item.y, 8, 8)
for idx, v in enumerate(vehicles):
v.boundaries(bound)
v.behaviors(food, poison, vehicles, idx)
v.update()
v.display()
for _, v in enumerate(vehicles):
if not v.alive:
vehicles.remove(v)
``` |
{
"source": "jonchang/recordpeeper",
"score": 3
} |
#### File: recordpeeker/bin/mitmdump_input.py
```python
import socket
import time
import requests
from recordpeeker import json_decode
from recordpeeker.dispatcher import Dispatcher
def enter_dungeon(data, flow):
global args
start_time = time.time()
dungeon_request = flow.request.content
headers = flow.request.headers
enter_url = flow.request.url
leave_url = enter_url.replace("enter_dungeon", "leave_dungeon")
name = data.get("enemy", dict(name="???", memory_factor="0")).get("name")
resp = None
while args.find not in name:
if time.time() - start_time > 28: ## times out after 30 seconds
print "Took too long! Entering the dungeon so you don't get kicked out."
return
print "Opponent is {0}, retrying...".format(name)
resp = requests.post(leave_url, headers=headers, data=dungeon_request)
if resp.status_code != requests.codes.ok: resp.raise_for_status()
resp = requests.post(enter_url, headers=headers, data=dungeon_request)
if resp.status_code != requests.codes.ok: resp.raise_for_status()
data = json_decode(resp.content)
name = data.get("enemy", dict(name="???", memory_factor="0")).get("name")
print "Found {0}! Entering the dungeon now...".format(name)
if resp is not None:
flow.response.content = resp.content
def start(context, argv):
global args
from recordpeeker.bin.command_line import parse_args
args = parse_args(argv)
ips = set([ii[4][0] for ii in socket.getaddrinfo(socket.gethostname(), None) if ii[4][0] != "127.0.0.1"])
print "Configure your phone's proxy to point to this computer, then visit mitm.it"
print "on your phone to install the interception certificate.\n"
print "Record Peeker is listening on port {0}, on these addresses:".format(args.port)
print "\n".join([" * {0}".format(ip) for ip in ips])
print ""
print "forever24 is looking for '{0}' (case-sensitive).".format(args.find)
print "Waiting for you to enter the Magitek Facility..."
global dp
dp = Dispatcher('ffrk.denagames.com')
dp.register('/dff/event/coliseum/6/enter_dungeon', enter_dungeon, flow=True)
[dp.ignore(path, regex) for path, regex in ignored_requests]
ignored_requests = [
('/dff/', True),
('/dff/splash', False),
('/dff/?timestamp', False),
('/dff/battle/?timestamp', False),
]
def response(context, flow):
global args
global dp
dp.handle(flow, args)
```
#### File: recordpeeper/recordpeeker/mitmdump_input.py
```python
import json
import shlex
import os
import socket
import heapq
from collections import OrderedDict, defaultdict
from libmproxy.protocol.http import decoded
from tabulate import tabulate
from recordpeeker import Equipment, ITEMS, BATTLES, DUNGEONS, slicedict, best_equipment
from recordpeeker.dispatcher import Dispatcher
def get_display_name(enemy):
for child in enemy["children"]:
for param in child["params"]:
return param.get("disp_name", "Unknown Enemy")
def get_drops(enemy):
for child in enemy["children"]:
for drop in child["drop_item_list"]:
yield drop
def handle_get_battle_init_data(data):
battle_data = data["battle"]
battle_id = battle_data["battle_id"]
battle_name = BATTLES.get(battle_id, "battle #" + battle_id)
print "Entering {0}".format(battle_name)
all_rounds_data = battle_data['rounds']
tbl = [["rnd", "enemy", "drop"]]
for round_data in all_rounds_data:
round = round_data.get("round", "???")
for round_drop in round_data["drop_item_list"]:
item_type = int(round_drop.get("type", 0))
if item_type == 21:
itemname = "potion"
elif item_type == 22:
itemname = "hi-potion"
elif item_type == 23:
itemname = "x-potion"
elif item_type == 31:
itemname = "ether"
elif item_type == 32:
itemname = "turbo ether"
else:
itemname = "unknown"
tbl.append([round, "<round drop>", itemname])
for enemy in round_data["enemy"]:
had_drop = False
enemyname = get_display_name(enemy)
for drop in get_drops(enemy):
item_type = drop.get("type", 0)
if item_type == 11:
itemname = "{0} gil".format(drop.get("amount", 0))
elif item_type == 41 or item_type == 51:
type_name = "orb id#" if item_type == 51 else "equipment id#"
item = ITEMS.get(drop["item_id"], type_name + drop["item_id"])
itemname = "{0}* {1}".format(drop.get("rarity", 1), item)
elif item_type == 61:
itemname = "event item"
else:
itemname = "unknown"
had_drop = True
tbl.append([round, enemyname, itemname])
if not had_drop:
tbl.append([round, enemyname, "nothing"])
print tabulate(tbl, headers="firstrow")
print ""
def handle_party_list(data):
wanted = "name series_id acc atk def eva matk mdef mnd series_acc series_atk series_def series_eva series_matk series_mdef series_mnd"
topn = OrderedDict()
topn["atk"] = 5
topn["matk"] = 2
topn["mnd"] = 2
topn["def"] = 5
find_series = [101001, 102001, 103001, 104001, 105001, 106001, 107001, 108001, 110001, 113001]
equips = defaultdict(list)
for item in data["equipments"]:
kind = item.get("equipment_type", 1)
heapq.heappush(equips[kind], Equipment(slicedict(item, wanted)))
for series in find_series:
print "Best equipment for FF{0}:".format((series - 100001) / 1000)
# Need to use lists for column ordering
tbl = ["stat n weapon stat n armor stat n accessory".split()]
tbldata = [[],[],[],[]]
for itemtype in range(1, 4): ## 1, 2, 3
for stat, count in topn.iteritems():
for equip in best_equipment(series, equips[itemtype], stat, count):
name = equip["name"].replace(u"\uff0b", "+")
tbldata[itemtype].append([stat, equip[stat], name])
# Transpose data
for idx in range(0, len(tbldata[1])):
tbl.append(tbldata[1][idx] + tbldata[2][idx] + tbldata[3][idx])
print tabulate(tbl, headers="firstrow")
print ""
def handle_dungeon_list(data):
tbl = []
world_data = data["world"]
world_id = world_data["id"]
world_name = world_data["name"]
print "Dungeon List for {0} (id={1})".format(world_name, world_id)
dungeons = data["dungeons"]
for dungeon in dungeons:
name = dungeon["name"]
id = dungeon["id"]
difficulty = dungeon["challenge_level"]
type = "ELITE" if dungeon["type"] == 2 else "NORMAL"
tbl.append([name, id, difficulty, type])
tbl = sorted(tbl, key=lambda row : int(row[1]))
tbl.insert(0, ["Name", "ID", "Difficulty", "Type"])
print tabulate(tbl, headers="firstrow")
def handle_battle_list(data):
tbl = [["Name", "Id", "Rounds"]]
dungeon_data = data["dungeon_session"]
dungeon_id = dungeon_data["dungeon_id"]
dungeon_name = dungeon_data["name"]
dungeon_type = int(dungeon_data["type"])
world_id = dungeon_data["world_id"]
print "Entering dungeon {0} ({1})".format(dungeon_name, "Elite" if dungeon_type==2 else "Normal")
battles = data["battles"]
for battle in battles:
tbl.append([battle["name"], battle["id"], battle["round_num"]])
print tabulate(tbl, headers="firstrow")
def handle_survival_event(data):
# XXX: This maybe works for all survival events...
enemy = data.get("enemy", dict(name="???", memory_factor="0"))
name = enemy.get("name", "???")
factor = float(enemy.get("memory_factor", "0"))
print "Your next opponent is {0} (x{1:.1f})".format(name, factor)
def start(context, argv):
global args
from recordpeeker.command_line import parse_args
args = parse_args(argv)
ips = set([ii[4][0] for ii in socket.getaddrinfo(socket.gethostname(), None) if ii[4][0] != "127.0.0.1"])
print "Configure your phone's proxy to point to this computer, then visit mitm.it"
print "on your phone to install the interception certificate.\n"
print "Record Peeker is listening on port {0}, on these addresses:".format(args.port)
print "\n".join([" * {0}".format(ip) for ip in ips])
print ""
print "Try entering the Party screen, or starting a battle."
global dp
dp = Dispatcher('ffrk.denagames.com')
[dp.register(path, function) for path, function in handlers]
[dp.ignore(path, regex) for path, regex in ignored_requests]
handlers = [
('get_battle_init_data' , handle_get_battle_init_data),
('/dff/party/list', handle_party_list),
('/dff/world/dungeons', handle_dungeon_list),
('/dff/world/battles', handle_battle_list),
('/dff/event/coliseum/6/get_data', handle_survival_event)
]
ignored_requests = [
('/dff/', True),
('/dff/splash', False),
('/dff/?timestamp', False),
('/dff/battle/?timestamp', False),
]
def response(context, flow):
global args
global dp
dp.handle(flow, args)
``` |
{
"source": "jonchang/tact",
"score": 2
} |
#### File: tact/tact/fastmrca.py
```python
from __future__ import division
from .tree_util import get_tip_labels
global tree
def initialize(phy):
"""
Initialize the fastmrca singleton with a tree.
"""
global tree
tree = phy
def bitmask(labels):
"""
Gets a bitmask for the taxa in `labels`, potentially in parallel.
"""
global tree
tn = tree.taxon_namespace
return tn.taxa_bitmask(labels=labels)
def get(labels):
"""Pulls a MRCA node out for the taxa in `labels`."""
global tree
labels = set(labels)
mrca = tree.mrca(leafset_bitmask=bitmask(labels))
if mrca and labels.issuperset(get_tip_labels(mrca)):
return mrca
return None
def fastmrca_getter(tn, x):
"""Helper function for submitting stuff."""
taxa = tn.get_taxa(labels=x)
mask = 0
for taxon in taxa:
mask |= tn.taxon_bitmask(taxon)
return mask
```
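A minimal usage sketch of the singleton above, assuming a dendropy tree has already been read from disk (the file name and taxon labels are illustrative):

```python
# Illustrative sketch; the tree file and taxon labels are assumptions
import dendropy

phy = dendropy.Tree.get(path="backbone.tre", schema="newick")
initialize(phy)

# get() only returns the MRCA when the supplied labels cover every tip in that clade
node = get(["Taxon_A", "Taxon_B", "Taxon_C"])
if node is not None:
    print(len(node.leaf_nodes()), "tips under this MRCA")
```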
#### File: tact/tact/lib.py
```python
from __future__ import division
import random
import sys
from decimal import Decimal as D
from math import exp
from math import log
import numpy as np
from scipy.optimize import minimize, minimize_scalar, dual_annealing
# Raise on overflow
np.seterr(all="raise")
def get_bd(r, a):
"""
Converts turnover and relative extinction to birth and death rates.
Args:
r (float): turnover or net diversification (birth - death)
a (float): relative extinction (death / birth)
Returns:
(float, float): birth, death
"""
return -r / (a - 1), -a * r / (a - 1)
def get_ra(b, d):
"""
Converts birth and death to turnover and relative extinction rates.
Args:
b (float): birth rate
d (float): extinction rate
Returns:
(float, float): turnover, relative extinction
"""
return (b - d, d / b)
def wrapped_lik_constant(x, sampling, ages):
"""
Wrapper for birth-death likelihood to make optimizing more convenient.
Args:
x (float, float): turnover, relative extinction
sampling (float): sampling fraction (0, 1]
ages (list): vector of node ages
Returns:
float: a likelihood
"""
return lik_constant(get_bd(*x), sampling, ages)
def wrapped_lik_constant_yule(x, sampling, ages):
"""
Wrapper for Yule likelihood to make optimizing more convenient.
Args:
x (float): birth rate
sampling (float): sampling fraction (0, 1]
ages (list): vector of node ages
Returns:
float: a likelihood
"""
return lik_constant((x, 0.0), sampling, ages)
def two_step_optim(func, x0, bounds, args):
"""
Conduct a two-step function optimization, first by using the fast L-BFGS-B method,
and if that fails, use simulated annealing.
Args:
func (callable): function to optimize
x0 (tuple): initial conditions
bounds (tuple): boundary conditions
        args (list): additional arguments to pass to `func`
Returns:
tuple: optimized parameter values
"""
try:
result = minimize(func, x0=x0, bounds=bounds, args=args, method="L-BFGS-B")
if result["success"]:
return result["x"].tolist()
except FloatingPointError:
pass
result = dual_annealing(func, x0=x0, bounds=bounds, args=args)
if result["success"]:
return result["x"].tolist()
raise Exception(f"Optimization failed: {result['message']} (code {result['status']})")
def optim_bd(ages, sampling, min_bound=1e-9):
"""
Optimizes birth and death parameters given a vector of splitting times and sampling fraction.
Args:
ages (list): vector of node ages
sampling (float): sampling fraction (0, 1]
min_bound (float): minimum birth rate
Returns:
float, float: birth and death rates
"""
if max(ages) < 0.000001:
init_r = 1e-3
else:
# Magallon-Sanderson crown estimator
init_r = (log((len(ages) + 1) / sampling) - log(2)) / max(ages)
init_r = max(1e-3, init_r)
bounds = ((min_bound, 100), (0, 1 - min_bound))
result = two_step_optim(wrapped_lik_constant, x0=(init_r, min_bound), bounds=bounds, args=(sampling, ages))
return get_bd(*result)
def optim_yule(ages, sampling, min_bound=1e-9):
"""
Optimizes birth parameter under a Yule model, given a vector of splitting times and sampling fraction.
Args:
ages (list): vector of node ages
sampling (float): sampling fraction (0, 1]
min_bound (float): minimum birth rate
Returns:
float, float: birth and death rates (where death is always 0)
"""
bounds = (min_bound, 100)
result = minimize_scalar(wrapped_lik_constant_yule, bounds=bounds, args=(sampling, ages), method="Bounded")
if result["success"]:
return (result["x"], 0.0)
raise Exception(f"Optimization failed: {result['message']} (code {result['status']})")
def p0_exact(t, l, m, rho): # noqa: E741
"Exact version of `p0` using Decimal math."
t = D(t)
l = D(l) # noqa: E741
m = D(m)
rho = D(rho)
return D(1) - rho * (l - m) / (rho * l + (l * (D(1) - rho) - m) * (-(l - m) * t).exp())
def p0(t, l, m, rho): # noqa: E741
try:
return 1 - rho * (l - m) / (rho * l + (l * (1 - rho) - m) * exp(-(l - m) * t))
except FloatingPointError:
return float(p0_exact(t, l, m, rho))
def p1_exact(t, l, m, rho): # noqa: E741
"""Exact version of `p1` using Decimal math."""
t = D(t)
l = D(l) # noqa: E741
m = D(m)
rho = D(rho)
num = rho * (l - m) ** D(2) * (-(l - m) * t).exp()
denom = (rho * l + (l * (1 - rho) - m) * (-(l - m) * t).exp()) ** D(2)
return num / denom
def p1_orig(t, l, m, rho): # noqa: E741
"""Original version of `p1`, here for testing and comparison purposes."""
try:
num = rho * (l - m) ** 2 * np.exp(-(l - m) * t)
denom = (rho * l + (l * (1 - rho) - m) * np.exp(-(l - m) * t)) ** 2
res = num / denom
except (OverflowError, FloatingPointError):
res = float(p1_exact(t, l, m, rho))
if res == 0.0:
return sys.float_info.min
return res
def p1(t, l, m, rho): # noqa: E741
"""
Optimized version of `p1_orig` using common subexpression elimination and strength reduction
from exponentiation to multiplication.
"""
try:
ert = np.exp(-(l - m) * t, dtype=np.float64)
num = rho * (l - m) ** 2 * ert
denom = (rho * l + (l * (1 - rho) - m) * ert) ** 2
res = num / denom
except (OverflowError, FloatingPointError):
res = float(p1_exact(t, l, m, rho))
if res == 0.0:
return sys.float_info.min
return res
def intp1_exact(t, l, m): # noqa: E741
"""Exact version of `intp1` using Decimal math."""
l = D(l) # noqa: E741
m = D(m)
t = D(t)
num = D(1) - (-(l - m) * t).exp()
denom = l - m * (-(l - m) * t).exp()
return num / denom
def intp1(t, l, m): # noqa: E741
try:
return (1 - exp(-(l - m) * t)) / (l - m * exp(-(l - m) * t))
except OverflowError:
return float(intp1_exact(t, l, m))
def lik_constant(vec, rho, t, root=1, survival=1, p1=p1):
"""
Calculates the likelihood of a constant-rate birth-death process, conditioned
on the waiting times of a phylogenetic tree and degree of incomplete sampling.
Based off of the R function `TreePar::LikConstant` written by <NAME>.
<NAME>. On incomplete sampling under birth-death models and connections
to the sampling-based coalescent. Jour. Theo. Biol. 261: 58-66, 2009.
Args:
vec (float, float): two element tuple of birth and death
rho (float): sampling fraction
t (list): vector of waiting times
root (bool): include the root or not? (default: 1)
survival (bool): assume survival of the process? (default: 1)
Returns:
float: a likelihood
"""
l = vec[0] # noqa: E741
m = vec[1]
t.sort(reverse=True)
lik = (root + 1) * log(p1(t[0], l, m, rho))
for tt in t[1:]:
lik += log(l) + log(p1(tt, l, m, rho))
if survival == 1:
lik -= (root + 1) * log(1 - p0(t[0], l, m, rho))
return -lik
def crown_capture_probability(n, k):
"""
Calculate the probability that a sample of `k` taxa from a clade
of `n` total taxa includes a root node, under a Yule process.
This equation is taken from:
<NAME>. 1996. How many taxa must be sampled to identify
the root node of a large clade? Systematic Biology 45:168-173
Args:
n (int): total number of taxa
k (int): sampled taxa
Returns:
float: probability
"""
if n < k:
raise Exception(f"n must be greater than or equal to k (n={n}, k={k})")
if n == 1 and k == 1:
return 0 # not technically correct but it works for our purposes
return 1 - 2 * (n - k) / ((n - 1) * (k + 1))
# TODO: This could probably be optimized
def get_new_times(ages, birth, death, missing, told=None, tyoung=None):
"""
Simulates new speciation events in an incomplete phylogeny assuming a
constant-rate birth-death process.
Adapted from the R function `TreeSim::corsim` written by <NAME>.
<NAME>, <NAME>, <NAME>. A new method for handling missing
species in diversification analysis applicable to randomly or
non-randomly sampled phylogenies. Syst. Biol., 61(5): 785-792, 2012.
Args:
ages (list): vector of waiting times
birth (float): birth rate
death (float): death rate
missing (int): number of missing taxa to simulate
told (float): maximum simulated age (default: `max(ages)`)
tyoung (float): minimum simulated age bound (default: `0`)
Returns:
list: vector of simulated waiting times.
"""
if told is None:
told = max(ages)
if len(ages) > 0:
if max(ages) > told and abs(max(ages) - told) > sys.float_info.epsilon:
raise Exception("Zero or negative branch lengths detected in backbone phylogeny")
if tyoung is None:
tyoung = 0
ages.sort(reverse=True)
times = [x for x in ages if told >= x >= tyoung]
times = [told] + times + [tyoung]
ranks = range(0, len(times))
only_new = []
while missing > 0:
if len(ranks) > 2:
distrranks = []
for i in range(1, len(ranks)):
temp = ranks[i] * (intp1(times[i - 1], birth, death) - intp1(times[i], birth, death))
distrranks.append(temp)
try:
dsum = sum(distrranks)
distrranks = [x / dsum for x in distrranks]
for i in range(1, len(distrranks)):
distrranks[i] = distrranks[i] + distrranks[i - 1]
r = random.uniform(0, 1)
addrank = min([idx for idx, x in enumerate(distrranks) if x > r])
except ZeroDivisionError:
addrank = 0
except ValueError:
addrank = 0
else:
addrank = 0
r = random.uniform(0, 1)
const = intp1(times[addrank], birth, death) - intp1(times[addrank + 1], birth, death)
try:
temp = intp1(times[addrank + 1], birth, death) / const
except ZeroDivisionError:
temp = 0.0
xnew = 1 / (death - birth) * log((1 - (r + temp) * const * birth) / (1 - (r + temp) * const * death))
only_new.append(xnew)
missing -= 1
only_new.sort(reverse=True)
return only_new
```
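A short sketch tying the helpers above together; the age vector, sampling fraction, and species counts are illustrative:

```python
# Illustrative values only
ages = [20.9, 17.5, 13.3, 9.8, 6.7, 2.1]   # splitting times of a backbone clade
sampling = 0.87                             # fraction of described species present in the tree

birth, death = optim_bd(ages, sampling)     # ML constant-rate birth/death estimates
turnover, rel_ext = get_ra(birth, death)    # back-convert to turnover and relative extinction

# Sanderson (1996): probability that sampling 6 of 24 species captures the crown node
p_crown = crown_capture_probability(n=24, k=6)
```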
#### File: tact/tests/conftest.py
```python
from __future__ import division
import os
import pytest
@pytest.fixture
def datadir():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
@pytest.fixture
def ages():
return [
20.934955,
17.506532,
16.64467,
15.380987,
14.547092000000001,
13.664577999999999,
13.289480000000001,
11.667099,
9.799231,
9.510413,
9.029556000000001,
8.806255,
8.770727,
8.480102,
6.984475999999999,
6.706684,
2.11319,
0.545689,
0.147482,
]
@pytest.fixture
def sampling():
return 0.869565217391
@pytest.fixture
def birth():
return 0.09115578881894915
@pytest.fixture
def death():
return 0.0
``` |
{
"source": "jon-choi/hillsbarber",
"score": 3
} |
#### File: jon-choi/hillsbarber/main.py
```python
from flask import Flask, render_template
app = Flask(__name__)
app.config['DEBUG'] = True
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
@app.route('/')
def hello(name=None):
"""Return a friendly HTTP greeting."""
return render_template('template.html', name=name, text="Jinja Flask")
# return render_template('bootstrap_cover.html', name=name)
# @app.route('/rates')
# def helloRates(name='rates'):
# return render_template('template.html',name=name)
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, nothing at this URL.', 404
``` |
{
"source": "jonchui/MyLife",
"score": 3
} |
#### File: MyLife/handlers/calendar.py
```python
import webapp2
from templates import get_template
class CalendarHandler(webapp2.RequestHandler):
def get(self):
data = { "page" : "write"}
self.response.write(get_template('calendar.html').render(data))
```
#### File: MyLife/handlers/dataupgrade.py
```python
import webapp2, logging
from models.postcounter import PostCounter
from models.post import Post
from google.appengine.api import users
class DataUpgradeHandler(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
logging.info('LOGGED IN USER IS: %s' % user)
```
#### File: MyLife/handlers/edit.py
```python
import webapp2, datetime, logging, json, filestore
from templates import get_template
from models.post import Post
from models.postcounter import PostCounter
from models.userimage import UserImage
from models.rawmail import RawMail
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import app_identity
class GetPhotoUploadUrlHandler(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = "application/json"
self.response.write(json.dumps({"upload_url" : filestore.create_upload_url('/api/addphoto')}))
class AddPhotoHandler(blobstore_handlers.BlobstoreUploadHandler):
def post(self):
file_info = self.get_file_infos()[0]
self.response.headers['Content-Type'] = "application/json"
year = self.request.get('year')
month = self.request.get('month')
day = self.request.get('day')
date = datetime.datetime(int(year), int(month), int(day))
if file_info.content_type.lower() not in ('image/jpeg', 'image/jpg', 'image/png', 'image/gif', 'image/bmp'):
return self.response.write(json.dumps({"status" : "error", "message" : "Unsupported content type: " + file_info.content_type}))
bytes = filestore.read(file_info.gs_object_name)
existing_images = [u.filename for u in UserImage.query(UserImage.date == date).fetch()]
filename = UserImage.create_image_name(file_info.filename, date, existing_images)
img = UserImage()
img.import_image(filename, file_info.filename, bytes, date, None)
img.put()
filestore.delete(file_info.gs_object_name)
#If there's a post here we should add the image...
post = Post.query(Post.date == date).get()
if post:
post.has_images = True
if post.images is None:
post.images = []
post.images.append(filename)
post.put()
self.response.write(json.dumps({"status" : "ok", "filename" : filename}))
class DeletePhotoHandler(webapp2.RequestHandler):
def post(self, filename):
self.response.headers['Content-Type'] = "application/json"
img = UserImage.query(UserImage.filename == filename).get()
if not img:
return self.response.write(json.dumps({"status" : "error", "message" : "Image does not exit"}))
post = Post.query(Post.date == img.date).get()
#Remove it from the post
if post:
try:
post.images.remove(filename)
post.text = post.text.replace('$IMG:' + filename, '').replace('\n\n\n\n', '\n\n')
except:
pass
if len(post.images) == 0:
post.has_images = False
post.put()
filestore.delete(img.serving_size_key)
filestore.delete(img.original_size_key)
img.key.delete()
self.response.write(json.dumps({"status" : "ok"}))
class EditHandler(webapp2.RequestHandler):
def get(self, kind, year, month, day):
date = datetime.datetime(int(year),int(month),int(day)).date()
post = Post.query(Post.date == date).get()
if kind == 'write' and post:
return self.redirect('/edit/%s' % date.strftime('%Y-%m-%d'))
if kind == 'edit' and not post:
return self.redirect('/write/%s' % date.strftime('%Y-%m-%d'))
data = {
"date" : date,
"text" : "",
"page" : "write",
"kind" : kind
}
if post:
data["page"] = "edit"
data["text"] = post.text
data["images"] = post.images
else:
data["images"] = [u.filename for u in UserImage.query(UserImage.date == date).fetch()]
self.response.write(get_template('edit.html').render(data))
def post(self, kind, year, month, day):
date = datetime.datetime(int(year),int(month),int(day)).date()
post = Post.query(Post.date == date).get()
is_new = False
if not post:
post = Post(date=date, source='web',images=[])
is_new = True
post.text = self.request.get('text')
save = self.request.get('action') == 'save'
delete = self.request.get('action') == 'delete'
if save and delete:
raise Exception('Something weird happened...')
if save:
if is_new:
post.images = [u.filename for u in UserImage.query(UserImage.date == date).fetch()]
post.images.sort()
post.has_images = True
post.put()
if is_new:
PostCounter.get().increment(post.date.year, post.date.month)
self.redirect_to_date(post.date)
elif delete:
self.delete_post(post)
next_post = Post.query(Post.date > date).order(Post.date).get()
if next_post and next_post.date.month == date.month:
return self.redirect_to_date(next_post.date)
#No way, we'll have to just redirect to the empty month
self.redirect('/past/%s' % date.strftime('%Y-%m'))
else:
raise Exception('How the hell did we get here...?')
def delete_post(self, post):
images = UserImage.query(UserImage.date == post.date).fetch()
for img in images:
filestore.delete(img.serving_size_key)
filestore.delete(img.original_size_key)
img.key.delete()
emails = RawMail.query(RawMail.date == post.date).fetch()
for email in emails:
email.key.delete()
post.key.delete()
PostCounter.get().decrement(post.date.year, post.date.month)
logging.info('Deleted %s images, %s emails and 1 post from %s' % (len(images), len(emails), post.date.strftime('%Y-%m-%d')))
def redirect_to_date(self, date):
self.redirect('/past/%s#day-%s' % (date.strftime('%Y-%m'), date.day))
```
#### File: MyLife/handlers/export.py
```python
from __future__ import with_statement
import webapp2, time, zipfile, re, datetime, logging, json, filestore
from StringIO import StringIO
from models.post import Post
from models.userimage import UserImage
from models.exporttask import ExportTask
from google.appengine.ext import ndb
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import taskqueue
class ExportStartHandler(webapp2.RequestHandler):
def post(self):
task = ExportTask()
task.put()
retry_options = taskqueue.TaskRetryOptions(task_retry_limit=0)
queue_task = taskqueue.Task(url='/export/run', params={"task":task.key.urlsafe()}, retry_options=retry_options)
queue_task.add()
result = {"message" : "Waiting for task to start..", "id" : task.key.urlsafe()}
self.response.headers['Content-Type'] = "application/json"
self.response.write(json.dumps(result))
class ExportHandler(webapp2.RequestHandler):
def post(self):
export_task_key = ndb.Key(urlsafe=self.request.get('task'))
export_task = export_task_key.get()
try:
day_string = datetime.datetime.today().strftime('%Y-%m-%d')
zip_filename = 'export_%s.zip' % day_string
logging.info('Starting export task')
self.cleanup_old_export_tasks()
buffer = StringIO()
archive = zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED)
self.add_posts_to_zip(export_task, archive, day_string)
self.add_images_to_zip(export_task, archive)
archive.close()
export_task.update('Saving zip file...')
self.create_zip_blob(buffer, zip_filename)
export_task.update('Finished creating zip', status='finished', filename=zip_filename)
self.enqueue_for_deletion(export_task)
except Exception, ex:
export_task.update('Failed to export: %s' % ex, status='failed')
logging.error('Failed export: %s' % ex.message)
def add_posts_to_zip(self, export_task, archive, day_string):
export_task.update('Fetching posts...', status='inprogress')
posts = [p for p in Post.query().order(Post.date).fetch()]
export_task.update('Got %s posts, adding to zip...' % len(posts))
post_text = ''
for p in posts:
post_text += p.date.strftime('%Y-%m-%d')
post_text += '\r\n\r\n'
post_text += p.text.replace('\r\n', '\n').replace('\n', '\r\n').strip()
post_text += '\r\n\r\n'
archive.writestr('/export_%s.txt' % day_string, post_text.encode('utf-8'))
export_task.update('Added %s posts to zip...' % len(posts))
def enqueue_for_deletion(self, export_task):
#Enqueue the task to be deleted in 15 minutes...
timestamp = datetime.datetime.now() + datetime.timedelta(minutes=15)
retry_options = taskqueue.TaskRetryOptions(task_retry_limit=0)
queue_task = taskqueue.Task(url='/export/delete', eta=timestamp, params={"task":export_task.key.urlsafe()}, retry_options=retry_options)
queue_task.add()
def cleanup_old_export_tasks(self):
#Lets delete any old export tasks hanging around...
old_deleted = 0
for ex in ExportTask.query().fetch():
if ex.status in ('finished', 'failed'):
try:
filestore.delete(ex.filename)
except:
pass
ex.key.delete()
old_deleted += 1
if old_deleted > 0:
logging.info('Deleted %s old export tasks' % old_deleted)
def add_images_to_zip(self, export_task, archive):
export_task.update('Fetching image information...')
images = [i for i in UserImage.query().order(UserImage.filename).fetch()]
export_task.update('Found %s images...' % len(images))
for i, img in enumerate(images):
img_data = filestore.read(img.original_size_key)
archive.writestr('/img_%s' % img.filename.replace('.jpg', '.jpeg'), img_data)
if i % 5 == 0:
export_task.update('Added %s of %s images to zip... ' % (i+1,len(images)))
export_task.update('Finished adding images...')
def create_zip_blob(self, buffer, filename):
filestore.write(filename, buffer.getvalue(), content_type='application/zip')
class ExportStatusHandler(webapp2.RequestHandler):
def get(self, id):
export_task = ndb.Key(urlsafe=id).get()
result = {"status":export_task.status, "message" : export_task.message, "filename" : export_task.filename}
self.response.headers['Content-Type'] = "application/json"
self.response.write(json.dumps(result))
class ExportDownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
def get(self, filename):
export = ExportTask.query(ExportTask.filename == filename).get()
if not export:
self.error(404)
else:
self.send_blob(filestore.get_blob_key(export.filename))
class ExportDeleteHandler(webapp2.RequestHandler):
def post(self):
export_task_key = ndb.Key(urlsafe=self.request.get('task'))
export_task = export_task_key.get()
if not export_task:
logging.info('Export task already deleted')
return
try:
filestore.delete(export_task.filename)
except:
logging.info('Failed to delete export blob')
export_task.key.delete()
logging.info('Deleted export task')
```
#### File: MyLife/handlers/image.py
```python
from google.appengine.ext.webapp import blobstore_handlers
import filestore
from models.userimage import UserImage
class ImageHandler(blobstore_handlers.BlobstoreDownloadHandler):
def get(self, filename):
image = UserImage.query(UserImage.filename == filename).get()
if not image:
self.error(404)
return
if self.request.get('fullsize'):
key = image.original_size_key
else:
key = image.serving_size_key
self.send_blob(filestore.get_blob_key(key))
```
#### File: MyLife/handlers/settings.py
```python
import webapp2, filestore
from templates import get_template
from models.settings import Settings
from models.userimage import UserImage
from models.timezones import timezones
from models.migratetask import MigrateTask
class SettingsHandler(webapp2.RequestHandler):
def get(self):
#Check whether the migration is done so we can see whether to show the Blobstore Migration
#or not...
settings = Settings.get()
if not settings.blobstore_migration_done:
migration_task_finished = bool(MigrateTask.query(MigrateTask.status == 'finished').get())
if migration_task_finished:
settings.blobstore_migration_done = True
settings.put()
else:
#Try to figure out whether this is a new user that has nothing in the blobstore...
if not UserImage.query().get():
settings.blobstore_migration_done = True
settings.put()
self._render(settings)
def post(self):
settings = Settings.get()
settings.email_address = self.request.get('email-address')
settings.timezone = self.request.get('timezone')
settings.email_hour = int(self.request.get('email-hour'))
settings.dropbox_access_token = self.request.get('dropbox-access-token')
settings.include_old_post_in_entry = self.request.get('include-old-entry') == 'yes'
settings.put()
self._render(settings, True)
def _render(self, settings, saved=False):
data = {
"page" : "settings",
"email_address" : settings.email_address,
"dropbox_access_token" : settings.dropbox_access_token or "",
"timezone" : settings.timezone,
"timezones" : timezones,
"email_hour" : settings.email_hour,
"include_old_post_in_entry" : settings.include_old_post_in_entry,
"upload_url" : filestore.create_upload_url('/upload-finished'),
"saved" : saved,
"can_migrate_images" : not settings.blobstore_migration_done,
"bucket_exists" : filestore.bucket_exists(),
"version" : open('VERSION').read()
}
self.response.write(get_template('settings.html').render(data))
```
#### File: MyLife/handlers/upload.py
```python
from __future__ import with_statement
import webapp2, time, logging, json, zipfile, datetime, re, traceback, filestore, json
from StringIO import StringIO
from models.post import Post
from models.importtask import ImportTask
from models.userimage import UserImage
from models.postcounter import PostCounter
from google.appengine.ext import ndb
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import taskqueue
from errorhandling import log_error
class UploadFinishedHandler(blobstore_handlers.BlobstoreUploadHandler):
def post(self):
file_info = self.get_file_infos()[0]
#for i in dir(file_info):
# if not '__' in i:
# logging.info('%s is %s' % (i, getattr(file_info, i)))
task = ImportTask(
uploaded_file = file_info.gs_object_name
)
task.put()
retry_options = taskqueue.TaskRetryOptions(task_retry_limit=0)
queue_task = taskqueue.Task(url='/import', params={"task":task.key.urlsafe()}, retry_options=retry_options)
queue_task.add()
result = {"message" : "Upload finished, starting import...", "id" : task.key.urlsafe()}
self.response.headers['Content-Type'] = "application/json"
self.response.write(json.dumps(result))
class ImportHandler(webapp2.RequestHandler):
def post(self):
import_task_key = ndb.Key(urlsafe=self.request.get('task'))
import_task = import_task_key.get()
import_task.update('Unpacking zip file...', status='inprogress')
logging.info('Starting import ...')
counter = PostCounter.get()
try:
posts, images = self.read_zip_file(import_task.uploaded_file)
import_task.update('Importing...', total_photos=len(images), total_posts=len(posts))
logging.info('Importing %s posts, %s images' % (len(posts), len(images)))
posts = self.filter_posts(posts)
for date, text in posts:
str_date = date.strftime('%Y-%m-%d')
p = Post(
date=date,
source='ohlife',
text=text.decode('utf-8')
)
p.images = []
p.has_images = False
post_images = [(k,images[k]) for k in images.keys() if str_date in k]
if len(post_images):
logging.info('Importing %s images for date %s' % (len(post_images), str_date))
p.images = []
p.has_images = True
for name, bytes in post_images:
user_image = UserImage()
img_name = name.replace('img_', '').replace('.jpeg', '.jpg')
user_image.import_image(img_name, name, bytes, date)
p.images.append(img_name)
import_task.imported_photos += 1
user_image.put()
p.put()
counter.increment(p.date.year, p.date.month, False)
import_task.imported_posts += 1
if import_task.imported_posts % 10 == 0:
import_task.update('Imported %s/%s post, %s/%s photos...' % (import_task.imported_posts, import_task.total_posts,import_task.imported_photos, import_task.total_photos))
logging.info(import_task.message)
counter.put()
counter.put()
skipped_posts = import_task.total_posts - import_task.imported_posts
skipped_photos = import_task.total_photos - import_task.imported_photos
msg = 'Imported %s posts and %s photos.' % (import_task.imported_posts, import_task.imported_photos)
if skipped_posts or skipped_photos:
msg += ' %s posts and %s photos already existed and were skipped.' % (skipped_posts, skipped_photos)
import_task.update(msg, status='finished')
logging.info(import_task.message)
filestore.delete(import_task.uploaded_file)
except Exception, ex:
try:
filestore.delete(import_task.uploaded_file)
except:
pass
try:
counter.put()
except:
pass
import_task.update('Failed to import: %s' % ex, status='failed')
log_error('Failed import', traceback.format_exc(6))
def read_zip_file(self, uploaded_file):
zip_data = filestore.read(uploaded_file)
zip = zipfile.ZipFile(StringIO(zip_data))
text = None
images = {}
good_names = [n for n in zip.namelist() if not '__MACOSX' in n]
text_files = [n for n in good_names if n.endswith('.txt') and not '__MACOSX' in n]
image_files = [n for n in good_names if re.search('\\.(jpe?g|bmp|png|gif|tiff)$', n, re.I)]
if len(text_files) > 1:
raise Exception('More than one possible text files in zip file: %s' % ','.join(text_files))
other_files = [n for n in good_names if not n in text_files + image_files]
if len(other_files) > 0:
raise Exception('Got files that we don\'t know how to handle: %s' % ','.join(other_files))
text = zip.read(text_files[0])
for name in image_files:
images[re.sub('^/', '', name)] = zip.read(name)
text = text.replace('\r\n', '\n').strip()
lines = text.split('\n')
posts = []
prev_line_empty = True
current_date, current_text = None, ''
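# Expected layout of the exported text (illustrative, inferred from the parsing below):
#
#   2015-01-31
#
#   First entry text...
#
#   2015-02-01
#
#   Second entry text...
#
# A YYYY-MM-DD line surrounded by blank lines starts a new post; every other line
# is accumulated into the current post's text.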
for i,line in enumerate(lines):
next_line_empty = i == len(lines)-1 or lines[i+1] == ''
m = re.match(r'^(\d\d\d\d)-(\d\d)-(\d\d)$', line)
if m and prev_line_empty and next_line_empty:
if current_date:
posts.append((current_date, current_text.rstrip()))
current_text = ''
current_date = datetime.date(int(m.group(1)), int(m.group(2)), int(m.group(3)))
else:
current_text += line + '\r\n'
prev_line_empty = line == ''
if current_text.strip() and current_date:
posts.append((current_date, current_text.strip()))
return posts, images
def filter_posts(self, new_posts):
#Lets check if we're going to overwrite anything...
existing_posts = [p.date for p in Post.query()]
filtered_posts = []
for date, text in new_posts:
if date in existing_posts:
logging.info('Skipping post for %s, already exists' % date)
else:
filtered_posts.append((date,text))
return filtered_posts
class ImportStatusHandler(webapp2.RequestHandler):
def get(self, id):
import_task = ndb.Key(urlsafe=id).get()
result = {"status":import_task.status, "message" : import_task.message}
self.response.headers['Content-Type'] = "application/json"
self.response.write(json.dumps(result))
```
#### File: MyLife/models/exporttask.py
```python
import datetime
from google.appengine.ext import ndb
class ExportTask(ndb.Model):
blob_key = ndb.BlobKeyProperty()
total_posts = ndb.IntegerProperty(default=0)
total_photos = ndb.IntegerProperty(default=0)
exported_posts = ndb.IntegerProperty(default=0)
exported_photos = ndb.IntegerProperty(default=0)
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
filename = ndb.StringProperty()
status = ndb.StringProperty(choices=['new', 'inprogress', 'finished', 'failed'],default='new')
message = ndb.TextProperty(default='Waiting for task to start...')
def update(self, message, **kwargs):
self.message = message
for k,v in kwargs.items():
self.__setattr__(k, v)
self.put()
``` |
{
"source": "jonchun/badlinkfinder",
"score": 3
} |
#### File: badlinkfinder/cli/input.py
```python
from argparse import ArgumentParser, RawDescriptionHelpFormatter, SUPPRESS
import logging
from textwrap import dedent, wrap
from badlinkfinder import __doc__, __version__
from badlinkfinder.utilities import str2LogLevel
logger = logging.getLogger('blf.cli')
class CustomHelpFormatter(RawDescriptionHelpFormatter):
"""A nicer help formatter.
Help for arguments can be indented and contain new lines.
It will be de-dented and arguments in the help
will be separated by a blank line for better readability.
Taken from https://github.com/jakubroztocil/httpie/
"""
def __init__(self, max_help_position=6, *args, **kwargs):
# A smaller indent for args help.
kwargs['max_help_position'] = max_help_position
super(CustomHelpFormatter, self).__init__(*args, **kwargs)
def _split_lines(self, text, width):
text = dedent(text).strip() + '\n\n'
return text.splitlines()
parser = ArgumentParser(
prog='blf',
formatter_class=CustomHelpFormatter,
description='{}'.format(__doc__.strip()),
add_help=False
)
#######################################################################
# Positional arguments.
#######################################################################
positional = parser.add_argument_group(
title='Positional Arguments',
description=dedent("""These arguments come after any flags and in the order they are listed here.
Only URL is required.
""")
)
positional.add_argument(
'url',
metavar='URL',
default=None,
help="""
The starting seed URL to begin crawling your website. This is required to begin searching for bad links.
"""
)
#######################################################################
# Crawler Settings.
#######################################################################
crawler_settings = parser.add_argument_group(
title='Crawler Settings',
description=None
)
crawler_settings.add_argument(
'--workers',
type=int,
default=5,
help="""
By default, 5 workers are used for the crawler.
"""
)
crawler_settings.add_argument(
'--timeout',
type=int,
default=10,
help="""
By default, requests time out after 10 seconds.
"""
)
crawler_settings.add_argument(
'--include-inbound',
dest='include_inbound',
action='store_true',
help="""
Whether to include inbound URLs when reporting Site Errors (show where they were referenced from)
"""
)
crawler_settings.add_argument(
'--output-file',
dest='output_file',
type=str,
help="""
File name for storing the errors found.
"""
)
#######################################################################
# Parser Settings.
#######################################################################
parser_settings = parser.add_argument_group(
title='Parser Settings',
description=None
)
parser_settings.add_argument(
'--ignore-schemes',
action='append',
dest='ignore_schemes',
help="""
Ignore scheme when parsing URLs so that it does not detect as invalid.
--ignore-schemes custom
will ignore any URL that looks like "custom:nonstandardurlhere.com"
(You can declare this option multiple times)
"""
)
parser_settings.add_argument(
'--ignore-domains',
action='append',
dest='ignore_domains',
help="""
Ignore external domain when crawling URLs.
--ignore-domains example.com
will not crawl any URL that is on "example.com".
(You can declare this option multiple times)
"""
)
#######################################################################
# Troubleshooting
#######################################################################
troubleshooting = parser.add_argument_group(title='Troubleshooting')
troubleshooting.add_argument(
'--help',
action='help',
default=SUPPRESS,
help="""
Show this help message and exit.
"""
)
troubleshooting.add_argument(
'--version',
action='version',
version=__version__,
help="""
Show version and exit.
"""
)
log_levels = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']
log_level_help = ''
for log_level in log_levels:
log_level_help += '{} | '.format(log_level)
log_level_help = '[{}]'.format(log_level_help.rstrip(' |'))
troubleshooting.add_argument(
'--log_level', '--log-level',
dest='log_level',
type=str,
default='WARNING',
help=log_level_help
)
def extra_parse_logic(args):
# do extra parsing/validation on arguments here
try:
level = str2LogLevel(args.log_level)
except ValueError:
raise BLFInvalidArgument('Log level "{}" is invalid. Please use one of the following values: {}' .format(args.log_level, log_level_help))
args.log_level = level
return args
class BLFInvalidArgument(Exception):
pass
```
#### File: badlinkfinder/badlinkfinder/core.py
```python
import logging
import sys
from badlinkfinder.cli.input import extra_parse_logic, parser, BLFInvalidArgument
from badlinkfinder.crawler import Crawler
import badlinkfinder.cli.output as blf_output
logger = logging.getLogger('blf')
def main():
try:
args = extra_parse_logic(parser.parse_args())
blf_output.configure(args)
crawler = Crawler(
workers=args.workers,
timeout=args.timeout,
ignore_schemes=args.ignore_schemes or [],
ignore_domains=args.ignore_domains or [],
)
blf_output.display(crawler, crawler.run(args.url))
except BLFInvalidArgument as e:
raise e
except KeyboardInterrupt:
# Catch Interrupt and save progress in case you want to restart it. (TODO)
raise Exception('ERROR: Interrupted by user')
except Exception as e:
import traceback
traceback.print_exc()
raise e
```
#### File: badlinkfinder/badlinkfinder/fetcher.py
```python
import logging
import mimetypes
import sys
from urllib.parse import urlparse
import lxml.html
import requests
logger = logging.getLogger('blf.fetcher')
def fetch_url_head(url, headers=None, timeout=10):
logger.info('HEAD: {}'.format(url))
request_parameters = {
'headers': headers,
'timeout': timeout,
'allow_redirects': True,
}
site_node = None
try:
response = requests.head(url, **request_parameters)
except Exception as e:
logger.warning('Failed to load {}: {}'.format(url, e))
site_node = SiteNode(url, 'HEAD', status_code=-1, error=e)
else:
site_node = SiteNode(url, 'HEAD', final_url=response.url, status_code=response.status_code, headers=response.headers, contents=response.content)
return site_node
def fetch_url_get(url, headers=None, timeout=10):
logger.info('GET: {}'.format(url))
request_parameters = {
'headers': headers,
'timeout': timeout,
'allow_redirects': True,
}
site_node = None
try:
response = requests.get(url, **request_parameters)
except Exception as e:
logger.warning('Failed to load {}: {}'.format(url, e))
site_node = SiteNode(url, 'GET', status_code=-1, error=e)
else:
site_node = SiteNode(url, 'GET', final_url=response.url, status_code=response.status_code, headers=response.headers, contents=response.content)
return site_node
def get_all_referenced_urls(html, base_url):
logger.info('searching {} for neighbors'.format(base_url))
try:
dom = lxml.html.fromstring(html)
except Exception as e:
logger.error('Unable to parse {} | {}'.format(base_url, e))
return []
else:
dom.make_links_absolute(base_url, resolve_base_href=True)
return [i[2] for i in dom.iterlinks()]
class SiteNode:
def __init__(self, url, request_type, final_url=None, status_code=None, headers=None, contents=None, error=None):
"""
Construct a new 'SiteNode' object.
Args:
url (str): The URL of the node
request_type (str): The HTTP method used to fetch it ('HEAD' or 'GET')
final_url (str): The final URL of the node (possibly different if a redirect was followed)
status_code (int): Status code of Response
headers (dict): Headers of Response
contents (bytes): Contents of Response
error (Exception): Optionally contains error/exceptions.
"""
self.url = url
self.request_type = request_type
if final_url:
self.final_url = final_url
else:
self.final_url = url
self.status_code = status_code
self.headers = headers
self.contents = contents
self.error = error
@property
def is_ok(self):
# Returns whether the status_code is 200 OK
return self.status_code == 200
@property
def is_html(self):
# Returns whether the content type is 'text/html'
return 'text/html' in self.headers["Content-Type"].lower()
@property
def loaded(self):
# Returns whether the status_code exists and is not -1 (used to indicate that loading the page failed/timed out)
if self.status_code == -1:
return False
if self.status_code:
# only return True if status_code is not None
return True
return False
@property
def final_domain(self):
parsed_url = urlparse(self.final_url)
return parsed_url.netloc
@property
def neighbors(self):
"""
Return a list of neighbors
"""
neighbors = []
if self.is_ok and self.is_html:
neighbors = get_all_referenced_urls(self.contents, self.final_url)
return neighbors
def __repr__(self):
error_msg = ''
if self.error:
error_msg = ' ' + str(self.error)
return "<SiteNode {} {} [{}]{}>".format(self.request_type, self.url, self.status_code, error_msg)
if __name__ == "__main__":
url = sys.argv[1]
print(fetch_url_get(url))
```
#### File: badlinkfinder/badlinkfinder/taskqueue.py
```python
from threading import Thread
from queue import Queue
class TaskQueue(Queue):
def __init__(self, num_workers=1):
super().__init__()
self.num_workers = num_workers
self.start_workers()
def add_task(self, task, *args, **kwargs):
args = args or ()
kwargs = kwargs or {}
self.put((task, args, kwargs))
def start_workers(self):
for i in range(self.num_workers):
t = Thread(target=self.worker)
t.daemon = True
t.start()
def worker(self):
while True:
item, args, kwargs = self.get()
item(*args, **kwargs)
self.task_done()
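# Illustrative usage (not part of the original module): tasks are plain callables,
# and Queue.join() can be used to wait until every queued task has been processed.
#
#   queue = TaskQueue(num_workers=4)
#   queue.add_task(print, "hello")
#   queue.join()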
``` |
{
"source": "jonchun/ptoys-mapper",
"score": 3
} |
#### File: ptoys-mapper/keycodes/emacs.py
```python
from typing import Union
from .key import Key, KeyCode, KeyCombo
_transform = {
"LC": "L_CTRL",
"LCtrl": "L_CTRL",
"RC": "R_CTRL",
"RCtrl": "R_CTRL",
"LM": "L_ALT",
"LAlt": "L_ALT",
"RM": "R_ALT",
"RAlt": "R_ALT",
"LShift": "L_SHIFT",
"RShift": "R_SHIFT",
"LSuper": "L_SUPER",
"LWin": "L_SUPER",
"RSuper": "R_SUPER",
"RWin": "R_SUPER",
# TODO: Transform . / = - ; ' []\`
}
# parses a single shortcut separated by -
# does not parse multiple key presses separated by spaces
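# Illustrative examples (not from the original source):
#   kbd_parse("LC-a") -> KeyCombo(Key.L_CTRL, Key.A)
#   kbd_parse("F5")   -> Key.F5 (a single KeyCode)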
def kbd_parse(input: str) -> Union[KeyCode, KeyCombo]:
_keys = []
for key in input.split("-"):
key = _transform.get(key, key.upper())
_keys.append(Key(key))
if len(_keys) == 1:
return _keys[0]
combo = KeyCombo()
for _key in _keys:
combo = combo + _key
return combo
```
#### File: keycodes/key/keycodes.py
```python
from typing import Iterable, Union
class KeyCode:
def __init__(self, key: str, code: int) -> None:
self.key: str = key
self.code: int = code
def __hash__(self) -> int:
return hash((self.key, self.code))
def __eq__(self, other) -> bool:
return hash(self) == hash(other)
def __str__(self) -> str:
return "<{}: {}>".format(self.key, self.code)
def __repr__(self) -> str:
return "<{class_name}: key={key}, code={code}>".format(
class_name=self.__class__.__name__, key=repr(self.key), code=repr(self.code)
)
def __add__(self, other: Union["KeyCode", "KeyCombo"]):
if isinstance(other, KeyCode):
return KeyCombo(self, other)
elif isinstance(other, KeyCombo):
return KeyCombo(self) + other
else:
raise TypeError("Can only add togther KeyCode and KeyCombo")
class KeyCombo:
def __init__(self, *args) -> None:
# using a list instead of a set to preserve order
_self_combo = []
# Flatten args non-recursively just to make KeyCombo(x,y,z) nicer to use.
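# e.g. (illustrative) KeyCombo(Key.CTRL, KeyCombo(Key.ALT, Key.DELETE)) collects the same
# KeyCodes as KeyCombo(Key.CTRL, Key.ALT, Key.DELETE), since iterables of KeyCodes are unpacked.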
for arg in args:
if isinstance(arg, Iterable):
for key_code in arg:
if isinstance(key_code, KeyCode):
if key_code not in _self_combo:
_self_combo.append(key_code)
else:
raise KeyComboTypeError()
else:
key_code = arg
if isinstance(key_code, KeyCode):
if key_code not in _self_combo:
_self_combo.append(key_code)
else:
raise KeyComboTypeError()
self._combo = tuple(_self_combo)
def __hash__(self) -> int:
return hash(self._combo)
def __eq__(self, other) -> bool:
return hash(self) == hash(other)
def __str__(self) -> str:
output = "{"
for key in self._combo:
output = "{}{}, ".format(output, str(key))
output = output.rstrip(", ")
output += "}"
return output
def __repr__(self) -> str:
return repr(self._combo)
# output = "{KeyCombo: "
# for key in self._combo:
# output = "{}{}, ".format(output, repr(key))
# output = output.rstrip(", ")
# output += "}"
# return output
def __add__(self, other: Union[KeyCode, "KeyCombo"]):
if isinstance(other, KeyCode):
return self + KeyCombo(other)
elif isinstance(other, KeyCombo):
# use list instead of set to preserve order
_self_combo = list(self._combo)
for _key in other._combo:
if _key not in _self_combo:
_self_combo.append(_key)
return KeyCombo(_self_combo)
else:
raise TypeError("Can only add together KeyCode and KeyCombo")
def __iter__(self):
return iter(self._combo)
def __next__(self):
pass
class KeyComboTypeError(TypeError):
def __init__(self, *args: object) -> None:
if not args:
msg = "KeyCombo can only consist of KeyCodes"
else:
msg = args[0]
args = args[1:]
super().__init__(msg, *args)
```
#### File: keycodes/key/key.py
```python
from typing import Union
from .keycodes import KeyCode
class _Key:
_codes = None
def __init__(self):
pass
# fallback if key code is not found.
def _default_code(self, key_name: str) -> None:
return None
def get_code(self, key_name: str) -> KeyCode:
code = None
if self._codes:
try:
code = self._codes[self.translate(key_name)]
except KeyError:
code = self._default_code(key_name)
return KeyCode(key_name, code)
def translate(self, key_name: str) -> str:
return key_name
# when Key(KeyCode) is called, it should convert it into a KeyCode from itself
#
# This allows "casting" between different keys as follows:
# key = Key.SEMICOLON
# win_key = WinKey(key)
# print(repr(key))
# print(repr(win_key))
# print(repr(Key(key)))
#
# Output:
# <KeyCode: key='SEMICOLON', code=None>
# <KeyCode: key='SEMICOLON', code=<WinCodes.VK_OEM_1: 186>>
# <KeyCode: key='SEMICOLON', code=None>
def __call__(self, key_code: Union[KeyCode, str]) -> KeyCode:
if isinstance(key_code, KeyCode):
key_name = key_code.key
elif isinstance(key_code, str):
key_name = key_code
else:
raise TypeError("Can only create a KeyCode from another KeyCode or string")
return getattr(self, key_name)
# ===== START SPECIAL =====
# These keys don't actually exist
@property
def _DISABLED(self) -> KeyCode:
return self.get_code("DISABLED")
DISABLED = _DISABLED
@property
def _SUPER(self) -> KeyCode:
return self.get_code("SUPER")
SUPER = _SUPER
WIN = _SUPER
META = _SUPER
@property
def _CTRL(self) -> KeyCode:
return self.get_code("CTRL")
CTRL = _CTRL
@property
def _SHIFT(self) -> KeyCode:
return self.get_code("SHIFT")
SHIFT = _SHIFT
@property
def _ALT(self) -> KeyCode:
return self.get_code("ALT")
ALT = _ALT
# ===== END SPECIAL =====
@property
def _ESCAPE(self) -> KeyCode:
return self.get_code("ESCAPE")
ESCAPE = _ESCAPE
ESC = _ESCAPE
@property
def _F1(self) -> KeyCode:
return self.get_code("F1")
F1 = _F1
@property
def _F2(self) -> KeyCode:
return self.get_code("F2")
F2 = _F2
@property
def _F3(self) -> KeyCode:
return self.get_code("F3")
F3 = _F3
@property
def _F4(self) -> KeyCode:
return self.get_code("F4")
F4 = _F4
@property
def _F5(self) -> KeyCode:
return self.get_code("F5")
F5 = _F5
@property
def _F6(self) -> KeyCode:
return self.get_code("F6")
F6 = _F6
@property
def _F7(self) -> KeyCode:
return self.get_code("F7")
F7 = _F7
@property
def _F8(self) -> KeyCode:
return self.get_code("F8")
F8 = _F8
@property
def _F9(self) -> KeyCode:
return self.get_code("F9")
F9 = _F9
@property
def _F10(self) -> KeyCode:
return self.get_code("F10")
F10 = _F10
@property
def _F11(self) -> KeyCode:
return self.get_code("F11")
F11 = _F11
@property
def _F12(self) -> KeyCode:
return self.get_code("F12")
F12 = _F12
@property
def _F13(self) -> KeyCode:
return self.get_code("F13")
F13 = _F13
@property
def _F14(self) -> KeyCode:
return self.get_code("F14")
F14 = _F14
@property
def _F15(self) -> KeyCode:
return self.get_code("F15")
F15 = _F15
@property
def _F16(self) -> KeyCode:
return self.get_code("F16")
F16 = _F16
@property
def _F17(self) -> KeyCode:
return self.get_code("F17")
F17 = _F17
@property
def _F18(self) -> KeyCode:
return self.get_code("F18")
F18 = _F18
@property
def _F19(self) -> KeyCode:
return self.get_code("F19")
F19 = _F19
@property
def _F20(self) -> KeyCode:
return self.get_code("F20")
F20 = _F20
@property
def _F21(self) -> KeyCode:
return self.get_code("F21")
F21 = _F21
@property
def _F22(self) -> KeyCode:
return self.get_code("F22")
F22 = _F22
@property
def _F23(self) -> KeyCode:
return self.get_code("F23")
F23 = _F23
@property
def _F24(self) -> KeyCode:
return self.get_code("F24")
F24 = _F24
@property
def _BACKTICK(self) -> KeyCode:
return self.get_code("BACKTICK")
BACKTICK = _BACKTICK
TILDE = _BACKTICK
GRAVE = _BACKTICK
@property
def _ONE(self) -> KeyCode:
return self.get_code("ONE")
ONE = _ONE
KEY_1 = _ONE
@property
def _TWO(self) -> KeyCode:
return self.get_code("TWO")
TWO = _TWO
KEY_2 = _TWO
@property
def _THREE(self) -> KeyCode:
return self.get_code("THREE")
THREE = _THREE
KEY_3 = _THREE
@property
def _FOUR(self) -> KeyCode:
return self.get_code("FOUR")
FOUR = _FOUR
KEY_4 = _FOUR
@property
def _FIVE(self) -> KeyCode:
return self.get_code("FIVE")
FIVE = _FIVE
KEY_5 = _FIVE
@property
def _SIX(self) -> KeyCode:
return self.get_code("SIX")
SIX = _SIX
KEY_6 = _SIX
@property
def _SEVEN(self) -> KeyCode:
return self.get_code("SEVEN")
SEVEN = _SEVEN
KEY_7 = _SEVEN
@property
def _EIGHT(self) -> KeyCode:
return self.get_code("EIGHT")
EIGHT = _EIGHT
KEY_8 = _EIGHT
@property
def _NINE(self) -> KeyCode:
return self.get_code("NINE")
NINE = _NINE
KEY_9 = _NINE
@property
def _ZERO(self) -> KeyCode:
return self.get_code("ZERO")
ZERO = _ZERO
KEY_0 = _ZERO
@property
def _HYPHEN(self) -> KeyCode:
return self.get_code("HYPHEN")
HYPHEN = _HYPHEN
MINUS = _HYPHEN
@property
def _EQUAL(self) -> KeyCode:
return self.get_code("EQUAL")
EQUAL = _EQUAL
@property
def _BACKSLASH(self) -> KeyCode:
return self.get_code("BACKSLASH")
BACKSLASH = _BACKSLASH
@property
def _BACKSPACE(self) -> KeyCode:
return self.get_code("BACKSPACE")
BACKSPACE = _BACKSPACE
@property
def _TAB(self) -> KeyCode:
return self.get_code("TAB")
TAB = _TAB
@property
def _Q(self) -> KeyCode:
return self.get_code("Q")
Q = _Q
@property
def _W(self) -> KeyCode:
return self.get_code("W")
W = _W
@property
def _E(self) -> KeyCode:
return self.get_code("E")
E = _E
@property
def _R(self) -> KeyCode:
return self.get_code("R")
R = _R
@property
def _T(self) -> KeyCode:
return self.get_code("T")
T = _T
@property
def _Y(self) -> KeyCode:
return self.get_code("Y")
Y = _Y
@property
def _U(self) -> KeyCode:
return self.get_code("U")
U = _U
@property
def _I(self) -> KeyCode:
return self.get_code("I")
I = _I
@property
def _O(self) -> KeyCode:
return self.get_code("O")
O = _O
@property
def _P(self) -> KeyCode:
return self.get_code("P")
P = _P
@property
def _L_BRACE(self) -> KeyCode:
return self.get_code("L_BRACE")
L_BRACE = _L_BRACE
LEFT_BRACE = _L_BRACE
@property
def _R_BRACE(self) -> KeyCode:
return self.get_code("R_BRACE")
R_BRACE = _R_BRACE
RIGHT_BRACE = _R_BRACE
@property
def _CAPS_LOCK(self) -> KeyCode:
return self.get_code("CAPS_LOCK")
CAPS_LOCK = _CAPS_LOCK
@property
def _A(self) -> KeyCode:
return self.get_code("A")
A = _A
@property
def _S(self) -> KeyCode:
return self.get_code("S")
S = _S
@property
def _D(self) -> KeyCode:
return self.get_code("D")
D = _D
@property
def _F(self) -> KeyCode:
return self.get_code("F")
F = _F
@property
def _G(self) -> KeyCode:
return self.get_code("G")
G = _G
@property
def _H(self) -> KeyCode:
return self.get_code("H")
H = _H
@property
def _J(self) -> KeyCode:
return self.get_code("J")
J = _J
@property
def _K(self) -> KeyCode:
return self.get_code("K")
K = _K
@property
def _L(self) -> KeyCode:
return self.get_code("L")
L = _L
@property
def _SEMICOLON(self) -> KeyCode:
return self.get_code("SEMICOLON")
SEMICOLON = _SEMICOLON
@property
def _QUOTE(self) -> KeyCode:
return self.get_code("QUOTE")
QUOTE = _QUOTE
APOSTROPHE = _QUOTE
@property
def _ENTER(self) -> KeyCode:
return self.get_code("ENTER")
ENTER = _ENTER
@property
def _L_SHIFT(self) -> KeyCode:
return self.get_code("L_SHIFT")
L_SHIFT = _L_SHIFT
LEFT_SHIFT = _L_SHIFT
@property
def _Z(self) -> KeyCode:
return self.get_code("Z")
Z = _Z
@property
def _X(self) -> KeyCode:
return self.get_code("X")
X = _X
@property
def _C(self) -> KeyCode:
return self.get_code("C")
C = _C
@property
def _V(self) -> KeyCode:
return self.get_code("V")
V = _V
@property
def _B(self) -> KeyCode:
return self.get_code("B")
B = _B
@property
def _N(self) -> KeyCode:
return self.get_code("N")
N = _N
@property
def _M(self) -> KeyCode:
return self.get_code("M")
M = _M
@property
def _COMMA(self) -> KeyCode:
return self.get_code("COMMA")
COMMA = _COMMA
@property
def _PERIOD(self) -> KeyCode:
return self.get_code("PERIOD")
PERIOD = _PERIOD
@property
def _SLASH(self) -> KeyCode:
return self.get_code("SLASH")
SLASH = _SLASH
@property
def _R_SHIFT(self) -> KeyCode:
return self.get_code("R_SHIFT")
R_SHIFT = _R_SHIFT
RIGHT_SHIFT = _R_SHIFT
@property
def _L_CTRL(self) -> KeyCode:
return self.get_code("L_CTRL")
L_CTRL = _L_CTRL
LEFT_CTRL = _L_CTRL
@property
def _L_SUPER(self) -> KeyCode:
return self.get_code("L_SUPER")
L_SUPER = _L_SUPER
LEFT_SUPER = _L_SUPER
L_WIN = _L_SUPER
LEFT_WIN = _L_SUPER
L_META = _L_SUPER
LEFT_META = _L_SUPER
@property
def _L_ALT(self) -> KeyCode:
return self.get_code("L_ALT")
L_ALT = _L_ALT
LEFT_ALT = _L_ALT
@property
def _SPACE(self) -> KeyCode:
return self.get_code("SPACE")
SPACE = _SPACE
@property
def _R_ALT(self) -> KeyCode:
return self.get_code("R_ALT")
R_ALT = _R_ALT
RIGHT_ALT = _R_ALT
@property
def _R_SUPER(self) -> KeyCode:
return self.get_code("R_SUPER")
R_SUPER = _R_SUPER
RIGHT_SUPER = _R_SUPER
R_WIN = _R_SUPER
RIGHT_WIN = _R_SUPER
R_META = _R_SUPER
RIGHT_META = _R_SUPER
@property
def _R_CTRL(self) -> KeyCode:
return self.get_code("R_CTRL")
R_CTRL = _R_CTRL
RIGHT_CTRL = _R_CTRL
@property
def _PRINT_SCREEN(self) -> KeyCode:
return self.get_code("PRINT_SCREEN")
PRINT_SCREEN = _PRINT_SCREEN
@property
def _SCROLL_LOCK(self) -> KeyCode:
return self.get_code("SCROLL_LOCK")
SCROLL_LOCK = _SCROLL_LOCK
@property
def _PAUSE(self) -> KeyCode:
return self.get_code("PAUSE")
PAUSE = _PAUSE
@property
def _INSERT(self) -> KeyCode:
return self.get_code("INSERT")
INSERT = _INSERT
@property
def _HOME(self) -> KeyCode:
return self.get_code("HOME")
HOME = _HOME
@property
def _PAGE_UP(self) -> KeyCode:
return self.get_code("PAGE_UP")
PAGE_UP = _PAGE_UP
@property
def _DELETE(self) -> KeyCode:
return self.get_code("DELETE")
DELETE = _DELETE
@property
def _END(self) -> KeyCode:
return self.get_code("END")
END = _END
@property
def _PAGE_DOWN(self) -> KeyCode:
return self.get_code("PAGE_DOWN")
PAGE_DOWN = _PAGE_DOWN
@property
def _UP(self) -> KeyCode:
return self.get_code("UP")
UP = _UP
@property
def _LEFT(self) -> KeyCode:
return self.get_code("LEFT")
LEFT = _LEFT
@property
def _DOWN(self) -> KeyCode:
return self.get_code("DOWN")
DOWN = _DOWN
@property
def _RIGHT(self) -> KeyCode:
return self.get_code("RIGHT")
RIGHT = _RIGHT
@property
def _NUM_LOCK(self) -> KeyCode:
return self.get_code("NUM_LOCK")
NUM_LOCK = _NUM_LOCK
@property
def _NUM_DIVIDE(self) -> KeyCode:
return self.get_code("NUM_DIVIDE")
NUM_DIVIDE = _NUM_DIVIDE
@property
def _NUM_MULTIPLY(self) -> KeyCode:
return self.get_code("NUM_MULTIPLY")
NUM_MULTIPLY = _NUM_MULTIPLY
@property
def _NUM_MINUS(self) -> KeyCode:
return self.get_code("NUM_MINUS")
NUM_MINUS = _NUM_MINUS
@property
def _NUM_PLUS(self) -> KeyCode:
return self.get_code("NUM_PLUS")
NUM_PLUS = _NUM_PLUS
@property
def _NUM_ENTER(self) -> KeyCode:
return self.get_code("NUM_ENTER")
NUM_ENTER = _NUM_ENTER
@property
def _NUM_ONE(self) -> KeyCode:
return self.get_code("NUM_ONE")
NUM_ONE = _NUM_ONE
NUM_1 = _NUM_ONE
@property
def _NUM_TWO(self) -> KeyCode:
return self.get_code("NUM_TWO")
NUM_TWO = _NUM_TWO
NUM_2 = _NUM_TWO
@property
def _NUM_THREE(self) -> KeyCode:
return self.get_code("NUM_THREE")
NUM_THREE = _NUM_THREE
NUM_3 = _NUM_THREE
@property
def _NUM_FOUR(self) -> KeyCode:
return self.get_code("NUM_FOUR")
NUM_FOUR = _NUM_FOUR
NUM_4 = _NUM_FOUR
@property
def _NUM_FIVE(self) -> KeyCode:
return self.get_code("NUM_FIVE")
NUM_FIVE = _NUM_FIVE
NUM_5 = _NUM_FIVE
@property
def _NUM_SIX(self) -> KeyCode:
return self.get_code("NUM_SIX")
NUM_SIX = _NUM_SIX
NUM_6 = _NUM_SIX
@property
def _NUM_SEVEN(self) -> KeyCode:
return self.get_code("NUM_SEVEN")
NUM_SEVEN = _NUM_SEVEN
NUM_7 = _NUM_SEVEN
@property
def _NUM_EIGHT(self) -> KeyCode:
return self.get_code("NUM_EIGHT")
NUM_EIGHT = _NUM_EIGHT
NUM_8 = _NUM_EIGHT
@property
def _NUM_NINE(self) -> KeyCode:
return self.get_code("NUM_NINE")
NUM_NINE = _NUM_NINE
NUM_9 = _NUM_NINE
@property
def _NUM_ZERO(self) -> KeyCode:
return self.get_code("NUM_ZERO")
NUM_ZERO = _NUM_ZERO
NUM_0 = _NUM_ZERO
@property
def _NUM_DECIMAL(self) -> KeyCode:
return self.get_code("NUM_DECIMAL")
NUM_DECIMAL = _NUM_DECIMAL
# The below can be used as a starting point when defining a map of translations.
#
# "DISABLED": "",
# "SUPER": "",
# "CTRL": "",
# "SHIFT": "",
# "ALT": "",
# "ESCAPE": "",
# "F1": "",
# "F2": "",
# "F3": "",
# "F4": "",
# "F5": "",
# "F6": "",
# "F7": "",
# "F8": "",
# "F9": "",
# "F10": "",
# "F11": "",
# "F12": "",
# "F13": "",
# "F14": "",
# "F15": "",
# "F16": "",
# "F17": "",
# "F18": "",
# "F19": "",
# "F20": "",
# "F21": "",
# "F22": "",
# "F23": "",
# "F24": "",
# "BACKTICK": "",
# "ONE": "",
# "TWO": "",
# "THREE": "",
# "FOUR": "",
# "FIVE": "",
# "SIX": "",
# "SEVEN": "",
# "EIGHT": "",
# "NINE": "",
# "ZERO": "",
# "HYPHEN": "",
# "EQUAL": "",
# "BACKSLASH": "",
# "BACKSPACE": "",
# "TAB": "",
# "Q": "",
# "W": "",
# "E": "",
# "R": "",
# "T": "",
# "Y": "",
# "U": "",
# "I": "",
# "O": "",
# "P": "",
# "L_BRACE": "",
# "R_BRACE": "",
# "CAPS_LOCK": "",
# "A": "",
# "S": "",
# "D": "",
# "F": "",
# "G": "",
# "H": "",
# "J": "",
# "K": "",
# "L": "",
# "SEMICOLON": "",
# "QUOTE": "",
# "ENTER": "",
# "L_SHIFT": "",
# "Z": "",
# "X": "",
# "C": "",
# "V": "",
# "B": "",
# "N": "",
# "M": "",
# "COMMA": "",
# "PERIOD": "",
# "SLASH": "",
# "R_SHIFT": "",
# "L_CTRL": "",
# "L_SUPER": "",
# "L_ALT": "",
# "SPACE": "",
# "R_ALT": "",
# "R_SUPER": "",
# "R_CTRL": "",
# "PRINT_SCREEN": "",
# "SCROLL_LOCK": "",
# "PAUSE": "",
# "INSERT": "",
# "HOME": "",
# "PAGE_UP": "",
# "DELETE": "",
# "END": "",
# "PAGE_DOWN": "",
# "UP": "",
# "LEFT": "",
# "DOWN": "",
# "RIGHT": "",
# "NUM_LOCK": "",
# "NUM_DIVIDE": "",
# "NUM_MULTIPLY": "",
# "NUM_MINUS": "",
# "NUM_PLUS": "",
# "NUM_ENTER": "",
# "NUM_ONE": "",
# "NUM_TWO": "",
# "NUM_THREE": "",
# "NUM_FOUR": "",
# "NUM_FIVE": "",
# "NUM_SIX": "",
# "NUM_SEVEN": "",
# "NUM_EIGHT": "",
# "NUM_NINE": "",
# "NUM_ZERO": "",
# "NUM_DECIMAL": "",
```
#### File: jonchun/ptoys-mapper/mapper.py
```python
from collections import defaultdict
import json
from typing import Dict, Iterable, Union
from keycodes import KeyCombo, KeyCode, WinKey
import constants as const
class not_list(list):
pass
class PToysMapper:
def __init__(self) -> None:
self.key_map = defaultdict(dict)
# key_map looks as follows, where currently everything is keyed under None = global shortcuts
# {
# None: {
# K.A: KC(K.B, K.C),
# K.L_ALT: K.R_CTRL
# }
# }
self.shortcut_map = defaultdict(dict)
# shortcut_map looks as follows, where None = global shortcuts
# {
# None: {
# KC(K.A, K.B): KC(K.C, K.D)
# },
# "app_1": {
# KC(K.E, K.F): KC(K.G, K.H)
# }
# }
self.disabled_shortcuts = defaultdict(list)
# disabled_shortcuts looks as follows. It is used to disable any shortcuts in
# an app-specific setting to support not_list
# {
# "app_1": [KC(K.A, K.B), KC(K.C, K.D)],
# "app_2": [KC(K.E, K.F), KC(K.G, K.H)],
# "app_3": [KC(K.E, K.F), KC(K.G, K.H)],
# }
def map_key(self, condition, mapping, name="anon"):
for old, new in mapping.items():
if not isinstance(condition, Iterable):
condition = [condition]
for c in condition:
if c is None:
if c:
c = c.lower()
if old in self.key_map[c]:
# this means the same key remap has been defined more than once
pass
else:
# the mapping that was defined first takes priority. leaving the code like this because
# I'm undecided on what the behavior here should be
self.key_map[c][old] = new
else:
raise TypeError(
"Unknown type in condition. Single key remaps currently only support None for condition."
)
def map_shortcut(self, condition, mapping, name="anon"):
for old, new in mapping.items():
# if _not_list == True, we want to negate the condition so that rather than app-specific, we are adding
# rules that are global but disabled for the provided app name
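# e.g. (illustrative) condition=not_list(["chrome.exe"]) adds the remap as a global rule and
# later emits an app-specific "disabled" entry for chrome.exe, since app rules override global ones.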
_not_list: bool = False
if isinstance(condition, not_list):
_not_list = True
if not isinstance(condition, Iterable):
condition = [condition]
for c in condition:
if isinstance(c, str) or c is None:
if c:
# windows process names are case insensitive, and powertoys will force the JSON to lowercase anyways
c = c.lower()
if _not_list:
# in this block, the provided condition was a list of processes that are exceptions.
# need to add the rule to globals instead of app-specific and disable them later as app-specific shortcuts
globals = None
if old not in self.shortcut_map[globals]:
self.shortcut_map[globals][old] = new
self.disabled_shortcuts[c].append(old)
else:
if old in self.shortcut_map[c]:
# this means the same shortcut has been defined more than once
pass
else:
# the mapping that was defined first takes priority. leaving the code like this because
# I'm undecided on what the behavior here should be
self.shortcut_map[c][old] = new
else:
raise TypeError("Unknown type in condition")
# helper to return ptoys config struct
@staticmethod
def _remap_dict(old, new, app_name) -> Dict[str, str]:
d = {
const.ORIGINAL_KEYS_SETTING_NAME: PToysMapper._code_helper(old),
const.NEW_REMAP_KEYS_SETTING_NAME: PToysMapper._code_helper(new),
}
if app_name:
d[const.TARGET_APP_SETTING_NAME] = app_name
return d
@staticmethod
def _code_helper(shortcut: Union[KeyCode, KeyCombo]) -> str:
if isinstance(shortcut, KeyCode):
shortcut = KeyCombo(shortcut)
if isinstance(shortcut, KeyCombo):
return ";".join([str(int(WinKey(key).code)) for key in shortcut])
else:
raise TypeError(
"can't format unknown type with code helper: {}".format(type(shortcut))
)
def generate(self):
# The app_name of "None" = global key remaps
global_km: Dict[KeyCode, KeyCode] = (
self.key_map.pop(None) if None in self.key_map else {}
)
global_km_json: Iterable[Dict[str, str]] = [
self._remap_dict(old, new, None) for old, new in global_km.items()
]
# The app_name of "None" = global shortcuts
global_sc: Dict[KeyCombo, KeyCombo] = (
self.shortcut_map.pop(None) if None in self.shortcut_map else {}
)
global_sc_json: Iterable[Dict[str, str]] = [
self._remap_dict(old, new, None) for old, new in global_sc.items()
]
# We process disabled_shortcuts at the very end to make sure we don't disable any shortcuts that have app-specific overrides
for app_name, disabled_shortcuts in self.disabled_shortcuts.items():
for disabled_sc in disabled_shortcuts:
# if the disabled_sc is already inside of the shortcut map, that means the same shortcut already has an override, so don't disable the shortcut in that case
# otherwise, we set the key to disabled
if disabled_sc not in self.shortcut_map[app_name]:
self.shortcut_map[app_name][disabled_sc] = WinKey.DISABLED
app_sc_json: Iterable[Dict[str, str]] = []
for app_name, app_sc in self.shortcut_map.items():
app_sc_json.extend(
[self._remap_dict(old, new, app_name) for old, new in app_sc.items()]
)
ptoys_config = {
const.REMAP_KEYS_SETTING_NAME: {
const.IN_PROCESS_REMAP_KEYS_SETTING_NAME: global_km_json
},
const.REMAP_SHORTCUTS_SETTING_NAME: {
const.GLOBAL_REMAP_SHORTCUTS_SETTING_NAME: global_sc_json,
const.APP_SPECIFIC_REMAP_SHORTCUTS_SETTING_NAME: app_sc_json,
},
}
with open("default.json", "w") as f:
json.dump(ptoys_config, f, indent=2)
``` |
{
"source": "Jonchun/pubkeysync",
"score": 2
} |
#### File: pubkeysync/pks/pubkeysync.py
```python
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
import logging
import os
import yaml
from .objects.category import Category
from .objects.group import Group
from .objects.user import User
from .objects.server import Server
class PubKeySync:
def __init__(self, **kwargs):
logging.basicConfig(filename='logs/pubkeysync.log', level=logging.INFO, format='%(asctime).19s | %(levelname)s | %(message)s')
self._log = logging.getLogger("pks")
self.printlog('-----PubKeySync Started-----')
self.printlog('Loading configuration...')
self.config()
def config(self):
# Catch this cleanly and output messages if config files are invalid.
try:
with open('config/pubkeysync.yaml', 'r') as f:
config = yaml.safe_load(f.read())
self.private_key_file = config['private_key']
self.public_key_file = config['public_key']
# Add Groups
self.groups = []
for group_dict in config['groups']:
if isinstance(group_dict, str):
group_dict = {group_dict: {}}
group = Group(self, group_dict)
self.groups.append(group)
# Add Users
self.users = []
for user_dict in config['users']:
if isinstance(user_dict, str):
user_dict = {user_dict: {}}
user = User(self, user_dict)
self.users.append(user)
# Add Categories
self.categories = []
for category_dict in config['categories']:
if isinstance(category_dict, str):
category_dict = {category_dict: {}}
category = Category(self, category_dict)
self.categories.append(category)
# Add Servers
self.servers = []
for server_dict in config['servers']:
server = Server(self, server_dict)
self.servers.append(server)
self.load_agent_keys()
except Exception as e:
print('Load Failed: {}'.format(e))
self.log_error('Load Failed: {}'.format(e))
exit()
def printlog(self, msg):
print(msg)
self.log(msg)
def log(self, msg):
self._log.info(msg)
def log_error(self, msg):
self._log.error(msg)
def load_agent_keys(self):
try:
if not os.path.isfile('keys/{}'.format(self.private_key_file)) and not os.path.isfile('keys/{}'.format(self.public_key_file)):
print('Agent key-pair does not exist. Creating...')
# generate new keypair if files don't exist
key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537, key_size=2048)
# get public key in OpenSSH format
_public_key = key.public_key().public_bytes(serialization.Encoding.OpenSSH, \
serialization.PublicFormat.OpenSSH)
# get private key in PEM container format
pem = key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
with open('keys/{}'.format(self.private_key_file), 'wb') as f:
f.write(pem)
# Set owner-only permissions
os.chmod('keys/{}'.format(self.private_key_file), 0o600)
self.public_key = _public_key.decode('utf-8') + ' pubkeysync-agent'
with open('keys/{}'.format(self.public_key_file), 'w') as f:
f.write(self.public_key)
return self.public_key
else:
if not os.path.isfile('keys/{}'.format(self.private_key_file)):
raise Exception
with open('keys/{}'.format(self.public_key_file), 'r') as f:
self.public_key = f.read()
return self.public_key
except Exception:
raise Exception('Unable to load key file for PKS agent')
def push_keys(self, verbose=False):
category_servers = []
for category in self.categories:
category_servers.extend(category.get_new_servers())
all_servers = list(set(self.servers + category_servers))
for server in all_servers:
server.update_authorized_keys(verbose)
``` |
{
"source": "Joncichawa/MLOps_Transformers",
"score": 3
} |
#### File: start/classify/predictonnx.py
```python
import numpy as np # we're going to use numpy to process input and output data
import onnxruntime # to inference ONNX models, we use the ONNX Runtime
import onnx
from onnx import numpy_helper
from urllib.request import urlopen
import json
import time
import logging
import os
import sys
from datetime import datetime
from typing import Dict, List, Tuple
import torch
import yaml
from torch import Tensor
from torch.utils.data import DataLoader
from transformers import DistilBertTokenizer
import requests
# Run the model on the backend
d = os.path.dirname(os.path.abspath(__file__))
modelfile = os.path.join(d, 'model.onnx')
labelfile = os.path.join(d, 'labels.json')
session = onnxruntime.InferenceSession(modelfile)
np.set_printoptions(precision=None, suppress=True, formatter={'float_kind':'{:f}'.format})
# get the name of the inputs of the model
input_name1 = session.get_inputs()[0].name
input_name2 = session.get_inputs()[1].name
def predict_loader(texts: List[str], config: dict) -> List[Dict[str, Tensor]]:
tokenizer = DistilBertTokenizer.from_pretrained(
"distilbert-base-uncased", cache_dir="/cache"
)
token_text_list = list(
map(
lambda e: tokenizer.encode_plus(
e,
add_special_tokens=True,
max_length=config["model"]["max_sentence_length"],
pad_to_max_length=True,
truncation=True,
),
texts,
)
)
def f(e):
return {
"input_ids": torch.tensor(e.data["input_ids"], dtype=torch.long).unsqueeze(
0
),
"attention_mask": torch.tensor(
e.data["attention_mask"], dtype=torch.long
).unsqueeze(0),
}
dataset = list(map(f, token_text_list))
return dataset
def predict_class_from_text(input_text):
config_path = "config.yaml"
with open(config_path) as f:
config = yaml.safe_load(f)
labels = ['Company', 'EducationalInstitution', 'Artist', 'Athlete', 'OfficeHolder',
'MeanOfTransportation', 'Building', 'NaturalPlace', 'Village', 'Animal',
'Plant', 'Album', 'Film', 'WrittenWork']
pred_loader = predict_loader(input_text, config)
start = time.time()
results = []
for x in pred_loader:
ids = x["input_ids"]
mask = x["attention_mask"]
raw_result = session.run(None, {input_name1: ids.numpy(), input_name2: mask.numpy()})
res = torch.exp(torch.tensor(raw_result))
idx = np.argmax(res.squeeze().numpy()).astype(int)
results.append((str(idx), labels[idx]))
end = time.time()
inference_time = np.round((end - start) * 1000, 2)
response = {
'created': datetime.utcnow().isoformat(),
'predictions': results,
'latency': inference_time,
}
logging.info(f'returning {response}')
return response
if __name__ == '__main__':
print(predict_class_from_text(sys.argv[1]))
```
#### File: MLOps_Transformers/tests/test_data.py
```python
from typing import Dict, List, Tuple
import pytest
import torch
import yaml
from datasets import load_dataset
from datasets import logging as logging_ds
from torch import Tensor
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.dataset import T_co
from transformers import DistilBertTokenizer
from transformers import logging as logging_tf
from src.models.distil_bert_classifier import DistillBERTClass
from src.paths import DATA_PATH, EXPERIMENTS_PATH, MODELS_PATH, TESTS_PATH
from src.data.make_dataset import prepare_train_loaders
def test_data_shape():
config_path = TESTS_PATH / "test-config.yaml"
with open(config_path) as f:
config = yaml.safe_load(f)
train_loader, val_loader, test_loader = prepare_train_loaders(config)
# print(train_loader.size)
# print(train_loader.dataset['columns'])
# assert train_loader.dataset['columns'] == ['attention_mask', 'input_ids', 'label']
# assert train_loader.dataset == Dataset({'features': ['attention_mask', 'input_ids', 'label'],
# 'num_rows': 14
# })
assert True
``` |
{
"source": "joncinque/django_form",
"score": 2
} |
#### File: management/commands/encounters_prime.py
```python
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from encounters.models import Encounter, Patient, Facility, Physician
from encounters.tests import (
TEST_ENCOUNTERS,
TEST_PATIENTS,
TEST_FACILITIES,
TEST_PHYSICIANS,
)
class Command(BaseCommand):
help = '''Add some default models for testing'''
def handle(self, *args, **kwargs):
try:
get_user_model().objects.create_superuser('test', '<EMAIL>', 'test')
except Exception:
pass
try:
get_user_model().objects.create_superuser('admin', '<EMAIL>', 'admin')
except Exception:
pass
[Facility.objects.update_or_create(**obj) for obj in TEST_FACILITIES]
[Patient.objects.update_or_create(**obj) for obj in TEST_PATIENTS]
[Physician.objects.update_or_create(**obj) for obj in TEST_PHYSICIANS]
facility = Facility.objects.all().first()
patient = Patient.objects.all().first()
physician = Physician.objects.all().first()
[Encounter.objects.update_or_create(patient=patient, facility=facility, physician=physician, **obj) for obj in TEST_ENCOUNTERS]
```
#### File: django_form/encounters/models.py
```python
from django.db import models
from django.urls import reverse
MAX_NAME_LENGTH = 200
MAX_ADDRESS_LENGTH = 200
MAX_SSN_LENGTH = 15
MAX_PHONE_LENGTH = 30
class Sex(models.TextChoices):
MALE = 'Male'
FEMALE = 'Female'
OTHER = 'Other'
DECLINE = 'Decline to answer'
class EncounterType(models.TextChoices):
INPATIENT = 'Inpatient'
OUTPATIENT_AMBULANCE = 'Outpatient Ambulance'
OUTPATIENT_OFFICE = 'Outpatient Office'
class Patient(models.Model):
name = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
email = models.EmailField(blank=True)
ssn = models.CharField(max_length=MAX_SSN_LENGTH, blank=True)
address = models.CharField(max_length=MAX_NAME_LENGTH, blank=True)
city = models.CharField(max_length=MAX_NAME_LENGTH, blank=True)
state = models.CharField(max_length=MAX_NAME_LENGTH, blank=True)
zipcode = models.CharField(max_length=MAX_PHONE_LENGTH, blank=True)
phone = models.CharField(max_length=MAX_PHONE_LENGTH, blank=True)
birthdate = models.DateField(blank=True, null=True)
sex = models.CharField(
max_length=MAX_PHONE_LENGTH,
choices=Sex.choices,
default=Sex.OTHER,
)
def get_absolute_url(self):
return reverse('patients-detail', args=[self.id])
def get_delete_url(self):
return reverse('patients-delete', args=[self.id])
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Patient'
verbose_name_plural = 'Patients'
class Facility(models.Model):
name = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
email = models.EmailField(blank=True)
taxid = models.CharField(max_length=MAX_SSN_LENGTH, blank=True)
address = models.CharField(max_length=MAX_NAME_LENGTH, blank=True)
city = models.CharField(max_length=MAX_NAME_LENGTH, blank=True)
state = models.CharField(max_length=MAX_NAME_LENGTH, blank=True)
zipcode = models.CharField(max_length=MAX_PHONE_LENGTH, blank=True)
phone = models.CharField(max_length=MAX_PHONE_LENGTH, blank=True)
fax = models.CharField(max_length=MAX_PHONE_LENGTH, blank=True)
admin = models.CharField(max_length=MAX_NAME_LENGTH, blank=True)
def get_absolute_url(self):
return reverse('facilities-detail', args=[self.id])
def get_delete_url(self):
return reverse('facilities-delete', args=[self.id])
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Facility'
verbose_name_plural = 'Facilities'
class Physician(models.Model):
name = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True)
email = models.EmailField(blank=True)
taxid = models.CharField(max_length=MAX_SSN_LENGTH, blank=True)
address = models.CharField(max_length=MAX_NAME_LENGTH, blank=True)
city = models.CharField(max_length=MAX_NAME_LENGTH, blank=True)
state = models.CharField(max_length=MAX_NAME_LENGTH, blank=True)
zipcode = models.CharField(max_length=MAX_PHONE_LENGTH, blank=True)
phone = models.CharField(max_length=MAX_PHONE_LENGTH, blank=True)
fax = models.CharField(max_length=MAX_PHONE_LENGTH, blank=True)
def get_absolute_url(self):
return reverse('physicians-detail', args=[self.id])
def get_delete_url(self):
return reverse('physicians-delete', args=[self.id])
def __str__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = 'Physician'
verbose_name_plural = 'Physicians'
class Encounter(models.Model):
requested_type = models.CharField(
max_length=MAX_PHONE_LENGTH,
choices=EncounterType.choices,
default=EncounterType.INPATIENT,
)
assigned_type = models.CharField(
max_length=MAX_PHONE_LENGTH,
choices=EncounterType.choices,
default=EncounterType.INPATIENT,
)
patient = models.ForeignKey(Patient, on_delete=models.CASCADE)
facility = models.ForeignKey(Facility, on_delete=models.CASCADE)
physician = models.ForeignKey(Physician, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
charting = models.TextField(blank=True) # TODO formset
reviews = models.TextField(null=True, blank=True) # TODO formset
def get_absolute_url(self):
return reverse('encounters-detail', args=[self.id])
def get_delete_url(self):
return reverse('encounters-delete', args=[self.id])
def __str__(self):
return self.created.strftime('%Y/%m/%d %H:%M') + ' - ' + self.patient.name
class Meta:
ordering = ['last_modified', 'created']
verbose_name = 'Encounter'
verbose_name_plural = 'Encounters'
```
#### File: django_form/encounters/tests.py
```python
from datetime import date, datetime, timedelta, timezone
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from .forms import PatientForm
from .models import Facility, Patient, Physician, Encounter
TEST_PATIENT1 = {
'name': 'TestPatient1',
'email': '<EMAIL>',
'ssn': 'xxx-xx-xxxx',
'phone': '(xxx) xxx-xxxx',
'address': '123 Fake Street',
'city': 'Springfield',
'state': 'NV',
'zipcode': '10001',
'birthdate': date(1950, 12, 2),
}
TEST_PATIENT2 = {
'name': 'TestPatient2',
'email': '<EMAIL>',
'ssn': 'xxx-xx-xxxx',
'phone': '(xxx) xxx-xxxx',
'address': '1 Real Lane',
'city': 'Spring',
'state': 'TX',
    'zipcode': '10002',
}
TEST_PATIENT3 = {
'name': 'TestPatient3',
'email': '<EMAIL>',
'ssn': 'xxx-xx-xxxx',
'phone': '(xxx) xxx-xxxx',
'address': '142 Some Street',
'city': 'Spring',
'state': 'TX',
    'zipcode': '10002',
}
TEST_PATIENTS = [TEST_PATIENT1, TEST_PATIENT2, TEST_PATIENT3]
TEST_FACILITY1 = {
'name': 'Facility1',
'admin': 'Admin1',
'address': 'Address1',
'phone': '(111) 111-1111',
}
TEST_FACILITY2 = {
'name': 'Facility2',
'admin': 'Admin2',
'address': 'Address2',
'phone': '(222) 222-2222',
}
TEST_FACILITIES = [TEST_FACILITY1, TEST_FACILITY2]
TEST_PHYSICIAN1 = {
'name': 'Physician1',
'address': 'Address1',
'phone': '(111) 111-1111',
}
TEST_PHYSICIAN2 = {
'name': 'Physician2',
'address': 'Address2',
'phone': '(222) 222-2222',
}
TEST_PHYSICIANS = [TEST_PHYSICIAN1, TEST_PHYSICIAN2]
TEST_ENCOUNTERS = [
{
'charting': 'Lots of useful information here in free-form',
'created': datetime.now(tz=timezone.utc),
'last_modified': datetime.now(tz=timezone.utc),
},
{
'charting': 'There was a lot of information to keep track of here.',
'created': datetime.now(tz=timezone.utc) - timedelta(days=2),
'last_modified': datetime.now(tz=timezone.utc) - timedelta(days=1),
},
]
class PatientFormTest(TestCase):
@classmethod
def setUpTestData(cls):
[Patient.objects.create(**patient) for patient in TEST_PATIENTS]
def setUp(self):
self.username = 'testuser'
self.password = '<PASSWORD>'
self.email = '<EMAIL>'
self.user = get_user_model().objects.create_user(self.username, self.email, self.password)
self.client.login(username=self.username, password=self.password)
def test_create(self):
list_url = reverse('patients-list')
patients_view = self.client.get(list_url)
initial_patients = list(patients_view.context['patient_list'])
create_url = reverse('patients-create')
new_patient_response = self.client.post(create_url, data=TEST_PATIENT1)
self.assertEqual(new_patient_response.status_code, 302)
patient_url = new_patient_response['Location']
patient_view = self.client.get(patient_url)
patient = patient_view.context['patient']
for k, v in TEST_PATIENT1.items():
self.assertEqual(getattr(patient, k), v)
patients_view = self.client.get(list_url)
final_patients = list(patients_view.context['patient_list'])
self.assertEqual(len(initial_patients) + 1, len(final_patients))
def test_create_blank(self):
create_url = reverse('patients-create')
new_patient_response = self.client.post(create_url, data={})
self.assertEqual(new_patient_response.context['field_errors'], ['This field is required.'])
def test_modify(self):
form = PatientForm(TEST_PATIENT2)
self.assertTrue(form.is_valid())
form.save()
patient_object = Patient.objects.filter(name=TEST_PATIENT2['name']).first()
for k, v in TEST_PATIENT2.items():
self.assertEqual(getattr(patient_object, k), v)
form = PatientForm(instance=patient_object)
new_email = '<EMAIL>'
form.data['email'] = new_email
class PhysicianFormTest(TestCase):
@classmethod
def setUpTestData(cls):
[Physician.objects.create(**obj) for obj in TEST_PHYSICIANS]
class FacilityFormTest(TestCase):
@classmethod
def setUpTestData(cls):
[Facility.objects.create(**obj) for obj in TEST_FACILITIES]
class EncounterFormTest(TestCase):
@classmethod
def setUpTestData(cls):
[Patient.objects.create(**patient) for patient in TEST_PATIENTS]
[Facility.objects.create(**obj) for obj in TEST_FACILITIES]
[Physician.objects.create(**obj) for obj in TEST_PHYSICIANS]
facility = Facility.objects.all().first()
patient = Patient.objects.all().first()
physician = Physician.objects.all().first()
[Encounter.objects.update_or_create(patient=patient, facility=facility, physician=physician, **obj) for obj in TEST_ENCOUNTERS]
def setUp(self):
self.username = 'testuser'
self.password = '<PASSWORD>'
self.email = '<EMAIL>'
self.user = get_user_model().objects.create_user(self.username, self.email, self.password)
self.client.login(username=self.username, password=self.password)
def test_create(self):
list_url = reverse('encounters-list')
list_view = self.client.get(list_url)
initial = list(list_view.context['object_list'])
self.assertIsInstance(initial, list)
create_url = reverse('encounters-create')
form_response = self.client.get(create_url)
self.assertEqual(form_response.status_code, 200)
print(list(form_response.context.keys()))
print(form_response.context['form'])
``` |
{
"source": "joncinque/yoga-site",
"score": 2
} |
#### File: yoga-site/theme/page_processors.py
```python
from mezzanine.pages.page_processors import processor_for
from .models import HomePage
from theme.portfolio.models import PortfolioItem
@processor_for(HomePage)
def home_processor(request, page):
homepage = HomePage.objects.prefetch_related(
'slides', 'boxes').select_related(
'featured_portfolio', 'featured_gallery').get(id=page.homepage.id)
items = PortfolioItem.objects.published(for_user=request.user).prefetch_related('categories')
items = items.filter(parent=homepage.featured_portfolio)[:homepage.max_portfolio_items]
return {"homepage": homepage, 'items': items}
``` |
{
"source": "jonc-ives/ConAlarm",
"score": 2
} |
#### File: jonc-ives/ConAlarm/application.py
```python
from flask import Flask, render_template
from flask_restful import Resource, Api
import endpoints, os, multiprocessing
# configure application -- this'll work for now
config = {
"mongoURI": "127.0.0.1",
"mongoDB": "alarmcon",
"mongoAlColl": "alarms",
"mongoLogColl": "logs",
"mongoPort": 27017,
"debug": True,
"port": 5000
}
# instantiate server
if __name__ == '__main__':
manager = multiprocessing.Manager()
thread_flags = manager.dict({})
# pass configuration to controller
controller = endpoints.initialize(config, thread_flags)
# create server instance
    app = Flask(__name__, template_folder=os.path.dirname(os.path.abspath(__file__)))
api = Api(app)
@app.route('/', methods=['GET'])
def Index():
print(os.path.dirname(os.path.abspath(__file__)))
return render_template("index.html")
# initialize server endpoints
api.add_resource(endpoints.AlarmInfo, '/alarms')
api.add_resource(endpoints.AlarmActions, '/alarm/<aid>')
api.add_resource(endpoints.AlarmHandle, '/handle/<aid>')
    app.run(debug=config["debug"], port=config["port"])
``` |
{
"source": "JonClayton/Advent-of-Code",
"score": 4
} |
#### File: Advent-of-Code/python/december_2.py
```python
from python.classes import Solution
def first_solution(a_list_of_actions):
steps = convert_to_action_tuples(a_list_of_actions)
down = 0
forward = 0
for pair in steps:
down += pair[1]
forward += pair[0]
return forward * down
def second_solution(a_list_of_actions):
steps = convert_to_action_tuples(a_list_of_actions)
aim = 0
down = 0
forward = 0
for pair in steps:
aim += pair[1]
down += aim * pair[0]
forward += pair[0]
return forward * down
solution = Solution('inputs/inputs_02.json', first_solution, second_solution)
def convert_to_action_tuples(a_list_of_actions):
result = []
for item in a_list_of_actions:
x_and_y = item.split()
if x_and_y[0] == 'down':
result.append((0, int(x_and_y[1])))
if x_and_y[0] == 'up':
result.append((0, -int(x_and_y[1])))
if x_and_y[0] == 'forward':
result.append((int(x_and_y[1]), 0))
return result
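# Hypothetical example of the conversion above:
#   convert_to_action_tuples(["forward 5", "down 3", "up 1"])
#   -> [(5, 0), (0, 3), (0, -1)]  # (forward, down) pairs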
``` |
{
"source": "jonclothcat/OpenPype",
"score": 2
} |
#### File: OpenPype/igniter/__main__.py
```python
import sys
from Qt import QtWidgets # noqa
from Qt.QtCore import Signal # noqa
from .install_dialog import InstallDialog
RESULT = 0
def get_result(res: int):
"""Sets result returned from dialog."""
global RESULT
RESULT = res
app = QtWidgets.QApplication(sys.argv)
d = InstallDialog()
d.finished.connect(get_result)
d.open()
app.exec()
sys.exit(RESULT)
```
#### File: OpenPype/igniter/nice_progress_bar.py
```python
from Qt import QtCore, QtGui, QtWidgets # noqa
class NiceProgressBar(QtWidgets.QProgressBar):
def __init__(self, parent=None):
super(NiceProgressBar, self).__init__(parent)
self._real_value = 0
def setValue(self, value):
self._real_value = value
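        # Note: values 1-10 are displayed as 11 below; the unmodified value is
        # kept in self._real_value and returned by value() / text().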
if value != 0 and value < 11:
value = 11
super(NiceProgressBar, self).setValue(value)
def value(self):
return self._real_value
def text(self):
return "{} %".format(self._real_value)
```
#### File: OpenPype/igniter/update_window.py
```python
import os
from .update_thread import UpdateThread
from Qt import QtCore, QtGui, QtWidgets # noqa
from .bootstrap_repos import OpenPypeVersion
from .nice_progress_bar import NiceProgressBar
from .tools import load_stylesheet
class UpdateWindow(QtWidgets.QDialog):
"""OpenPype update window."""
_width = 500
_height = 100
def __init__(self, version: OpenPypeVersion, parent=None):
super(UpdateWindow, self).__init__(parent)
self._openpype_version = version
self._result_version_path = None
self.setWindowTitle(
f"OpenPype is updating ..."
)
self.setModal(True)
self.setWindowFlags(
QtCore.Qt.WindowMinimizeButtonHint
)
current_dir = os.path.dirname(os.path.abspath(__file__))
roboto_font_path = os.path.join(current_dir, "RobotoMono-Regular.ttf")
poppins_font_path = os.path.join(current_dir, "Poppins")
icon_path = os.path.join(current_dir, "openpype_icon.png")
# Install roboto font
QtGui.QFontDatabase.addApplicationFont(roboto_font_path)
for filename in os.listdir(poppins_font_path):
if os.path.splitext(filename)[1] == ".ttf":
                QtGui.QFontDatabase.addApplicationFont(
                    os.path.join(poppins_font_path, filename)
                )
# Load logo
pixmap_openpype_logo = QtGui.QPixmap(icon_path)
# Set logo as icon of window
self.setWindowIcon(QtGui.QIcon(pixmap_openpype_logo))
self._pixmap_openpype_logo = pixmap_openpype_logo
self._update_thread = None
self.resize(QtCore.QSize(self._width, self._height))
self._init_ui()
# Set stylesheet
self.setStyleSheet(load_stylesheet())
self._run_update()
def _init_ui(self):
# Main info
# --------------------------------------------------------------------
main_label = QtWidgets.QLabel(
f"<b>OpenPype</b> is updating to {self._openpype_version}", self)
main_label.setWordWrap(True)
main_label.setObjectName("MainLabel")
# Progress bar
# --------------------------------------------------------------------
progress_bar = NiceProgressBar(self)
progress_bar.setAlignment(QtCore.Qt.AlignCenter)
progress_bar.setTextVisible(False)
# add all to main
main = QtWidgets.QVBoxLayout(self)
main.addSpacing(15)
main.addWidget(main_label, 0)
main.addSpacing(15)
main.addWidget(progress_bar, 0)
main.addSpacing(15)
self._progress_bar = progress_bar
def _run_update(self):
"""Start install process.
This will once again validate entered path and mongo if ok, start
working thread that will do actual job.
"""
# Check if install thread is not already running
if self._update_thread and self._update_thread.isRunning():
return
self._progress_bar.setRange(0, 0)
update_thread = UpdateThread(self)
update_thread.set_version(self._openpype_version)
update_thread.message.connect(self.update_console)
update_thread.progress.connect(self._update_progress)
update_thread.finished.connect(self._installation_finished)
self._update_thread = update_thread
update_thread.start()
def get_version_path(self):
return self._result_version_path
def _installation_finished(self):
status = self._update_thread.result()
self._result_version_path = status
self._progress_bar.setRange(0, 1)
self._update_progress(100)
QtWidgets.QApplication.processEvents()
self.done(0)
def _update_progress(self, progress: int):
# not updating progress as we are not able to determine it
# correctly now. Progress bar is set to un-deterministic mode
# until we are able to get progress in better way.
"""
self._progress_bar.setRange(0, 0)
self._progress_bar.setValue(progress)
text_visible = self._progress_bar.isTextVisible()
if progress == 0:
if text_visible:
self._progress_bar.setTextVisible(False)
elif not text_visible:
self._progress_bar.setTextVisible(True)
"""
return
def update_console(self, msg: str, error: bool = False) -> None:
"""Display message in console.
Args:
msg (str): message.
error (bool): if True, print it red.
"""
print(msg)
```
#### File: plugins/publish/validate_instance_asset.py
```python
from avalon import api
import pyblish.api
import openpype.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.aftereffects.api import get_stub
class ValidateInstanceAssetRepair(pyblish.api.Action):
"""Repair the instance asset with value from Context."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
# Get the errored instances
failed = []
for result in context.data["results"]:
if (result["error"] is not None and result["instance"] is not None
and result["instance"] not in failed):
failed.append(result["instance"])
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(failed, plugin)
stub = get_stub()
for instance in instances:
data = stub.read(instance[0])
data["asset"] = api.Session["AVALON_ASSET"]
stub.imprint(instance[0], data)
class ValidateInstanceAsset(pyblish.api.InstancePlugin):
"""Validate the instance asset is the current selected context asset.
    As multiple workfiles might be opened at the same time, switching between
    them could mess with the selected context (from the Launcher or Ftrack).
    In that case outputs might be published under the wrong asset!
Repair action will use Context asset value (from Workfiles or Launcher)
Closing and reopening with Workfiles will refresh Context value.
"""
label = "Validate Instance Asset"
hosts = ["aftereffects"]
actions = [ValidateInstanceAssetRepair]
order = openpype.api.ValidateContentsOrder
def process(self, instance):
instance_asset = instance.data["asset"]
current_asset = api.Session["AVALON_ASSET"]
msg = (
f"Instance asset {instance_asset} is not the same "
f"as current context {current_asset}."
)
if instance_asset != current_asset:
raise PublishXmlValidationError(self, msg)
```
#### File: plugins/load/load_look.py
```python
from pathlib import Path
from pprint import pformat
from typing import Dict, List, Optional
import os
import json
import bpy
from openpype.pipeline import get_representation_path
from openpype.hosts.blender.api import plugin
from openpype.hosts.blender.api.pipeline import (
containerise_existing,
AVALON_PROPERTY
)
class BlendLookLoader(plugin.AssetLoader):
"""Load models from a .blend file.
Because they come from a .blend file we can simply link the collection that
contains the model. There is no further need to 'containerise' it.
"""
families = ["look"]
representations = ["json"]
label = "Load Look"
icon = "code-fork"
color = "orange"
def get_all_children(self, obj):
children = list(obj.children)
for child in children:
children.extend(child.children)
return children
def _process(self, libpath, container_name, objects):
with open(libpath, "r") as fp:
data = json.load(fp)
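        # The look .json is expected to be a list of entries such as (hypothetical
        # example): [{"fbx_filename": "matA.fbx", "tga_filename": "matA.tga"}, ...]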
path = os.path.dirname(libpath)
materials_path = f"{path}/resources"
materials = []
for entry in data:
file = entry.get('fbx_filename')
if file is None:
continue
bpy.ops.import_scene.fbx(filepath=f"{materials_path}/{file}")
mesh = [o for o in bpy.context.scene.objects if o.select_get()][0]
material = mesh.data.materials[0]
material.name = f"{material.name}:{container_name}"
texture_file = entry.get('tga_filename')
if texture_file:
node_tree = material.node_tree
pbsdf = node_tree.nodes['Principled BSDF']
base_color = pbsdf.inputs[0]
tex_node = base_color.links[0].from_node
tex_node.image.filepath = f"{materials_path}/{texture_file}"
materials.append(material)
for obj in objects:
for child in self.get_all_children(obj):
mesh_name = child.name.split(':')[0]
if mesh_name == material.name.split(':')[0]:
child.data.materials.clear()
child.data.materials.append(material)
break
bpy.data.objects.remove(mesh)
return materials, objects
def process_asset(
self, context: dict, name: str, namespace: Optional[str] = None,
options: Optional[Dict] = None
) -> Optional[List]:
"""
Arguments:
name: Use pre-defined name
namespace: Use pre-defined namespace
context: Full parenthood of representation to load
options: Additional settings dictionary
"""
libpath = self.fname
asset = context["asset"]["name"]
subset = context["subset"]["name"]
lib_container = plugin.asset_name(
asset, subset
)
unique_number = plugin.get_unique_number(
asset, subset
)
namespace = namespace or f"{asset}_{unique_number}"
container_name = plugin.asset_name(
asset, subset, unique_number
)
container = bpy.data.collections.new(lib_container)
container.name = container_name
containerise_existing(
container,
name,
namespace,
context,
self.__class__.__name__,
)
metadata = container.get(AVALON_PROPERTY)
metadata["libpath"] = libpath
metadata["lib_container"] = lib_container
selected = [o for o in bpy.context.scene.objects if o.select_get()]
materials, objects = self._process(libpath, container_name, selected)
# Save the list of imported materials in the metadata container
metadata["objects"] = objects
metadata["materials"] = materials
metadata["parent"] = str(context["representation"]["parent"])
metadata["family"] = context["representation"]["context"]["family"]
nodes = list(container.objects)
nodes.append(container)
self[:] = nodes
return nodes
def update(self, container: Dict, representation: Dict):
collection = bpy.data.collections.get(container["objectName"])
libpath = Path(get_representation_path(representation))
extension = libpath.suffix.lower()
self.log.info(
"Container: %s\nRepresentation: %s",
pformat(container, indent=2),
pformat(representation, indent=2),
)
assert collection, (
f"The asset is not loaded: {container['objectName']}"
)
assert not (collection.children), (
"Nested collections are not supported."
)
assert libpath, (
"No existing library file found for {container['objectName']}"
)
assert libpath.is_file(), (
f"The file doesn't exist: {libpath}"
)
assert extension in plugin.VALID_EXTENSIONS, (
f"Unsupported file: {libpath}"
)
collection_metadata = collection.get(AVALON_PROPERTY)
collection_libpath = collection_metadata["libpath"]
normalized_collection_libpath = (
str(Path(bpy.path.abspath(collection_libpath)).resolve())
)
normalized_libpath = (
str(Path(bpy.path.abspath(str(libpath))).resolve())
)
self.log.debug(
"normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
normalized_collection_libpath,
normalized_libpath,
)
if normalized_collection_libpath == normalized_libpath:
self.log.info("Library already loaded, not updating...")
return
for obj in collection_metadata['objects']:
for child in self.get_all_children(obj):
child.data.materials.clear()
for material in collection_metadata['materials']:
bpy.data.materials.remove(material)
namespace = collection_metadata['namespace']
name = collection_metadata['name']
container_name = f"{namespace}_{name}"
materials, objects = self._process(
libpath, container_name, collection_metadata['objects'])
collection_metadata["objects"] = objects
collection_metadata["materials"] = materials
collection_metadata["libpath"] = str(libpath)
collection_metadata["representation"] = str(representation["_id"])
def remove(self, container: Dict) -> bool:
collection = bpy.data.collections.get(container["objectName"])
if not collection:
return False
collection_metadata = collection.get(AVALON_PROPERTY)
for obj in collection_metadata['objects']:
for child in self.get_all_children(obj):
child.data.materials.clear()
for material in collection_metadata['materials']:
bpy.data.materials.remove(material)
bpy.data.collections.remove(collection)
return True
```
#### File: plugins/publish/extract_blend_animation.py
```python
import os
import bpy
import openpype.api
class ExtractBlendAnimation(openpype.api.Extractor):
"""Extract a blend file."""
label = "Extract Blend"
hosts = ["blender"]
families = ["animation"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
data_blocks = set()
for obj in instance:
if isinstance(obj, bpy.types.Object) and obj.type == 'EMPTY':
child = obj.children[0]
if child and child.type == 'ARMATURE':
if child.animation_data and child.animation_data.action:
if not obj.animation_data:
obj.animation_data_create()
obj.animation_data.action = child.animation_data.action
obj.animation_data_clear()
data_blocks.add(child.animation_data.action)
data_blocks.add(obj)
bpy.data.libraries.write(filepath, data_blocks)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'blend',
'ext': 'blend',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
```
#### File: flame/otio/utils.py
```python
import re
import opentimelineio as otio
import logging
log = logging.getLogger(__name__)
FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")
def timecode_to_frames(timecode, framerate):
rt = otio.opentime.from_timecode(timecode, framerate)
return int(otio.opentime.to_frames(rt))
def frames_to_timecode(frames, framerate):
rt = otio.opentime.from_frames(frames, framerate)
return otio.opentime.to_timecode(rt)
def frames_to_seconds(frames, framerate):
rt = otio.opentime.from_frames(frames, framerate)
return otio.opentime.to_seconds(rt)
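# Hypothetical usage of the helpers above (assuming a 24 fps timeline):
#   timecode_to_frames("00:00:01:00", 24) -> 24
#   frames_to_timecode(24, 24)            -> "00:00:01:00"
#   frames_to_seconds(48, 24)             -> 2.0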
def get_reformated_filename(filename, padded=True):
"""
    Return the file name with its frame number replaced by a frame-padding token
    Args:
        filename (str): file name
    Returns:
        str: file name with the frame number reformatted (e.g. %04d)
Example:
get_reformated_filename("plate.1001.exr") > plate.%04d.exr
"""
found = FRAME_PATTERN.search(filename)
if not found:
log.info("File name is not sequence: {}".format(filename))
return filename
padding = get_padding_from_filename(filename)
replacement = "%0{}d".format(padding) if padded else "%d"
start_idx, end_idx = found.span(1)
return replacement.join(
[filename[:start_idx], filename[end_idx:]]
)
def get_padding_from_filename(filename):
"""
Return padding number from Flame path style
Args:
filename (str): file name
Returns:
int: padding number
Example:
get_padding_from_filename("plate.0001.exr") > 4
"""
found = get_frame_from_filename(filename)
return len(found) if found else None
def get_frame_from_filename(filename):
"""
Return sequence number from Flame path style
Args:
filename (str): file name
Returns:
        str: sequence frame number as a string
    Example:
        get_frame_from_filename("plate.0001.exr") > "0001"
"""
found = re.findall(FRAME_PATTERN, filename)
return found.pop() if found else None
```
#### File: plugins/create/create_template.py
```python
from openpype.hosts.harmony.api import plugin
class CreateTemplate(plugin.Creator):
"""Composite node for publishing to templates."""
name = "templateDefault"
label = "Template"
family = "harmony.template"
def __init__(self, *args, **kwargs):
super(CreateTemplate, self).__init__(*args, **kwargs)
```
#### File: plugins/publish/extract_template.py
```python
import os
import shutil
import openpype.api
import openpype.hosts.harmony.api as harmony
import openpype.hosts.harmony
class ExtractTemplate(openpype.api.Extractor):
"""Extract the connected nodes to the composite instance."""
label = "Extract Template"
hosts = ["harmony"]
families = ["harmony.template"]
def process(self, instance):
"""Plugin entry point."""
staging_dir = self.staging_dir(instance)
filepath = os.path.join(staging_dir, f"{instance.name}.tpl")
self.log.info(f"Outputting template to {staging_dir}")
dependencies = []
self.get_dependencies(instance.data["setMembers"][0], dependencies)
# Get backdrops.
backdrops = {}
for dependency in dependencies:
for backdrop in self.get_backdrops(dependency):
backdrops[backdrop["title"]["text"]] = backdrop
unique_backdrops = [backdrops[x] for x in set(backdrops.keys())]
if not unique_backdrops:
self.log.error(("No backdrops detected for template. "
"Please move template instance node onto "
"some backdrop and try again."))
raise AssertionError("No backdrop detected")
# Get non-connected nodes within backdrops.
all_nodes = instance.context.data.get("allNodes")
for node in [x for x in all_nodes if x not in dependencies]:
within_unique_backdrops = bool(
[x for x in self.get_backdrops(node) if x in unique_backdrops]
)
if within_unique_backdrops:
dependencies.append(node)
# Make sure we dont export the instance node.
if instance.data["setMembers"][0] in dependencies:
dependencies.remove(instance.data["setMembers"][0])
# Export template.
openpype.hosts.harmony.api.export_template(
unique_backdrops, dependencies, filepath
)
# Prep representation.
os.chdir(staging_dir)
shutil.make_archive(
f"{instance.name}",
"zip",
os.path.join(staging_dir, f"{instance.name}.tpl")
)
representation = {
"name": "tpl",
"ext": "zip",
"files": f"{instance.name}.zip",
"stagingDir": staging_dir
}
self.log.info(instance.data.get("representations"))
if instance.data.get("representations"):
instance.data["representations"].extend([representation])
else:
instance.data["representations"] = [representation]
instance.data["version_name"] = "{}_{}".format(
instance.data["subset"], os.environ["AVALON_TASK"])
def get_backdrops(self, node: str) -> list:
"""Get backdrops for the node.
Args:
node (str): Node path.
Returns:
list: list of Backdrops.
"""
self_name = self.__class__.__name__
return harmony.send({
"function": f"PypeHarmony.Publish.{self_name}.getBackdropsByNode",
"args": node})["result"]
def get_dependencies(
self, node: str, dependencies: list = None) -> list:
"""Get node dependencies.
This will return recursive dependency list of given node.
Args:
node (str): Path to the node.
dependencies (list, optional): existing dependency list.
Returns:
list: List of dependent nodes.
"""
        if dependencies is None:
            dependencies = []
        current_dependencies = harmony.send(
{
"function": "PypeHarmony.getDependencies",
"args": node}
)["result"]
for dependency in current_dependencies:
if not dependency:
continue
if dependency in dependencies:
continue
dependencies.append(dependency)
self.get_dependencies(dependency, dependencies)
```
#### File: Python/StartupUI/setPosterFrame.py
```python
import hiero.core
import hiero.ui
try:
from PySide.QtGui import *
from PySide.QtCore import *
except ImportError:
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from PySide2.QtCore import *
def setPosterFrame(posterFrame=.5):
"""
    Update the poster frame of the given clip items.
    posterFrame = .5 uses the centre frame, 0 uses the first frame and 1 uses the last frame
"""
view = hiero.ui.activeView()
selectedBinItems = view.selection()
selectedClipItems = [(item.activeItem()
if hasattr(item, "activeItem") else item)
for item in selectedBinItems]
for clip in selectedClipItems:
centreFrame = int(clip.duration() * posterFrame)
clip.setPosterFrame(centreFrame)
class SetPosterFrameAction(QAction):
def __init__(self):
QAction.__init__(self, "Set Poster Frame (centre)", None)
self._selection = None
self.triggered.connect(lambda: setPosterFrame(.5))
hiero.core.events.registerInterest("kShowContextMenu/kBin",
self.eventHandler)
def eventHandler(self, event):
view = event.sender
# Add the Menu to the right-click menu
event.menu.addAction(self)
# The act of initialising the action adds it to the right-click menu...
SetPosterFrameAction()
```
#### File: hiero/api/tags.py
```python
import re
import os
import hiero
from openpype.api import Logger
from avalon import io
log = Logger().get_logger(__name__)
def tag_data():
return {
# "Retiming": {
# "editable": "1",
# "note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)", # noqa
# "icon": "retiming.png",
# "metadata": {
# "family": "retiming",
# "marginIn": 1,
# "marginOut": 1
# }
# },
"[Lenses]": {
"Set lense here": {
"editable": "1",
"note": "Adjust parameters of your lense and then drop to clip. Remember! You can always overwrite on clip", # noqa
"icon": "lense.png",
"metadata": {
"focalLengthMm": 57
}
}
},
# "NukeScript": {
# "editable": "1",
# "note": "Collecting track items to Nuke scripts.",
# "icon": "icons:TagNuke.png",
# "metadata": {
# "family": "nukescript",
# "subset": "main"
# }
# },
"Comment": {
"editable": "1",
"note": "Comment on a shot.",
"icon": "icons:TagComment.png",
"metadata": {
"family": "comment",
"subset": "main"
}
}
}
def create_tag(key, data):
"""
Creating Tag object.
Args:
key (str): name of tag
data (dict): parameters of tag
Returns:
object: Tag object
"""
tag = hiero.core.Tag(str(key))
return update_tag(tag, data)
def update_tag(tag, data):
"""
    Update an existing Tag object with new data.
Args:
tag (obj): Tag object
data (dict): parameters of tag
"""
# set icon if any available in input data
if data.get("icon"):
tag.setIcon(str(data["icon"]))
# get metadata of tag
mtd = tag.metadata()
# get metadata key from data
data_mtd = data.get("metadata", {})
# due to hiero bug we have to make sure keys which are not existent in
# data are cleared of value by `None`
for _mk in mtd.keys():
if _mk.replace("tag.", "") not in data_mtd.keys():
mtd.setValue(_mk, str(None))
# set all data metadata to tag metadata
for k, v in data_mtd.items():
mtd.setValue(
"tag.{}".format(str(k)),
str(v)
)
# set note description of tag
tag.setNote(str(data["note"]))
return tag
def add_tags_to_workfile():
"""
Will create default tags from presets.
"""
from .lib import get_current_project
def add_tag_to_bin(root_bin, name, data):
# for Tags to be created in root level Bin
# at first check if any of input data tag is not already created
done_tag = next((t for t in root_bin.items()
if str(name) in t.name()), None)
if not done_tag:
# create Tag
tag = create_tag(name, data)
tag.setName(str(name))
log.debug("__ creating tag: {}".format(tag))
# adding Tag to Root Bin
root_bin.addItem(tag)
else:
# update only non hierarchy tags
update_tag(done_tag, data)
done_tag.setName(str(name))
log.debug("__ updating tag: {}".format(done_tag))
# get project and root bin object
project = get_current_project()
root_bin = project.tagsBin()
if "Tag Presets" in project.name():
return
log.debug("Setting default tags on project: {}".format(project.name()))
# get hiero tags.json
nks_pres_tags = tag_data()
# Get project task types.
tasks = io.find_one({"type": "project"})["config"]["tasks"]
nks_pres_tags["[Tasks]"] = {}
log.debug("__ tasks: {}".format(tasks))
for task_type in tasks.keys():
nks_pres_tags["[Tasks]"][task_type.lower()] = {
"editable": "1",
"note": task_type,
"icon": "icons:TagGood.png",
"metadata": {
"family": "task",
"type": task_type
}
}
# Get project assets. Currently Ftrack specific to differentiate between
# asset builds and shots.
if int(os.getenv("TAG_ASSETBUILD_STARTUP", 0)) == 1:
nks_pres_tags["[AssetBuilds]"] = {}
for asset in io.find({"type": "asset"}):
if asset["data"]["entityType"] == "AssetBuild":
nks_pres_tags["[AssetBuilds]"][asset["name"]] = {
"editable": "1",
"note": "",
"icon": {
"path": "icons:TagActor.png"
},
"metadata": {
"family": "assetbuild"
}
}
# loop through tag data dict and create deep tag structure
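    # Keys wrapped in square brackets (e.g. "[Tasks]") become root-level Bins
    # that hold their child tags; plain keys (e.g. "Comment") become root tags.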
for _k, _val in nks_pres_tags.items():
# check if key is not decorated with [] so it is defined as bin
bin_find = None
pattern = re.compile(r"\[(.*)\]")
_bin_finds = pattern.findall(_k)
# if there is available any then pop it to string
if _bin_finds:
bin_find = _bin_finds.pop()
# if bin was found then create or update
if bin_find:
root_add = False
# first check if in root lever is not already created bins
bins = [b for b in root_bin.items()
if b.name() in str(bin_find)]
if bins:
bin = bins.pop()
else:
root_add = True
# create Bin object for processing
bin = hiero.core.Bin(str(bin_find))
# update or create tags in the bin
for __k, __v in _val.items():
add_tag_to_bin(bin, __k, __v)
# finally add the Bin object to the root level Bin
if root_add:
# adding Tag to Root Bin
root_bin.addItem(bin)
else:
add_tag_to_bin(root_bin, _k, _val)
log.info("Default Tags were set...")
```
#### File: plugins/publish/extract_clip_effects.py
```python
import os
import json
import pyblish.api
import openpype
class ExtractClipEffects(openpype.api.Extractor):
"""Extract clip effects instances."""
order = pyblish.api.ExtractorOrder
label = "Export Clip Effects"
families = ["effect"]
def process(self, instance):
item = instance.data["item"]
effects = instance.data.get("effects")
# skip any without effects
if not effects:
return
subset = instance.data.get("subset")
family = instance.data["family"]
self.log.debug("creating staging dir")
staging_dir = self.staging_dir(instance)
transfers = list()
if "transfers" not in instance.data:
instance.data["transfers"] = list()
ext = "json"
file = subset + "." + ext
# when instance is created during collection part
resources_dir = instance.data["resourcesDir"]
# change paths in effects to files
for k, effect in effects.items():
if "assignTo" in k:
continue
trn = self.copy_linked_files(effect, resources_dir)
if trn:
transfers.append((trn[0], trn[1]))
instance.data["transfers"].extend(transfers)
self.log.debug("_ transfers: `{}`".format(
instance.data["transfers"]))
# create representations
instance.data["representations"] = list()
transfer_data = [
"handleStart", "handleEnd",
"sourceStart", "sourceStartH", "sourceEnd", "sourceEndH",
"frameStart", "frameEnd",
"clipIn", "clipOut", "clipInH", "clipOutH",
"asset", "version"
]
# pass data to version
version_data = dict()
version_data.update({k: instance.data[k] for k in transfer_data})
# add to data of representation
version_data.update({
"colorspace": item.sourceMediaColourTransform(),
"colorspaceScript": instance.context.data["colorspace"],
"families": [family, "plate"],
"subset": subset,
"fps": instance.context.data["fps"]
})
instance.data["versionData"] = version_data
representation = {
'files': file,
'stagingDir': staging_dir,
'name': family + ext.title(),
'ext': ext
}
instance.data["representations"].append(representation)
self.log.debug("_ representations: `{}`".format(
instance.data["representations"]))
self.log.debug("_ version_data: `{}`".format(
instance.data["versionData"]))
with open(os.path.join(staging_dir, file), "w") as outfile:
outfile.write(json.dumps(effects, indent=4, sort_keys=True))
def copy_linked_files(self, effect, dst_dir):
for k, v in effect["node"].items():
if k in "file" and v != '':
base_name = os.path.basename(v)
dst = os.path.join(dst_dir, base_name).replace("\\", "/")
# add it to the json
effect["node"][k] = dst
return (v, dst)
```
#### File: plugins/publish/integrate_version_up_workfile.py
```python
from pyblish import api
import openpype.api as pype
class IntegrateVersionUpWorkfile(api.ContextPlugin):
"""Save as new workfile version"""
order = api.IntegratorOrder + 10.1
label = "Version-up Workfile"
hosts = ["hiero"]
optional = True
active = True
def process(self, context):
project = context.data["activeProject"]
path = context.data.get("currentFile")
new_path = pype.version_up(path)
if project:
project.saveAs(new_path)
self.log.info("Project workfile was versioned up")
```
#### File: plugins/publish_old_workflow/collect_host_version.py
```python
import pyblish.api
class CollectHostVersion(pyblish.api.ContextPlugin):
"""Inject the hosts version into context"""
label = "Collect Host and HostVersion"
order = pyblish.api.CollectorOrder - 0.5
def process(self, context):
import nuke
context.set_data("host", pyblish.api.current_host())
context.set_data('hostVersion', value=nuke.NUKE_VERSION_STRING)
```
#### File: plugins/publish_old_workflow/collect_tag_retime.py
```python
from pyblish import api
class CollectTagRetime(api.InstancePlugin):
"""Collect Retiming from Tags of selected track items."""
order = api.CollectorOrder + 0.014
label = "Collect Retiming Tag"
hosts = ["hiero"]
families = ['clip']
def process(self, instance):
# gets tags
tags = instance.data["tags"]
for t in tags:
t_metadata = dict(t["metadata"])
t_family = t_metadata.get("tag.family", "")
# gets only task family tags and collect labels
if "retiming" in t_family:
margin_in = t_metadata.get("tag.marginIn", "")
margin_out = t_metadata.get("tag.marginOut", "")
instance.data["retimeMarginIn"] = int(margin_in)
instance.data["retimeMarginOut"] = int(margin_out)
instance.data["retime"] = True
self.log.info("retimeMarginIn: `{}`".format(margin_in))
self.log.info("retimeMarginOut: `{}`".format(margin_out))
instance.data["families"] += ["retime"]
```
#### File: plugins/publish/precollect_workfile.py
```python
import os
import pyblish.api
import hiero.ui
from openpype.hosts.hiero import api as phiero
from avalon import api as avalon
from pprint import pformat
from openpype.hosts.hiero.api.otio import hiero_export
from Qt.QtGui import QPixmap
import tempfile
class PrecollectWorkfile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
label = "Precollect Workfile"
order = pyblish.api.CollectorOrder - 0.5
def process(self, context):
asset = avalon.Session["AVALON_ASSET"]
subset = "workfile"
project = phiero.get_current_project()
active_timeline = hiero.ui.activeSequence()
fps = active_timeline.framerate().toFloat()
# adding otio timeline to context
otio_timeline = hiero_export.create_otio_timeline()
        # get workfile thumbnail paths
tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
thumbnail_name = "workfile_thumbnail.png"
thumbnail_path = os.path.join(tmp_staging, thumbnail_name)
# search for all windows with name of actual sequence
_windows = [w for w in hiero.ui.windowManager().windows()
if active_timeline.name() in w.windowTitle()]
# export window to thumb path
QPixmap.grabWidget(_windows[-1]).save(thumbnail_path, 'png')
# thumbnail
thumb_representation = {
'files': thumbnail_name,
'stagingDir': tmp_staging,
'name': "thumbnail",
'thumbnail': True,
'ext': "png"
}
# get workfile paths
curent_file = project.path()
staging_dir, base_name = os.path.split(curent_file)
# creating workfile representation
workfile_representation = {
'name': 'hrox',
'ext': 'hrox',
'files': base_name,
"stagingDir": staging_dir,
}
instance_data = {
"name": "{}_{}".format(asset, subset),
"asset": asset,
"subset": "{}{}".format(asset, subset.capitalize()),
"item": project,
"family": "workfile",
"representations": [workfile_representation, thumb_representation]
}
# create instance with workfile
instance = context.create_instance(**instance_data)
# update context with main project attributes
context_data = {
"activeProject": project,
"otioTimeline": otio_timeline,
"currentFile": curent_file,
"colorspace": self.get_colorspace(project),
"fps": fps
}
context.data.update(context_data)
self.log.info("Creating instance: {}".format(instance))
self.log.debug("__ instance.data: {}".format(pformat(instance.data)))
self.log.debug("__ context_data: {}".format(pformat(context_data)))
def get_colorspace(self, project):
# get workfile's colorspace properties
return {
"useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(),
"lutSetting16Bit": project.lutSetting16Bit(),
"lutSetting8Bit": project.lutSetting8Bit(),
"lutSettingFloat": project.lutSettingFloat(),
"lutSettingLog": project.lutSettingLog(),
"lutSettingViewer": project.lutSettingViewer(),
"lutSettingWorkingSpace": project.lutSettingWorkingSpace(),
"lutUseOCIOForExport": project.lutUseOCIOForExport(),
"ocioConfigName": project.ocioConfigName(),
"ocioConfigPath": project.ocioConfigPath()
}
```
#### File: plugins/create/create_usdrender.py
```python
import hou
from openpype.hosts.houdini.api import plugin
class CreateUSDRender(plugin.Creator):
"""USD Render ROP in /stage"""
label = "USD Render (experimental)"
family = "usdrender"
icon = "magic"
def __init__(self, *args, **kwargs):
super(CreateUSDRender, self).__init__(*args, **kwargs)
self.parent = hou.node("/stage")
# Remove the active, we are checking the bypass flag of the nodes
self.data.pop("active", None)
self.data.update({"node_type": "usdrender"})
def _process(self, instance):
"""Creator main entry point.
Args:
instance (hou.Node): Created Houdini instance.
"""
parms = {
# Render frame range
"trange": 1
}
if self.nodes:
node = self.nodes[0]
parms.update({"loppath": node.path()})
instance.setParms(parms)
# Lock some Avalon attributes
to_lock = ["family", "id"]
for name in to_lock:
parm = instance.parm(name)
parm.lock(True)
```
#### File: plugins/publish/collect_redshift_rop.py
```python
import re
import os
import hou
import pyblish.api
def get_top_referenced_parm(parm):
processed = set() # disallow infinite loop
while True:
if parm.path() in processed:
raise RuntimeError("Parameter references result in cycle.")
processed.add(parm.path())
ref = parm.getReferencedParm()
if ref.path() == parm.path():
# It returns itself when it doesn't reference
# another parameter
return ref
else:
parm = ref
def evalParmNoFrame(node, parm, pad_character="#"):
parameter = node.parm(parm)
assert parameter, "Parameter does not exist: %s.%s" % (node, parm)
# If the parameter has a parameter reference, then get that
# parameter instead as otherwise `unexpandedString()` fails.
parameter = get_top_referenced_parm(parameter)
# Substitute out the frame numbering with padded characters
try:
raw = parameter.unexpandedString()
except hou.Error as exc:
print("Failed: %s" % parameter)
raise RuntimeError(exc)
def replace(match):
padding = 1
n = match.group(2)
if n and int(n):
padding = int(n)
return pad_character * padding
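    # Example (hypothetical parm value): "render.$F4.exr" -> "render.####.exr"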
expression = re.sub(r"(\$F([0-9]*))", replace, raw)
with hou.ScriptEvalContext(parameter):
return hou.expandStringAtFrame(expression, 0)
class CollectRedshiftROPRenderProducts(pyblish.api.InstancePlugin):
"""Collect USD Render Products
Collects the instance.data["files"] for the render products.
Provides:
instance -> files
"""
label = "Redshift ROP Render Products"
order = pyblish.api.CollectorOrder + 0.4
hosts = ["houdini"]
families = ["redshift_rop"]
def process(self, instance):
rop = instance[0]
# Collect chunkSize
chunk_size_parm = rop.parm("chunkSize")
if chunk_size_parm:
chunk_size = int(chunk_size_parm.eval())
instance.data["chunkSize"] = chunk_size
self.log.debug("Chunk Size: %s" % chunk_size)
default_prefix = evalParmNoFrame(rop, "RS_outputFileNamePrefix")
beauty_suffix = rop.evalParm("RS_outputBeautyAOVSuffix")
render_products = []
# Default beauty AOV
beauty_product = self.get_render_product_name(
prefix=default_prefix, suffix=beauty_suffix
)
render_products.append(beauty_product)
num_aovs = rop.evalParm("RS_aov")
for index in range(num_aovs):
i = index + 1
# Skip disabled AOVs
if not rop.evalParm("RS_aovEnable_%s" % i):
continue
aov_suffix = rop.evalParm("RS_aovSuffix_%s" % i)
aov_prefix = evalParmNoFrame(rop, "RS_aovCustomPrefix_%s" % i)
if not aov_prefix:
aov_prefix = default_prefix
aov_product = self.get_render_product_name(aov_prefix, aov_suffix)
render_products.append(aov_product)
for product in render_products:
self.log.debug("Found render product: %s" % product)
filenames = list(render_products)
instance.data["files"] = filenames
def get_render_product_name(self, prefix, suffix):
"""Return the output filename using the AOV prefix and suffix"""
# When AOV is explicitly defined in prefix we just swap it out
# directly with the AOV suffix to embed it.
# Note: ${AOV} seems to be evaluated in the parameter as %AOV%
has_aov_in_prefix = "%AOV%" in prefix
if has_aov_in_prefix:
# It seems that when some special separator characters are present
# before the %AOV% token that Redshift will secretly remove it if
# there is no suffix for the current product, for example:
# foo_%AOV% -> foo.exr
pattern = "%AOV%" if suffix else "[._-]?%AOV%"
product_name = re.sub(pattern, suffix, prefix, flags=re.IGNORECASE)
else:
if suffix:
# Add ".{suffix}" before the extension
prefix_base, ext = os.path.splitext(prefix)
product_name = prefix_base + "." + suffix + ext
else:
product_name = prefix
return product_name
```
#### File: plugins/publish/collect_remote_publish.py
```python
import pyblish.api
import openpype.api
import hou
from openpype.hosts.houdini.api import lib
class CollectRemotePublishSettings(pyblish.api.ContextPlugin):
"""Collect custom settings of the Remote Publish node."""
order = pyblish.api.CollectorOrder
families = ["*"]
hosts = ["houdini"]
targets = ["deadline"]
label = "Remote Publish Submission Settings"
actions = [openpype.api.RepairAction]
def process(self, context):
node = hou.node("/out/REMOTE_PUBLISH")
if not node:
return
attributes = lib.read(node)
# Debug the settings we have collected
for key, value in sorted(attributes.items()):
self.log.debug("Collected %s: %s" % (key, value))
context.data.update(attributes)
```
#### File: plugins/publish/collect_workscene_fps.py
```python
import pyblish.api
import hou
class CollectWorksceneFPS(pyblish.api.ContextPlugin):
"""Get the FPS of the work scene."""
label = "Workscene FPS"
order = pyblish.api.CollectorOrder
hosts = ["houdini"]
def process(self, context):
fps = hou.fps()
self.log.info("Workscene FPS: %s" % fps)
context.data.update({"fps": fps})
```
#### File: plugins/publish/increment_current_file.py
```python
import pyblish.api
import avalon.api
from openpype.api import version_up
from openpype.action import get_errored_plugins_from_data
class IncrementCurrentFile(pyblish.api.InstancePlugin):
"""Increment the current file.
Saves the current scene with an increased version number.
"""
label = "Increment current file"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["houdini"]
families = ["colorbleed.usdrender", "redshift_rop"]
targets = ["local"]
def process(self, instance):
# This should be a ContextPlugin, but this is a workaround
# for a bug in pyblish to run once for a family: issue #250
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
if context.data.get(key, False):
return
else:
context.data[key] = True
context = instance.context
errored_plugins = get_errored_plugins_from_data(context)
if any(
plugin.__name__ == "HoudiniSubmitPublishDeadline"
for plugin in errored_plugins
):
raise RuntimeError(
"Skipping incrementing current file because "
"submission to deadline failed."
)
# Filename must not have changed since collecting
host = avalon.api.registered_host()
current_file = host.current_file()
assert (
context.data["currentFile"] == current_file
), "Collected filename from current scene name."
new_filepath = version_up(current_file)
host.save(new_filepath)
```
#### File: plugins/publish/validate_alembic_face_sets.py
```python
import pyblish.api
import openpype.api
class ValidateAlembicROPFaceSets(pyblish.api.InstancePlugin):
"""Validate Face Sets are disabled for extraction to pointcache.
    When groups are saved as Face Sets with the Alembic, these show up as
    shadingEngine connections in Maya. However, with animated groups these
    connections won't update per frame as expected. They can also break
    shader assignments in some cases, where the connection must first be
    broken before a shader can be assigned.
It is allowed to include Face Sets, so only an issue is logged to
identify that it could introduce issues down the pipeline.
"""
order = openpype.api.ValidateContentsOrder + 0.1
families = ["pointcache"]
hosts = ["houdini"]
label = "Validate Alembic ROP Face Sets"
def process(self, instance):
rop = instance[0]
facesets = rop.parm("facesets").eval()
# 0 = No Face Sets
# 1 = Save Non-Empty Groups as Face Sets
# 2 = Save All Groups As Face Sets
if facesets != 0:
self.log.warning(
"Alembic ROP saves 'Face Sets' for Geometry. "
"Are you sure you want this?"
)
```
#### File: plugins/publish/validate_no_errors.py
```python
import pyblish.api
import openpype.api
import hou
def cook_in_range(node, start, end):
current = hou.intFrame()
    if start <= current <= end:
# Allow cooking current frame since we're in frame range
node.cook(force=False)
else:
node.cook(force=False, frame_range=(start, start))
def get_errors(node):
"""Get cooking errors.
If node already has errors check whether it needs to recook
If so, then recook first to see if that solves it.
"""
if node.errors() and node.needsToCook():
node.cook()
return node.errors()
class ValidateNoErrors(pyblish.api.InstancePlugin):
"""Validate the Instance has no current cooking errors."""
order = openpype.api.ValidateContentsOrder
hosts = ["houdini"]
label = "Validate no errors"
def process(self, instance):
validate_nodes = []
if len(instance) > 0:
validate_nodes.append(instance[0])
output_node = instance.data.get("output_node")
if output_node:
validate_nodes.append(output_node)
for node in validate_nodes:
self.log.debug("Validating for errors: %s" % node.path())
errors = get_errors(node)
if errors:
# If there are current errors, then try an unforced cook
# to see whether the error will disappear.
self.log.debug(
"Recooking to revalidate error "
"is up to date for: %s" % node.path()
)
current_frame = hou.intFrame()
start = instance.data.get("frameStart", current_frame)
end = instance.data.get("frameEnd", current_frame)
cook_in_range(node, start=start, end=end)
# Check for errors again after the forced recook
errors = get_errors(node)
if errors:
self.log.error(errors)
raise RuntimeError("Node has errors: %s" % node.path())
```
#### File: vendor/husdoutputprocessors/stagingdir_processor.py
```python
import hou
import husdoutputprocessors.base as base
import os
class StagingDirOutputProcessor(base.OutputProcessorBase):
"""Output all USD Rop file nodes into the Staging Directory
Ignore any folders and paths set in the Configured Layers
and USD Rop node, just take the filename and save into a
single directory.
"""
theParameters = None
parameter_prefix = "stagingdiroutputprocessor_"
stagingdir_parm_name = parameter_prefix + "stagingDir"
def __init__(self):
self.staging_dir = None
def displayName(self):
return 'StagingDir Output Processor'
def parameters(self):
if not self.theParameters:
parameters = hou.ParmTemplateGroup()
rootdirparm = hou.StringParmTemplate(
self.stagingdir_parm_name,
'Staging Directory', 1,
string_type=hou.stringParmType.FileReference,
file_type=hou.fileType.Directory
)
parameters.append(rootdirparm)
self.theParameters = parameters.asDialogScript()
return self.theParameters
def beginSave(self, config_node, t):
# Use the Root Directory parameter if it is set.
root_dir_parm = config_node.parm(self.stagingdir_parm_name)
if root_dir_parm:
self.staging_dir = root_dir_parm.evalAtTime(t)
if not self.staging_dir:
out_file_parm = config_node.parm('lopoutput')
if out_file_parm:
self.staging_dir = out_file_parm.evalAtTime(t)
if self.staging_dir:
(self.staging_dir, filename) = os.path.split(self.staging_dir)
def endSave(self):
self.staging_dir = None
def processAsset(self, asset_path,
asset_path_for_save,
referencing_layer_path,
asset_is_layer,
for_save):
"""
Args:
asset_path (str): The incoming file path you want to alter or not.
asset_path_for_save (bool): Whether the current path is a
referenced path in the USD file. When True, return the path
you want inside USD file.
referencing_layer_path (str): ???
asset_is_layer (bool): Whether this asset is a USD layer file.
If this is False, the asset is something else (for example,
a texture or volume file).
for_save (bool): Whether the asset path is for a file to be saved
out. If so, then return actual written filepath.
Returns:
The refactored asset path.
"""
# Treat save paths as being relative to the output path.
if for_save and self.staging_dir:
# Whenever we're processing a Save Path make sure to
# resolve it to the Staging Directory
filename = os.path.basename(asset_path)
return os.path.join(self.staging_dir, filename)
return asset_path
output_processor = StagingDirOutputProcessor()
def usdOutputProcessor():
return output_processor
```
#### File: plugins/create/create_model.py
```python
from openpype.hosts.maya.api import plugin
class CreateModel(plugin.Creator):
"""Polygonal static geometry"""
name = "modelMain"
label = "Model"
family = "model"
icon = "cube"
defaults = ["Main", "Proxy", "_MD", "_HD", "_LD"]
def __init__(self, *args, **kwargs):
super(CreateModel, self).__init__(*args, **kwargs)
# Vertex colors with the geometry
self.data["writeColorSets"] = False
self.data["writeFaceSets"] = False
# Include attributes by attribute name or prefix
self.data["attr"] = ""
self.data["attrPrefix"] = ""
# Whether to include parent hierarchy of nodes in the instance
self.data["includeParentHierarchy"] = False
```
#### File: plugins/publish/clean_nodes.py
```python
from maya import cmds # noqa
import pyblish.api
class CleanNodesUp(pyblish.api.InstancePlugin):
"""Cleans up the staging directory after a successful publish.
This will also clean published renders and delete their parent directories.
"""
order = pyblish.api.IntegratorOrder + 10
label = "Clean Nodes"
optional = True
active = True
def process(self, instance):
if not instance.data.get("cleanNodes"):
self.log.info("Nothing to clean.")
return
nodes_to_clean = instance.data.pop("cleanNodes", [])
self.log.info("Removing {} nodes".format(len(nodes_to_clean)))
for node in nodes_to_clean:
try:
cmds.delete(node)
except ValueError:
# object might be already deleted, don't complain about it
pass
```
#### File: plugins/publish/collect_unreal_staticmesh.py
```python
from maya import cmds
import pyblish.api
class CollectUnrealStaticMesh(pyblish.api.InstancePlugin):
"""Collect Unreal Static Mesh
Ensures always only a single frame is extracted (current frame). This
also sets correct FBX options for later extraction.
"""
order = pyblish.api.CollectorOrder + 0.2
label = "Collect Unreal Static Meshes"
families = ["unrealStaticMesh"]
def process(self, instance):
# add fbx family to trigger fbx extractor
instance.data["families"].append("fbx")
# take the name from instance (without the `S_` prefix)
instance.data["staticMeshCombinedName"] = instance.name[2:]
geometry_set = [i for i in instance if i == "geometry_SET"]
instance.data["membersToCombine"] = cmds.sets(
geometry_set, query=True)
collision_set = [i for i in instance if i == "collisions_SET"]
instance.data["collisionMembers"] = cmds.sets(
collision_set, query=True)
# set fbx overrides on instance
instance.data["smoothingGroups"] = True
instance.data["smoothMesh"] = True
instance.data["triangulate"] = True
frame = cmds.currentTime(query=True)
instance.data["frameStart"] = frame
instance.data["frameEnd"] = frame
```
#### File: plugins/publish/save_scene.py
```python
import pyblish.api
class SaveCurrentScene(pyblish.api.ContextPlugin):
"""Save current scene
"""
label = "Save current file"
order = pyblish.api.ExtractorOrder - 0.49
hosts = ["maya"]
families = ["renderlayer", "workfile"]
def process(self, context):
import maya.cmds as cmds
current = cmds.file(query=True, sceneName=True)
assert context.data['currentFile'] == current
# If file has no modifications, skip forcing a file save
if not cmds.file(query=True, modified=True):
self.log.debug("Skipping file save as there "
"are no modifications..")
return
self.log.info("Saving current file..")
cmds.file(save=True, force=True)
```
#### File: plugins/publish/validate_camera_contents.py
```python
from maya import cmds
import pyblish.api
import openpype.api
import openpype.hosts.maya.api.action
class ValidateCameraContents(pyblish.api.InstancePlugin):
"""Validates Camera instance contents.
A Camera instance may only hold a SINGLE camera's transform, nothing else.
It may hold a "locator" as shape, but different shapes are down the
hierarchy.
"""
order = openpype.api.ValidateContentsOrder
families = ['camera']
hosts = ['maya']
label = 'Camera Contents'
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]
@classmethod
def get_invalid(cls, instance):
# get cameras
members = instance.data['setMembers']
shapes = cmds.ls(members, dag=True, shapes=True, long=True)
# single camera
invalid = []
cameras = cmds.ls(shapes, type='camera', long=True)
if len(cameras) != 1:
cls.log.warning("Camera instance must have a single camera. "
"Found {0}: {1}".format(len(cameras), cameras))
invalid.extend(cameras)
# Guard against the edge case of no cameras at all: extending 'invalid'
# with an empty camera list would still leave it empty, so raise
# explicitly instead.
if len(cameras) < 1:
raise RuntimeError("No cameras in instance.")
# non-camera shapes
valid_shapes = cmds.ls(shapes, type=('camera', 'locator'), long=True)
shapes = set(shapes) - set(valid_shapes)
if shapes:
shapes = list(shapes)
cls.log.warning("Camera instance should only contain camera "
"shapes. Found: {0}".format(shapes))
invalid.extend(shapes)
invalid = list(set(invalid))
return invalid
def process(self, instance):
"""Process all the nodes in the instance"""
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Invalid camera contents: "
"{0}".format(invalid))
```
#### File: plugins/publish/validate_instance_has_members.py
```python
import pyblish.api
import openpype.api
import openpype.hosts.maya.api.action
class ValidateInstanceHasMembers(pyblish.api.InstancePlugin):
"""Validates instance objectSet has *any* members."""
order = openpype.api.ValidateContentsOrder
hosts = ["maya"]
label = 'Instance has members'
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]
@classmethod
def get_invalid(cls, instance):
invalid = list()
if not instance.data["setMembers"]:
objectset_name = instance.data['name']
invalid.append(objectset_name)
return invalid
def process(self, instance):
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Empty instances found: {0}".format(invalid))
```
#### File: plugins/publish/validate_mesh_ngons.py
```python
from maya import cmds
import pyblish.api
import openpype.api
import openpype.hosts.maya.api.action
from openpype.hosts.maya.api import lib
class ValidateMeshNgons(pyblish.api.Validator):
"""Ensure that meshes don't have ngons
Ngons are faces with more than 4 sides.
To debug the problem on the meshes you can use Maya's modeling
tool: "Mesh > Cleanup..."
"""
order = openpype.api.ValidateContentsOrder
hosts = ["maya"]
families = ["model"]
label = "Mesh ngons"
actions = [openpype.hosts.maya.api.action.SelectInvalidAction]
@staticmethod
def get_invalid(instance):
meshes = cmds.ls(instance, type='mesh', long=True)
# Get all faces
faces = ['{0}.f[*]'.format(node) for node in meshes]
# Filter to n-sided polygon faces (ngons)
invalid = lib.polyConstraint(faces,
t=0x0008, # type=face
size=3) # size=nsided
return invalid
def process(self, instance):
"""Process all the nodes in the instance "objectSet"""
invalid = self.get_invalid(instance)
if invalid:
raise ValueError("Meshes found with n-gons"
"values: {0}".format(invalid))
```
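For reference, one way to find ngons with plain `maya.cmds`; the `lib.polyConstraint` helper used above may be implemented differently, so treat this only as a sketch of the underlying selection-constraint mechanism.
```python
from maya import cmds


def find_ngons(meshes):
    """Return faces with more than four sides on the given meshes."""
    faces = ["{}.f[*]".format(mesh) for mesh in meshes]
    cmds.select(faces, replace=True)
    # mode=3 (all and next), type=0x0008 (faces), size=3 (n-sided)
    cmds.polySelectConstraint(mode=3, type=0x0008, size=3)
    ngons = cmds.ls(selection=True, flatten=True) or []
    # reset the constraint so later selections are unaffected
    cmds.polySelectConstraint(disable=True)
    return ngons


# print(find_ngons(cmds.ls(type="mesh", long=True)))
```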
#### File: plugins/publish/validate_shape_zero.py
```python
from maya import cmds
import pyblish.api
import openpype.api
import openpype.hosts.maya.api.action
from openpype.hosts.maya.api import lib
class ValidateShapeZero(pyblish.api.Validator):
"""Shape components may not have any "tweak" values
To solve this issue, try freezing the shapes.
"""
order = openpype.api.ValidateContentsOrder
hosts = ["maya"]
families = ["model"]
label = "Shape Zero (Freeze)"
actions = [
openpype.hosts.maya.api.action.SelectInvalidAction,
openpype.api.RepairAction
]
@staticmethod
def get_invalid(instance):
"""Returns the invalid shapes in the instance.
This is the same as checking:
- all(pnt == [0,0,0] for pnt in shape.pnts[:])
Returns:
list: Shapes with non-frozen vertices
"""
shapes = cmds.ls(instance, type="shape")
invalid = []
for shape in shapes:
if cmds.polyCollapseTweaks(shape, q=True, hasVertexTweaks=True):
invalid.append(shape)
return invalid
@classmethod
def repair(cls, instance):
invalid_shapes = cls.get_invalid(instance)
if not invalid_shapes:
return
with lib.maintained_selection():
with lib.tool("selectSuperContext"):
for shape in invalid_shapes:
cmds.polyCollapseTweaks(shape)
# cmds.polyCollapseTweaks keeps selecting the geometry
# after each command. When running on many meshes
# after one another this tends to get really heavy
cmds.select(clear=True)
def process(self, instance):
"""Process all the nodes in the instance "objectSet"""
invalid = self.get_invalid(instance)
if invalid:
raise ValueError("Shapes found with non-zero component tweaks: "
"{0}".format(invalid))
```
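The docstring above notes that the check is equivalent to verifying every tweak point equals `[0, 0, 0]`. A minimal sketch of that direct check, assuming a Maya session and polygon shapes that carry a `.pnts` attribute; the plugin itself uses `cmds.polyCollapseTweaks` instead.
```python
from maya import cmds


def has_vertex_tweaks(shape):
    """Return True if any tweak point on the shape differs from [0, 0, 0]."""
    points = cmds.getAttr("{}.pnts[*]".format(shape)) or []
    return any(tuple(point) != (0.0, 0.0, 0.0) for point in points)


# invalid = [s for s in cmds.ls(type="shape") if has_vertex_tweaks(s)]
```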
#### File: plugins/load/load_effects_ip.py
```python
import json
from collections import OrderedDict
import nuke
from avalon import io
from openpype.pipeline import (
load,
get_representation_path,
)
from openpype.hosts.nuke.api import lib
from openpype.hosts.nuke.api import (
containerise,
update_container,
viewer_update_and_undo_stop
)
class LoadEffectsInputProcess(load.LoaderPlugin):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["effectJson"]
families = ["effect"]
label = "Load Effects - Input Process"
order = 0
icon = "eye"
color = "#cc0000"
ignore_attr = ["useLifetime"]
def load(self, context, name, namespace, data):
"""
Loading function that applies the soft effects to a particular read node
Arguments:
context (dict): context of version
name (str): name of the version
namespace (str): asset name
data (dict): compulsory attribute > not used
Returns:
nuke node: containerised nuke node object
"""
# get main variables
version = context['version']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
workfile_first_frame = int(nuke.root()["first_frame"].getValue())
namespace = namespace or context['asset']['name']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
# prepare data for imprinting
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# getting file path
file = self.fname.replace("\\", "/")
# getting data from json file with unicode conversion
with open(file, "r") as f:
json_f = {self.byteify(key): self.byteify(value)
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f)
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
GN = nuke.createNode(
"Group",
"name {}_1".format(object_name))
# adding content to the group node
with GN:
pre_node = nuke.createNode("Input")
pre_node["name"].setValue("rgb")
for ef_name, ef_val in nodes_order.items():
node = nuke.createNode(ef_val["class"])
for k, v in ef_val["node"].items():
if k in self.ignore_attr:
continue
try:
node[k].value()
except NameError as e:
self.log.warning(e)
continue
if isinstance(v, list) and len(v) > 4:
node[k].setAnimated()
for i, value in enumerate(v):
if isinstance(value, list):
for ci, cv in enumerate(value):
node[k].setValueAt(
cv,
(workfile_first_frame + i),
ci)
else:
node[k].setValueAt(
value,
(workfile_first_frame + i))
else:
node[k].setValue(v)
node.setInput(0, pre_node)
pre_node = node
output = nuke.createNode("Output")
output.setInput(0, pre_node)
# try to place it under Viewer1
if not self.connect_active_viewer(GN):
nuke.delete(GN)
return
GN["tile_color"].setValue(int("0x3469ffff", 16))
self.log.info("Loaded lut setup: `{}`".format(GN["name"].value()))
return containerise(
node=GN,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data=data_imprint)
def update(self, container, representation):
"""Update the Loader's path
Nuke automatically tries to reset some variables when changing
the loader's path to a new file. These automatic changes are to its
inputs:
"""
# get main variables
# Get version from io
version = io.find_one({
"type": "version",
"_id": representation["parent"]
})
# get corresponding node
GN = nuke.toNode(container['objectName'])
file = get_representation_path(representation).replace("\\", "/")
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
workfile_first_frame = int(nuke.root()["first_frame"].getValue())
namespace = container['namespace']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# Update the imprinted representation
update_container(
GN,
data_imprint
)
# getting data from json file with unicode conversion
with open(file, "r") as f:
json_f = {self.byteify(key): self.byteify(value)
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f)
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
# adding content to the group node
with GN:
# first remove all nodes
[nuke.delete(n) for n in nuke.allNodes()]
# create input node
pre_node = nuke.createNode("Input")
pre_node["name"].setValue("rgb")
for ef_name, ef_val in nodes_order.items():
node = nuke.createNode(ef_val["class"])
for k, v in ef_val["node"].items():
if k in self.ignore_attr:
continue
try:
node[k].value()
except NameError as e:
self.log.warning(e)
continue
if isinstance(v, list) and len(v) > 4:
node[k].setAnimated()
for i, value in enumerate(v):
if isinstance(value, list):
for ci, cv in enumerate(value):
node[k].setValueAt(
cv,
(workfile_first_frame + i),
ci)
else:
node[k].setValueAt(
value,
(workfile_first_frame + i))
else:
node[k].setValue(v)
node.setInput(0, pre_node)
pre_node = node
# create output node
output = nuke.createNode("Output")
output.setInput(0, pre_node)
# # try to place it under Viewer1
# if not self.connect_active_viewer(GN):
# nuke.delete(GN)
# return
# get all versions in list
versions = io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
max_version = max(versions)
# change color of node
if version.get("name") not in [max_version]:
GN["tile_color"].setValue(int("0xd84f20ff", 16))
else:
GN["tile_color"].setValue(int("0x3469ffff", 16))
self.log.info("updated to version: {}".format(version.get("name")))
def connect_active_viewer(self, group_node):
"""
Finds the active viewer, places the group node under it, and adds
the name of the group to the viewer's Input Process knob
Arguments:
group_node (nuke node): nuke group node object
"""
group_node_name = group_node["name"].value()
viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
if len(viewer) > 0:
viewer = viewer[0]
else:
msg = str("Please create Viewer node before you "
"run this action again")
self.log.error(msg)
nuke.message(msg)
return None
# get coordinates of Viewer1
xpos = viewer["xpos"].value()
ypos = viewer["ypos"].value()
ypos += 150
viewer["ypos"].setValue(ypos)
# set coordinates to group node
group_node["xpos"].setValue(xpos)
group_node["ypos"].setValue(ypos + 50)
# add group node name to Viewer Input Process
viewer["input_process_node"].setValue(group_node_name)
# put backdrop under
lib.create_backdrop(
label="Input Process",
layer=2,
nodes=[viewer, group_node],
color="0x7c7faaff")
return True
def reorder_nodes(self, data):
new_order = OrderedDict()
trackNums = [v["trackIndex"] for k, v in data.items()
if isinstance(v, dict)]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()
if isinstance(v, dict)]
for trackIndex in range(
min(trackNums), max(trackNums) + 1):
for subTrackIndex in range(
min(subTrackNums), max(subTrackNums) + 1):
item = self.get_item(data, trackIndex, subTrackIndex)
if item:
new_order.update(item)
return new_order
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if isinstance(val, dict)
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
def byteify(self, input):
"""
Recursively converts unicode strings to byte strings.
It walks through dictionaries, lists and plain values.
Arguments:
input (dict/str): input
Returns:
dict: with fixed values and keys
"""
if isinstance(input, dict):
return {self.byteify(key): self.byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
node = nuke.toNode(container['objectName'])
with viewer_update_and_undo_stop():
nuke.delete(node)
```
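To illustrate the ordering logic in `reorder_nodes()` without a Nuke session, here is a simplified, sort-based equivalent run on invented effect data; only the `trackIndex`/`subTrackIndex` key names are taken from the loader above.
```python
from collections import OrderedDict

# effect JSON entries keyed by node name; the node names and classes are made up
sample = {
    "Grade_1": {"trackIndex": 1, "subTrackIndex": 0, "class": "Grade", "node": {}},
    "LUT_1": {"trackIndex": 0, "subTrackIndex": 0, "class": "OCIOFileTransform", "node": {}},
    "assetName": "shot010",  # non-dict entries are skipped, as in the loader
}


def reorder_nodes(data):
    new_order = OrderedDict()
    items = [(k, v) for k, v in data.items() if isinstance(v, dict)]
    for key, value in sorted(
            items,
            key=lambda kv: (kv[1]["trackIndex"], kv[1]["subTrackIndex"])):
        new_order[key] = value
    return new_order


print(list(reorder_nodes(sample)))  # ['LUT_1', 'Grade_1']
```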
#### File: plugins/load/load_gizmo.py
```python
import nuke
from avalon import io
from openpype.pipeline import (
load,
get_representation_path,
)
from openpype.hosts.nuke.api.lib import (
maintained_selection,
get_avalon_knob_data,
set_avalon_knob_data
)
from openpype.hosts.nuke.api import (
containerise,
update_container,
viewer_update_and_undo_stop
)
class LoadGizmo(load.LoaderPlugin):
"""Loading nuke Gizmo"""
representations = ["gizmo"]
families = ["gizmo"]
label = "Load Gizmo"
order = 0
icon = "dropbox"
color = "white"
node_color = "0x75338eff"
def load(self, context, name, namespace, data):
"""
Loading function to get Gizmo into node graph
Arguments:
context (dict): context of version
name (str): name of the version
namespace (str): asset name
data (dict): compulsory attribute > not used
Returns:
nuke node: containerised nuke node object
"""
# get main variables
version = context['version']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
namespace = namespace or context['asset']['name']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
# prepare data for imprinting
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# getting file path
file = self.fname.replace("\\", "/")
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
with maintained_selection():
# add group from nk
nuke.nodePaste(file)
GN = nuke.selectedNode()
GN["name"].setValue(object_name)
return containerise(
node=GN,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data=data_imprint)
def update(self, container, representation):
"""Update the Loader's path
Nuke automatically tries to reset some variables when changing
the loader's path to a new file. These automatic changes are to its
inputs:
"""
# get main variables
# Get version from io
version = io.find_one({
"type": "version",
"_id": representation["parent"]
})
# get corresponding node
GN = nuke.toNode(container['objectName'])
file = get_representation_path(representation).replace("\\", "/")
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
namespace = container['namespace']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
with maintained_selection():
xpos = GN.xpos()
ypos = GN.ypos()
avalon_data = get_avalon_knob_data(GN)
nuke.delete(GN)
# add group from nk
nuke.nodePaste(file)
GN = nuke.selectedNode()
set_avalon_knob_data(GN, avalon_data)
GN.setXYpos(xpos, ypos)
GN["name"].setValue(object_name)
# get all versions in list
versions = io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
max_version = max(versions)
# change color of node
if version.get("name") not in [max_version]:
GN["tile_color"].setValue(int("0xd88467ff", 16))
else:
GN["tile_color"].setValue(int(self.node_color, 16))
self.log.info("updated to version: {}".format(version.get("name")))
return update_container(GN, data_imprint)
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
node = nuke.toNode(container['objectName'])
with viewer_update_and_undo_stop():
nuke.delete(node)
```
#### File: plugins/publish/collect_gizmo.py
```python
import pyblish.api
import nuke
@pyblish.api.log
class CollectGizmo(pyblish.api.InstancePlugin):
"""Collect Gizmo (group) node instance and its content
"""
order = pyblish.api.CollectorOrder + 0.22
label = "Collect Gizmo (Group)"
hosts = ["nuke"]
families = ["gizmo"]
def process(self, instance):
grpn = instance[0]
# add family to families
instance.data["families"].insert(0, instance.data["family"])
# make label nicer
instance.data["label"] = "{0} ({1} nodes)".format(
grpn.name(), len(instance) - 1)
# Get frame range
handle_start = instance.context.data["handleStart"]
handle_end = instance.context.data["handleEnd"]
first_frame = int(nuke.root()["first_frame"].getValue())
last_frame = int(nuke.root()["last_frame"].getValue())
# Add version data to instance
version_data = {
"handles": handle_start,
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": first_frame + handle_start,
"frameEnd": last_frame - handle_end,
"colorspace": nuke.root().knob('workingSpaceLUT').value(),
"families": [instance.data["family"]] + instance.data["families"],
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
}
instance.data.update({
"versionData": version_data,
"frameStart": first_frame,
"frameEnd": last_frame
})
self.log.info("Gizmo content collected: `{}`".format(instance[:]))
self.log.info("Gizmo instance collected: `{}`".format(instance))
```
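A quick worked example of the frame-range arithmetic above, with made-up numbers.
```python
first_frame, last_frame = 1001, 1100
handle_start, handle_end = 10, 10

version_frame_start = first_frame + handle_start  # 1011
version_frame_end = last_frame - handle_end       # 1090

print(version_frame_start, version_frame_end)
```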
#### File: plugins/publish/remove_ouput_node.py
```python
import nuke
import pyblish.api
class RemoveOutputNode(pyblish.api.ContextPlugin):
"""Removing output node for each output write node
"""
label = 'Output Node Remove'
order = pyblish.api.IntegratorOrder + 0.4
families = ["workfile"]
hosts = ['nuke']
def process(self, context):
try:
output_node = context.data["outputNode"]
name = output_node["name"].value()
self.log.info("Removing output node: '{}'".format(name))
nuke.delete(output_node)
except Exception:
return
```
#### File: plugins/publish/validate_script.py
```python
import pyblish.api
from avalon import io
from openpype import lib
@pyblish.api.log
class ValidateScript(pyblish.api.InstancePlugin):
""" Validates file output. """
order = pyblish.api.ValidatorOrder + 0.1
families = ["workfile"]
label = "Check script settings"
hosts = ["nuke"]
optional = True
def process(self, instance):
ctx_data = instance.context.data
asset_name = ctx_data["asset"]
asset = lib.get_asset(asset_name)
asset_data = asset["data"]
# These attributes will be checked
attributes = [
"fps",
"frameStart",
"frameEnd",
"resolutionWidth",
"resolutionHeight",
"handleStart",
"handleEnd"
]
# Value of these attributes can be found on parents
hierarchical_attributes = [
"fps",
"resolutionWidth",
"resolutionHeight",
"pixelAspect",
"handleStart",
"handleEnd"
]
missing_attributes = []
asset_attributes = {}
for attr in attributes:
if attr in asset_data:
asset_attributes[attr] = asset_data[attr]
elif attr in hierarchical_attributes:
# Try to find the attribute on a parent
parent = asset['parent']
if asset_data['visualParent'] is not None:
parent = asset_data['visualParent']
value = self.check_parent_hierarchical(parent, attr)
if value is None:
missing_attributes.append(attr)
else:
asset_attributes[attr] = value
else:
missing_attributes.append(attr)
# Raise error if attributes weren't found on asset in database
if len(missing_attributes) > 0:
atr = ", ".join(missing_attributes)
msg = 'Missing attributes "{}" in asset "{}"'
message = msg.format(atr, asset_name)
raise ValueError(message)
# Get handles from database, Default is 0 (if not found)
handle_start = 0
handle_end = 0
if "handleStart" in asset_attributes:
handle_start = asset_attributes["handleStart"]
if "handleEnd" in asset_attributes:
handle_end = asset_attributes["handleEnd"]
asset_attributes["fps"] = float("{0:.4f}".format(
asset_attributes["fps"]))
# Get values from nukescript
script_attributes = {
"handleStart": ctx_data["handleStart"],
"handleEnd": ctx_data["handleEnd"],
"fps": float("{0:.4f}".format(ctx_data["fps"])),
"frameStart": ctx_data["frameStart"],
"frameEnd": ctx_data["frameEnd"],
"resolutionWidth": ctx_data["resolutionWidth"],
"resolutionHeight": ctx_data["resolutionHeight"],
"pixelAspect": ctx_data["pixelAspect"]
}
# Compare asset's values Nukescript X Database
not_matching = []
for attr in attributes:
self.log.debug("asset vs script attribute \"{}\": {}, {}".format(
attr, asset_attributes[attr], script_attributes[attr])
)
if asset_attributes[attr] != script_attributes[attr]:
not_matching.append(attr)
# Raise error if not matching
if len(not_matching) > 0:
msg = "Attributes '{}' are not set correctly"
# Alert the user that handles are set when frame start/end do not match
if (
(("frameStart" in not_matching) or ("frameEnd" in not_matching)) and
((handle_start > 0) or (handle_end > 0))
):
msg += " (`handle_start` are set to {})".format(handle_start)
msg += " (`handle_end` are set to {})".format(handle_end)
message = msg.format(", ".join(not_matching))
raise ValueError(message)
def check_parent_hierarchical(self, entityId, attr):
if entityId is None:
return None
entity = io.find_one({"_id": entityId})
if attr in entity['data']:
self.log.info(attr)
return entity['data'][attr]
else:
return self.check_parent_hierarchical(entity['parent'], attr)
```
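The `check_parent_hierarchical()` recursion above walks up the asset hierarchy until an attribute is found. A standalone sketch of the same idea using an in-memory dict instead of the avalon `io` database; the ids and values below are invented.
```python
# in-memory stand-in for the asset documents
ENTITIES = {
    "shot010": {"parent": "seq01", "data": {"frameStart": 1001}},
    "seq01": {"parent": "project", "data": {"fps": 25}},
    "project": {"parent": None, "data": {"resolutionWidth": 1920}},
}


def check_parent_hierarchical(entity_id, attr):
    """Walk up the parent chain until the attribute is found, else None."""
    if entity_id is None:
        return None
    entity = ENTITIES[entity_id]
    if attr in entity["data"]:
        return entity["data"][attr]
    return check_parent_hierarchical(entity["parent"], attr)


print(check_parent_hierarchical("shot010", "fps"))              # 25 (from seq01)
print(check_parent_hierarchical("shot010", "resolutionWidth"))  # 1920 (from project)
print(check_parent_hierarchical("shot010", "pixelAspect"))      # None (not found)
```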
#### File: plugins/publish/collect_workfile.py
```python
import os
import pyblish.api
class CollectWorkfile(pyblish.api.ContextPlugin):
"""Collect current script for publish."""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Workfile"
hosts = ["photoshop"]
def process(self, context):
family = "workfile"
task = os.getenv("AVALON_TASK", None)
subset = family + task.capitalize()
file_path = context.data["currentFile"]
staging_dir = os.path.dirname(file_path)
base_name = os.path.basename(file_path)
# Create instance
instance = context.create_instance(subset)
instance.data.update({
"subset": subset,
"label": base_name,
"name": base_name,
"family": family,
"families": [],
"representations": [],
"asset": os.environ["AVALON_ASSET"]
})
# creating representation
_, ext = os.path.splitext(file_path)
instance.data["representations"].append({
"name": ext[1:],
"ext": ext[1:],
"files": base_name,
"stagingDir": staging_dir,
})
```
#### File: resolve/api/action.py
```python
from __future__ import absolute_import
import pyblish.api
from ...action import get_errored_instances_from_context
class SelectInvalidAction(pyblish.api.Action):
"""Select invalid clips in Resolve timeline when plug-in failed.
To retrieve the invalid nodes this assumes a static `get_invalid()`
method is available on the plugin.
"""
label = "Select invalid"
on = "failed" # This action is only available on a failed plug-in
icon = "search" # Icon from Awesome Icon
def process(self, context, plugin):
try:
from .lib import get_project_manager
pm = get_project_manager()
self.log.debug(pm)
except ImportError:
raise ImportError("Current host is not Resolve")
errored_instances = get_errored_instances_from_context(context)
# Apply pyblish.logic to get the instances for the plug-in
instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid clips..")
invalid = list()
for instance in instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
invalid.extend(invalid_nodes)
else:
self.log.warning("Plug-in returned to be invalid, "
"but has no selectable nodes.")
# Ensure unique (process each node only once)
invalid = list(set(invalid))
if invalid:
self.log.info("Selecting invalid nodes: %s" % ", ".join(invalid))
# TODO: select resolve timeline track items in current timeline
else:
self.log.info("No invalid nodes found.")
```
#### File: resolve/api/testing_utils.py
```python
class TestGUI:
def __init__(self):
resolve = bmd.scriptapp("Resolve") # noqa
self.fu = resolve.Fusion()
ui = self.fu.UIManager
self.disp = bmd.UIDispatcher(self.fu.UIManager) # noqa
self.title_font = ui.Font({"PixelSize": 18})
self._dialogue = self.disp.AddWindow(
{
"WindowTitle": "Get Testing folder",
"ID": "TestingWin",
"Geometry": [250, 250, 250, 100],
"Spacing": 0,
"Margin": 10
},
[
ui.VGroup(
{
"Spacing": 2
},
[
ui.Button(
{
"ID": "inputTestSourcesFolder",
"Text": "Select folder with testing media",
"Weight": 1.25,
"ToolTip": (
"Chose folder with videos, sequences, "
"single images, nested folders with "
"media"
),
"Flat": False
}
),
ui.VGap(),
ui.Button(
{
"ID": "openButton",
"Text": "Process Test",
"Weight": 2,
"ToolTip": "Run the test...",
"Flat": False
}
)
]
)
]
)
self._widgets = self._dialogue.GetItems()
self._dialogue.On.TestingWin.Close = self._close_window
self._dialogue.On.inputTestSourcesFolder.Clicked = self._open_dir_button_pressed # noqa
self._dialogue.On.openButton.Clicked = self.process
def _close_window(self, event):
self.disp.ExitLoop()
def process(self, event):
# placeholder method, meant to be overridden by a child class
pass
def _open_dir_button_pressed(self, event):
# placeholder method, meant to be overridden by a child class
pass
def show_gui(self):
self._dialogue.Show()
self.disp.RunLoop()
self._dialogue.Hide()
```
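A hedged sketch of how the two placeholder methods above are meant to be filled in by a child class. The `RequestDir` dialog call is an assumption about the Fusion scripting API, and `bmd` is only available inside Resolve/Fusion, so adjust both for your setup.
```python
class FolderPickerTest(TestGUI):
    """Example child class filling in the two placeholder methods."""

    def __init__(self):
        super(FolderPickerTest, self).__init__()
        self.source_folder = None

    def _open_dir_button_pressed(self, event):
        # RequestDir is assumed to exist on the Fusion object; swap in the
        # dialog call your Resolve/Fusion version provides if it does not
        self.source_folder = self.fu.RequestDir()

    def process(self, event):
        print("Processing test media in: {}".format(self.source_folder))
        self.disp.ExitLoop()


# gui = FolderPickerTest()
# gui.show_gui()
```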
#### File: plugins/publish/precollect_workfile.py
```python
import pyblish.api
from openpype.hosts import resolve
from avalon import api as avalon
from pprint import pformat
# dev
from importlib import reload
from openpype.hosts.resolve.otio import davinci_export
reload(davinci_export)
class PrecollectWorkfile(pyblish.api.ContextPlugin):
"""Precollect the current working file into context"""
label = "Precollect Workfile"
order = pyblish.api.CollectorOrder - 0.5
def process(self, context):
asset = avalon.Session["AVALON_ASSET"]
subset = "workfile"
project = resolve.get_current_project()
fps = project.GetSetting("timelineFrameRate")
video_tracks = resolve.get_video_track_names()
# adding otio timeline to context
otio_timeline = davinci_export.create_otio_timeline(project)
instance_data = {
"name": "{}_{}".format(asset, subset),
"asset": asset,
"subset": "{}{}".format(asset, subset.capitalize()),
"item": project,
"family": "workfile"
}
# create instance with workfile
instance = context.create_instance(**instance_data)
# update context with main project attributes
context_data = {
"activeProject": project,
"otioTimeline": otio_timeline,
"videoTracks": video_tracks,
"currentFile": project.GetName(),
"fps": fps,
}
context.data.update(context_data)
self.log.info("Creating instance: {}".format(instance))
self.log.debug("__ instance.data: {}".format(pformat(instance.data)))
self.log.debug("__ context_data: {}".format(pformat(context_data)))
```
#### File: plugins/publish/collect_editorial_resources.py
```python
import os
import re
import tempfile
import pyblish.api
from copy import deepcopy
import clique
class CollectInstanceResources(pyblish.api.InstancePlugin):
"""Collect instance's resources"""
# must be after `CollectInstances`
order = pyblish.api.CollectorOrder + 0.011
label = "Collect Editorial Resources"
hosts = ["standalonepublisher"]
families = ["clip"]
def process(self, instance):
self.context = instance.context
self.log.info(f"Processing instance: {instance}")
self.new_instances = []
subset_files = dict()
subset_dirs = list()
anatomy = self.context.data["anatomy"]
anatomy_data = deepcopy(self.context.data["anatomyData"])
anatomy_data.update({"root": anatomy.roots})
subset = instance.data["subset"]
clip_name = instance.data["clipName"]
editorial_source_root = instance.data["editorialSourceRoot"]
editorial_source_path = instance.data["editorialSourcePath"]
# if `editorial_source_path` then loop through
if editorial_source_path:
# add the `trimming` family so the longer mov/mp4 source can be cut
# later by the `ExtractTrimmingVideoAudio` plugin
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
instance.data["stagingDir"] = staging_dir
instance.data["families"] += ["trimming"]
return
# if template pattern in path then fill it with `anatomy_data`
if "{" in editorial_source_root:
editorial_source_root = editorial_source_root.format(
**anatomy_data)
self.log.debug(f"root: {editorial_source_root}")
# loop `editorial_source_root` and find clip name in folders
# and look for any subset name alternatives
for root, dirs, _files in os.walk(editorial_source_root):
# search only for directories related to clip name
correct_clip_dir = None
for _d_search in dirs:
# avoid all non clip dirs
if _d_search not in clip_name:
continue
# found correct dir for clip
correct_clip_dir = _d_search
# continue if clip dir was not found
if not correct_clip_dir:
continue
clip_dir_path = os.path.join(root, correct_clip_dir)
subset_files_items = list()
# list content of clip dir and search for subset items
for subset_item in os.listdir(clip_dir_path):
# avoid all items which are not defined as subsets by name
if subset not in subset_item:
continue
subset_item_path = os.path.join(
clip_dir_path, subset_item)
# if it is dir store it to `subset_dirs` list
if os.path.isdir(subset_item_path):
subset_dirs.append(subset_item_path)
# if it is file then store it to `subset_files` list
if os.path.isfile(subset_item_path):
subset_files_items.append(subset_item_path)
if subset_files_items:
subset_files.update({clip_dir_path: subset_files_items})
# break the loop if correct_clip_dir was captured
# no need to carry on if the correct folder was found
if correct_clip_dir:
break
if subset_dirs:
# look all dirs and check for subset name alternatives
for _dir in subset_dirs:
instance_data = deepcopy(
{k: v for k, v in instance.data.items()})
sub_dir = os.path.basename(_dir)
# if subset name is only alternative then create new instance
if sub_dir != subset:
instance_data = self.duplicate_instance(
instance_data, subset, sub_dir)
# create all representations
self.create_representations(
os.listdir(_dir), instance_data, _dir)
if sub_dir == subset:
self.new_instances.append(instance_data)
# instance.data.update(instance_data)
if subset_files:
unique_subset_names = list()
root_dir = list(subset_files.keys()).pop()
files_list = subset_files[root_dir]
search_pattern = f"({subset}[A-Za-z0-9]+)(?=[\\._\\s])"
for _file in files_list:
pattern = re.compile(search_pattern)
match = pattern.findall(_file)
if not match:
continue
match_subset = match.pop()
if match_subset in unique_subset_names:
continue
unique_subset_names.append(match_subset)
self.log.debug(f"unique_subset_names: {unique_subset_names}")
for _un_subs in unique_subset_names:
instance_data = self.duplicate_instance(
instance.data, subset, _un_subs)
# create all representations
self.create_representations(
[os.path.basename(f) for f in files_list
if _un_subs in f],
instance_data, root_dir)
# remove the original instance as it had been used only
# as template and is duplicated
self.context.remove(instance)
# create all instances in self.new_instances into context
for new_instance in self.new_instances:
_new_instance = self.context.create_instance(
new_instance["name"])
_new_instance.data.update(new_instance)
def duplicate_instance(self, instance_data, subset, new_subset):
new_instance_data = dict()
for _key, _value in instance_data.items():
new_instance_data[_key] = _value
if not isinstance(_value, str):
continue
if subset in _value:
new_instance_data[_key] = _value.replace(
subset, new_subset)
self.log.info(f"Creating new instance: {new_instance_data['name']}")
self.new_instances.append(new_instance_data)
return new_instance_data
def create_representations(
self, files_list, instance_data, staging_dir):
""" Create representations from Collection object
"""
# collecting frames for later frame start/end reset
frames = list()
# break down Collection object to collections and remainders
collections, remainder = clique.assemble(files_list)
# add staging_dir to instance_data
instance_data["stagingDir"] = staging_dir
# add representations to instance_data
instance_data["representations"] = list()
collection_head_name = None
# loop through collections and create representations
for _collection in collections:
ext = _collection.tail[1:]
collection_head_name = _collection.head
frame_start = list(_collection.indexes)[0]
frame_end = list(_collection.indexes)[-1]
repre_data = {
"frameStart": frame_start,
"frameEnd": frame_end,
"name": ext,
"ext": ext,
"files": [item for item in _collection],
"stagingDir": staging_dir
}
if instance_data.get("keepSequence"):
repre_data_keep = deepcopy(repre_data)
instance_data["representations"].append(repre_data_keep)
if "review" in instance_data["families"]:
repre_data.update({
"thumbnail": True,
"frameStartFtrack": frame_start,
"frameEndFtrack": frame_end,
"step": 1,
"fps": self.context.data.get("fps"),
"name": "review",
"tags": ["review", "ftrackreview", "delete"],
})
instance_data["representations"].append(repre_data)
# add to frames for frame range reset
frames.append(frame_start)
frames.append(frame_end)
# loop through remainders and create representations
for _reminding_file in remainder:
ext = os.path.splitext(_reminding_file)[-1][1:]
if ext not in instance_data["extensions"]:
continue
if collection_head_name and (
(collection_head_name + ext) not in _reminding_file
) and (ext in ["mp4", "mov"]):
self.log.info(f"Skipping file: {_reminding_file}")
continue
frame_start = 1
frame_end = 1
repre_data = {
"name": ext,
"ext": ext,
"files": _reminding_file,
"stagingDir": staging_dir
}
# exception for thumbnail
if "thumb" in _reminding_file:
repre_data.update({
'name': "thumbnail",
'thumbnail': True
})
# exception for mp4 preview
if ext in ["mp4", "mov"]:
frame_start = 0
frame_end = (
(instance_data["frameEnd"] - instance_data["frameStart"])
+ 1)
# add review ftrack family into families
for _family in ["review", "ftrack"]:
if _family not in instance_data["families"]:
instance_data["families"].append(_family)
repre_data.update({
"frameStart": frame_start,
"frameEnd": frame_end,
"frameStartFtrack": frame_start,
"frameEndFtrack": frame_end,
"step": 1,
"fps": self.context.data.get("fps"),
"name": "review",
"thumbnail": True,
"tags": ["review", "ftrackreview", "delete"],
})
# add to frames for frame range reset only if no collection
if not collections:
frames.append(frame_start)
frames.append(frame_end)
instance_data["representations"].append(repre_data)
# reset frame start / end
instance_data["frameStart"] = min(frames)
instance_data["frameEnd"] = max(frames)
```
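A small standalone illustration of the `clique.assemble()` call used in `create_representations()`, with invented file names; it requires the `clique` package.
```python
import clique

files = [
    "plateMain.0001.exr", "plateMain.0002.exr", "plateMain.0003.exr",
    "plateMain_thumb.jpg", "plateMain_review.mov",
]

collections, remainder = clique.assemble(files)
for collection in collections:
    # head, tail and indexes are the attributes the plugin reads above
    print(collection.head, collection.tail, list(collection.indexes))
    # -> plateMain. .exr [1, 2, 3]

print(remainder)  # files that do not belong to any frame sequence
```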
#### File: plugins/publish/validate_texture_workfiles.py
```python
import pyblish.api
import openpype.api
from openpype.pipeline import PublishXmlValidationError
class ValidateTextureBatchWorkfiles(pyblish.api.InstancePlugin):
"""Validates that textures workfile has collected resources (optional).
Collected resources means secondary workfiles (in most cases).
"""
label = "Validate Texture Workfile Has Resources"
hosts = ["standalonepublisher"]
order = openpype.api.ValidateContentsOrder
families = ["texture_batch_workfile"]
optional = True
# from presets
main_workfile_extensions = ['mra']
def process(self, instance):
if instance.data["family"] == "workfile":
ext = instance.data["representations"][0]["ext"]
if ext not in self.main_workfile_extensions:
self.log.warning("Only secondary workfile present!")
return
if not instance.data.get("resources"):
msg = "No secondary workfile present for workfile '{}'". \
format(instance.data["name"])
ext = self.main_workfile_extensions[0]
formatting_data = {"file_name": instance.data["name"],
"extension": ext}
raise PublishXmlValidationError(self, msg,
formatting_data=formatting_data
)
```
#### File: plugins/publish/collect_instance_frames.py
```python
import pyblish.api
class CollectOutputFrameRange(pyblish.api.ContextPlugin):
"""Collect frame start/end from context.
When instances are collected context does not contain `frameStart` and
`frameEnd` keys yet. They are collected in global plugin
`CollectAvalonEntities`.
"""
label = "Collect output frame range"
order = pyblish.api.CollectorOrder
hosts = ["tvpaint"]
def process(self, context):
for instance in context:
frame_start = instance.data.get("frameStart")
frame_end = instance.data.get("frameEnd")
if frame_start is not None and frame_end is not None:
self.log.debug(
"Instance {} already has set frames {}-{}".format(
str(instance), frame_start, frame_end
)
)
continue
frame_start = context.data.get("frameStart")
frame_end = context.data.get("frameEnd")
instance.data["frameStart"] = frame_start
instance.data["frameEnd"] = frame_end
self.log.info(
"Set frames {}-{} on instance {} ".format(
frame_start, frame_end, str(instance)
)
)
```
#### File: plugins/publish/increment_workfile_version.py
```python
import pyblish.api
from openpype.api import version_up
from openpype.hosts.tvpaint.api import workio
class IncrementWorkfileVersion(pyblish.api.ContextPlugin):
"""Increment current workfile version."""
order = pyblish.api.IntegratorOrder + 1
label = "Increment Workfile Version"
optional = True
hosts = ["tvpaint"]
def process(self, context):
assert all(result["success"] for result in context.data["results"]), (
"Publishing not successful so version is not increased.")
path = context.data["currentFile"]
workio.save_file(version_up(path))
self.log.info('Incrementing workfile version')
```
#### File: plugins/publish/validate_duplicated_layer_names.py
```python
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
class ValidateLayersGroup(pyblish.api.InstancePlugin):
"""Validate layer names for publishing are unique for whole workfile."""
label = "Validate Duplicated Layers Names"
order = pyblish.api.ValidatorOrder
families = ["renderPass"]
def process(self, instance):
# Prepare layers
layers_by_name = instance.context.data["layersByName"]
# Layers ids of an instance
layer_names = instance.data["layer_names"]
# Check if all layers from render pass are in right group
duplicated_layer_names = []
for layer_name in layer_names:
layers = layers_by_name.get(layer_name)
if len(layers) > 1:
duplicated_layer_names.append(layer_name)
# Everything is OK and skip exception
if not duplicated_layer_names:
return
layers_msg = ", ".join([
"\"{}\"".format(layer_name)
for layer_name in duplicated_layer_names
])
detail_lines = [
"- {}".format(layer_name)
for layer_name in set(duplicated_layer_names)
]
raise PublishXmlValidationError(
self,
(
"Layers have duplicated names for instance {}."
# Description what's wrong
" There are layers with same name and one of them is marked"
" for publishing so it is not possible to know which should"
" be published. Please look for layers with names: {}"
).format(instance.data["label"], layers_msg),
formatting_data={
"layer_names": "<br/>".join(detail_lines)
}
)
```
#### File: plugins/publish/validate_start_frame.py
```python
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import lib
class RepairStartFrame(pyblish.api.Action):
"""Repair start frame."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
lib.execute_george("tv_startframe 0")
class ValidateStartFrame(pyblish.api.ContextPlugin):
"""Validate start frame being at frame 0."""
label = "Validate Start Frame"
order = pyblish.api.ValidatorOrder
hosts = ["tvpaint"]
actions = [RepairStartFrame]
optional = True
def process(self, context):
start_frame = lib.execute_george("tv_startframe")
if start_frame == 0:
return
raise PublishXmlValidationError(
self,
"Start frame has to be frame 0.",
formatting_data={
"current_start_frame": start_frame
}
)
```
#### File: tvpaint/tvpaint_plugin/__init__.py
```python
import os
def get_plugin_files_path():
current_dir = os.path.dirname(os.path.abspath(__file__))
return os.path.join(current_dir, "plugin_files")
```
#### File: plugins/publish/collect_current_file.py
```python
import unreal # noqa
import pyblish.api
class CollectUnrealCurrentFile(pyblish.api.ContextPlugin):
"""Inject the current working file into context."""
order = pyblish.api.CollectorOrder - 0.5
label = "Unreal Current File"
hosts = ['unreal']
def process(self, context):
"""Inject the current working file."""
current_file = unreal.Paths.get_project_file_path()
context.data['currentFile'] = current_file
assert current_file != '', "Current file is empty. " \
"Save the file before continuing."
```
#### File: webpublisher/api/__init__.py
```python
import os
import logging
from avalon import api as avalon
from avalon import io
from pyblish import api as pyblish
import openpype.hosts.webpublisher
log = logging.getLogger("openpype.hosts.webpublisher")
HOST_DIR = os.path.dirname(os.path.abspath(
openpype.hosts.webpublisher.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
def install():
print("Installing Pype config...")
pyblish.register_plugin_path(PUBLISH_PATH)
log.info(PUBLISH_PATH)
io.install()
def uninstall():
pyblish.deregister_plugin_path(PUBLISH_PATH)
# to have required methods for interface
def ls():
pass
```
#### File: plugins/publish/collect_tvpaint_workfile_data.py
```python
import os
import uuid
import json
import shutil
import pyblish.api
from openpype.lib.plugin_tools import parse_json
from openpype.hosts.tvpaint.worker import (
SenderTVPaintCommands,
CollectSceneData
)
class CollectTVPaintWorkfileData(pyblish.api.ContextPlugin):
label = "Collect TVPaint Workfile data"
order = pyblish.api.CollectorOrder - 0.4
hosts = ["webpublisher"]
targets = ["tvpaint_worker"]
def process(self, context):
# Get JobQueue module
modules = context.data["openPypeModules"]
job_queue_module = modules["job_queue"]
jobs_root = job_queue_module.get_jobs_root()
if not jobs_root:
raise ValueError("Job Queue root is not set.")
context.data["jobsRoot"] = jobs_root
context_staging_dir = self._create_context_staging_dir(jobs_root)
workfile_path = self._extract_workfile_path(
context, context_staging_dir
)
context.data["contextStagingDir"] = context_staging_dir
context.data["workfilePath"] = workfile_path
# Prepare tvpaint command
collect_scene_data_command = CollectSceneData()
# Create TVPaint sender commands
commands = SenderTVPaintCommands(workfile_path, job_queue_module)
commands.add_command(collect_scene_data_command)
# Send job and wait for answer
commands.send_job_and_wait()
collected_data = collect_scene_data_command.result()
layers_data = collected_data["layers_data"]
groups_data = collected_data["groups_data"]
scene_data = collected_data["scene_data"]
exposure_frames_by_layer_id = (
collected_data["exposure_frames_by_layer_id"]
)
pre_post_beh_by_layer_id = (
collected_data["pre_post_beh_by_layer_id"]
)
# Store results
# scene data store the same way as TVPaint collector
scene_data = {
"sceneWidth": scene_data["width"],
"sceneHeight": scene_data["height"],
"scenePixelAspect": scene_data["pixel_aspect"],
"sceneFps": scene_data["fps"],
"sceneFieldOrder": scene_data["field_order"],
"sceneMarkIn": scene_data["mark_in"],
# scene_data["mark_in_state"],
"sceneMarkInState": scene_data["mark_in_set"],
"sceneMarkOut": scene_data["mark_out"],
# scene_data["mark_out_state"],
"sceneMarkOutState": scene_data["mark_out_set"],
"sceneStartFrame": scene_data["start_frame"],
"sceneBgColor": scene_data["bg_color"]
}
context.data["sceneData"] = scene_data
# Store only raw data
context.data["groupsData"] = groups_data
context.data["layersData"] = layers_data
context.data["layersExposureFrames"] = exposure_frames_by_layer_id
context.data["layersPrePostBehavior"] = pre_post_beh_by_layer_id
self.log.debug(
(
"Collected data"
"\nScene data: {}"
"\nLayers data: {}"
"\nExposure frames: {}"
"\nPre/Post behavior: {}"
).format(
json.dumps(scene_data, indent=4),
json.dumps(layers_data, indent=4),
json.dumps(exposure_frames_by_layer_id, indent=4),
json.dumps(pre_post_beh_by_layer_id, indent=4)
)
)
def _create_context_staging_dir(self, jobs_root):
if not os.path.exists(jobs_root):
os.makedirs(jobs_root)
random_folder_name = str(uuid.uuid4())
full_path = os.path.join(jobs_root, random_folder_name)
if not os.path.exists(full_path):
os.makedirs(full_path)
return full_path
def _extract_workfile_path(self, context, context_staging_dir):
"""Find first TVPaint file in tasks and use it."""
batch_dir = context.data["batchDir"]
batch_data = context.data["batchData"]
src_workfile_path = None
for task_id in batch_data["tasks"]:
if src_workfile_path is not None:
break
task_dir = os.path.join(batch_dir, task_id)
task_manifest_path = os.path.join(task_dir, "manifest.json")
task_data = parse_json(task_manifest_path)
task_files = task_data["files"]
for filename in task_files:
_, ext = os.path.splitext(filename)
if ext.lower() == ".tvpp":
src_workfile_path = os.path.join(task_dir, filename)
break
# Copy workfile to job queue work root
new_workfile_path = os.path.join(
context_staging_dir, os.path.basename(src_workfile_path)
)
shutil.copy(src_workfile_path, new_workfile_path)
return new_workfile_path
```
#### File: webpublisher/webserver_service/webserver_cli.py
```python
import collections
import time
import os
from datetime import datetime
import requests
import json
import subprocess
from openpype.lib import PypeLogger
from .webpublish_routes import (
RestApiResource,
OpenPypeRestApiResource,
HiearchyEndpoint,
ProjectsEndpoint,
ConfiguredExtensionsEndpoint,
BatchPublishEndpoint,
BatchReprocessEndpoint,
BatchStatusEndpoint,
TaskPublishEndpoint,
UserReportEndpoint
)
from openpype.lib.remote_publish import (
ERROR_STATUS,
REPROCESS_STATUS,
SENT_REPROCESSING_STATUS
)
log = PypeLogger().get_logger("webserver_gui")
def run_webserver(*args, **kwargs):
"""Runs webserver in command line, adds routes."""
from openpype.modules import ModulesManager
manager = ModulesManager()
webserver_module = manager.modules_by_name["webserver"]
host = kwargs.get("host") or "localhost"
port = kwargs.get("port") or 8079
server_manager = webserver_module.create_new_server_manager(port, host)
webserver_url = server_manager.url
# queue for remotepublishfromapp tasks
studio_task_queue = collections.deque()
resource = RestApiResource(server_manager,
upload_dir=kwargs["upload_dir"],
executable=kwargs["executable"],
studio_task_queue=studio_task_queue)
projects_endpoint = ProjectsEndpoint(resource)
server_manager.add_route(
"GET",
"/api/projects",
projects_endpoint.dispatch
)
hiearchy_endpoint = HiearchyEndpoint(resource)
server_manager.add_route(
"GET",
"/api/hierarchy/{project_name}",
hiearchy_endpoint.dispatch
)
configured_ext_endpoint = ConfiguredExtensionsEndpoint(resource)
server_manager.add_route(
"GET",
"/api/webpublish/configured_ext/{project_name}",
configured_ext_endpoint.dispatch
)
# triggers publish
webpublisher_batch_publish_endpoint = \
BatchPublishEndpoint(resource)
server_manager.add_route(
"POST",
"/api/webpublish/batch",
webpublisher_batch_publish_endpoint.dispatch
)
webpublisher_task_publish_endpoint = \
TaskPublishEndpoint(resource)
server_manager.add_route(
"POST",
"/api/webpublish/task",
webpublisher_task_publish_endpoint.dispatch
)
# reporting
openpype_resource = OpenPypeRestApiResource()
batch_status_endpoint = BatchStatusEndpoint(openpype_resource)
server_manager.add_route(
"GET",
"/api/batch_status/{batch_id}",
batch_status_endpoint.dispatch
)
user_status_endpoint = UserReportEndpoint(openpype_resource)
server_manager.add_route(
"GET",
"/api/publishes/{user}",
user_status_endpoint.dispatch
)
webpublisher_batch_reprocess_endpoint = \
BatchReprocessEndpoint(openpype_resource)
server_manager.add_route(
"POST",
"/api/webpublish/reprocess/{batch_id}",
webpublisher_batch_reprocess_endpoint.dispatch
)
server_manager.start_server()
last_reprocessed = time.time()
while True:
if time.time() - last_reprocessed > 20:
reprocess_failed(kwargs["upload_dir"], webserver_url)
last_reprocessed = time.time()
if studio_task_queue:
args = studio_task_queue.popleft()
subprocess.call(args) # blocking call
time.sleep(1.0)
def reprocess_failed(upload_dir, webserver_url):
# log.info("check_reprocesable_records")
from openpype.lib import OpenPypeMongoConnection
mongo_client = OpenPypeMongoConnection.get_mongo_client()
database_name = os.environ["OPENPYPE_DATABASE_NAME"]
dbcon = mongo_client[database_name]["webpublishes"]
results = dbcon.find({"status": REPROCESS_STATUS})
reprocessed_batches = set()
for batch in results:
if batch["batch_id"] in reprocessed_batches:
continue
batch_url = os.path.join(upload_dir,
batch["batch_id"],
"manifest.json")
log.info("batch:: {} {}".format(os.path.exists(batch_url), batch_url))
if not os.path.exists(batch_url):
msg = "Manifest {} not found".format(batch_url)
print(msg)
dbcon.update_one(
{"_id": batch["_id"]},
{"$set":
{
"finish_date": datetime.now(),
"status": ERROR_STATUS,
"progress": 100,
"log": batch.get("log") + msg
}}
)
continue
server_url = "{}/api/webpublish/batch".format(webserver_url)
with open(batch_url) as f:
data = json.loads(f.read())
dbcon.update_many(
{
"batch_id": batch["batch_id"],
"status": {"$in": [ERROR_STATUS, REPROCESS_STATUS]}
},
{
"$set": {
"finish_date": datetime.now(),
"status": SENT_REPROCESSING_STATUS,
"progress": 100
}
}
)
try:
r = requests.post(server_url, json=data)
log.info("response{}".format(r))
except Exception:
log.info("exception", exc_info=True)
reprocessed_batches.add(batch["batch_id"])
```
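A hedged client-side sketch of calling two of the routes registered above with `requests`; the host and port match the defaults in `run_webserver()`, but the payload shape is hypothetical and only the route paths come from the code.
```python
import requests

base_url = "http://localhost:8079"

# list projects
projects = requests.get("{}/api/projects".format(base_url)).json()
print(projects)

# trigger a batch publish (payload structure is a placeholder)
response = requests.post(
    "{}/api/webpublish/batch".format(base_url),
    json={"batch_id": "some-batch-id"},
)
print(response.status_code)
```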
#### File: openpype/lib/profiling.py
```python
import os
import cProfile
def do_profile(fn, to_file=None):
"""Wraps function in profiler run and print stat after it is done.
Args:
to_file (str, optional): If specified, dumps stats into the file
instead of printing.
"""
if to_file:
to_file = to_file.format(pid=os.getpid())
def profiled(*args, **kwargs):
profiler = cProfile.Profile()
try:
profiler.enable()
res = fn(*args, **kwargs)
profiler.disable()
return res
finally:
if to_file:
profiler.dump_stats(to_file)
else:
profiler.print_stats()
return profiled
```
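A hedged usage sketch for `do_profile()`, assuming the returned wrapper is called in place of the original function; the sample function and file path are made up.
```python
def slow_sum(count):
    total = 0
    for number in range(count):
        total += number
    return total


profiled_sum = do_profile(slow_sum)  # prints stats to stdout after each call
# profiled_sum = do_profile(slow_sum, to_file="/tmp/profile_{pid}.stats")
print(profiled_sum(100000))
```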
#### File: modules/clockify/clockify_api.py
```python
import os
import re
import time
import json
import datetime
import requests
from .constants import (
CLOCKIFY_ENDPOINT,
ADMIN_PERMISSION_NAMES
)
from openpype.lib.local_settings import OpenPypeSecureRegistry
def time_check(obj):
"""Crude rate limiter: allow roughly ten API requests per second."""
if obj.request_counter < 10:
obj.request_counter += 1
return
wait_time = 1 - (time.time() - obj.request_time)
if wait_time > 0:
time.sleep(wait_time)
obj.request_time = time.time()
obj.request_counter = 0
class ClockifyAPI:
def __init__(self, api_key=None, master_parent=None):
self.workspace_name = None
self.workspace_id = None
self.master_parent = master_parent
self.api_key = api_key
self.request_counter = 0
self.request_time = time.time()
self._secure_registry = None
@property
def secure_registry(self):
if self._secure_registry is None:
self._secure_registry = OpenPypeSecureRegistry("clockify")
return self._secure_registry
@property
def headers(self):
return {"X-Api-Key": self.api_key}
def verify_api(self):
for key, value in self.headers.items():
if value is None or value.strip() == '':
return False
return True
def set_api(self, api_key=None):
if api_key is None:
api_key = self.get_api_key()
if api_key is not None and self.validate_api_key(api_key) is True:
self.api_key = api_key
self.set_workspace()
if self.master_parent:
self.master_parent.signed_in()
return True
return False
def validate_api_key(self, api_key):
test_headers = {'X-Api-Key': api_key}
action_url = 'workspaces/'
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=test_headers
)
if response.status_code != 200:
return False
return True
def validate_workspace_perm(self, workspace_id=None):
user_id = self.get_user_id()
if user_id is None:
return False
if workspace_id is None:
workspace_id = self.workspace_id
action_url = "/workspaces/{}/users/{}/permissions".format(
workspace_id, user_id
)
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
)
user_permissions = response.json()
for perm in user_permissions:
if perm['name'] in ADMIN_PERMISSION_NAMES:
return True
return False
def get_user_id(self):
action_url = 'v1/user/'
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
)
# this regex is necessary: UNICODE strings crash
# during json serialization
id_regex = r'"id":"\w+"'
result = re.findall(id_regex, str(response.content))
if len(result) != 1:
# replace with log and better message?
print('User ID was not found (this is a BUG!!!)')
return None
return json.loads('{'+result[0]+'}')['id']
def set_workspace(self, name=None):
if name is None:
name = os.environ.get('CLOCKIFY_WORKSPACE', None)
self.workspace_name = name
self.workspace_id = None
if self.workspace_name is None:
return
try:
result = self.validate_workspace()
except Exception:
result = False
if result is not False:
self.workspace_id = result
if self.master_parent is not None:
self.master_parent.start_timer_check()
return True
return False
def validate_workspace(self, name=None):
if name is None:
name = self.workspace_name
all_workspaces = self.get_workspaces()
if name in all_workspaces:
return all_workspaces[name]
return False
def get_api_key(self):
return self.secure_registry.get_item("api_key", None)
def save_api_key(self, api_key):
self.secure_registry.set_item("api_key", api_key)
def get_workspaces(self):
action_url = 'workspaces/'
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
)
return {
workspace["name"]: workspace["id"] for workspace in response.json()
}
def get_projects(self, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/projects/'.format(workspace_id)
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
)
return {
project["name"]: project["id"] for project in response.json()
}
def get_project_by_id(self, project_id, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/projects/{}/'.format(
workspace_id, project_id
)
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
)
return response.json()
def get_tags(self, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/tags/'.format(workspace_id)
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
)
return {
tag["name"]: tag["id"] for tag in response.json()
}
def get_tasks(self, project_id, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/projects/{}/tasks/'.format(
workspace_id, project_id
)
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
)
return {
task["name"]: task["id"] for task in response.json()
}
def get_workspace_id(self, workspace_name):
all_workspaces = self.get_workspaces()
if workspace_name not in all_workspaces:
return None
return all_workspaces[workspace_name]
def get_project_id(self, project_name, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
all_projects = self.get_projects(workspace_id)
if project_name not in all_projects:
return None
return all_projects[project_name]
def get_tag_id(self, tag_name, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
all_tasks = self.get_tags(workspace_id)
if tag_name not in all_tasks:
return None
return all_tasks[tag_name]
def get_task_id(
self, task_name, project_id, workspace_id=None
):
if workspace_id is None:
workspace_id = self.workspace_id
all_tasks = self.get_tasks(
project_id, workspace_id
)
if task_name not in all_tasks:
return None
return all_tasks[task_name]
def get_current_time(self):
return str(datetime.datetime.utcnow().isoformat())+'Z'
def start_time_entry(
self, description, project_id, task_id=None, tag_ids=[],
workspace_id=None, billable=True
):
# Workspace
if workspace_id is None:
workspace_id = self.workspace_id
# Check if a time entry is already running with the same values
current = self.get_in_progress(workspace_id)
if current is not None:
if (
current.get("description", None) == description and
current.get("projectId", None) == project_id and
current.get("taskId", None) == task_id
):
self.bool_timer_run = True
return self.bool_timer_run
self.finish_time_entry(workspace_id)
# Convert billable to strings
if billable:
billable = 'true'
else:
billable = 'false'
# Rest API Action
action_url = 'workspaces/{}/timeEntries/'.format(workspace_id)
start = self.get_current_time()
body = {
"start": start,
"billable": billable,
"description": description,
"projectId": project_id,
"taskId": task_id,
"tagIds": tag_ids
}
time_check(self)
response = requests.post(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
json=body
)
success = False
if response.status_code < 300:
success = True
return success
def get_in_progress(self, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/timeEntries/inProgress'.format(
workspace_id
)
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
)
try:
output = response.json()
except json.decoder.JSONDecodeError:
output = None
return output
def finish_time_entry(self, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
current = self.get_in_progress(workspace_id)
if current is None:
return
current_id = current["id"]
action_url = 'workspaces/{}/timeEntries/{}'.format(
workspace_id, current_id
)
body = {
"start": current["timeInterval"]["start"],
"billable": current["billable"],
"description": current["description"],
"projectId": current["projectId"],
"taskId": current["taskId"],
"tagIds": current["tagIds"],
"end": self.get_current_time()
}
time_check(self)
response = requests.put(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
json=body
)
return response.json()
def get_time_entries(
self, workspace_id=None, quantity=10
):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/timeEntries/'.format(workspace_id)
time_check(self)
response = requests.get(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
)
return response.json()[:quantity]
def remove_time_entry(self, tid, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/timeEntries/{}'.format(
workspace_id, tid
)
time_check(self)
response = requests.delete(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers
)
return response.json()
def add_project(self, name, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/projects/'.format(workspace_id)
body = {
"name": name,
"clientId": "",
"isPublic": "false",
"estimate": {
"estimate": 0,
"type": "AUTO"
},
"color": "#f44336",
"billable": "true"
}
time_check(self)
response = requests.post(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
json=body
)
return response.json()
def add_workspace(self, name):
action_url = 'workspaces/'
body = {"name": name}
time_check(self)
response = requests.post(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
json=body
)
return response.json()
def add_task(
self, name, project_id, workspace_id=None
):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/projects/{}/tasks/'.format(
workspace_id, project_id
)
body = {
"name": name,
"projectId": project_id
}
time_check(self)
response = requests.post(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
json=body
)
return response.json()
def add_tag(self, name, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = 'workspaces/{}/tags'.format(workspace_id)
body = {
"name": name
}
time_check(self)
response = requests.post(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
json=body
)
return response.json()
def delete_project(
self, project_id, workspace_id=None
):
if workspace_id is None:
workspace_id = self.workspace_id
action_url = '/workspaces/{}/projects/{}'.format(
workspace_id, project_id
)
time_check(self)
response = requests.delete(
CLOCKIFY_ENDPOINT + action_url,
headers=self.headers,
)
return response.json()
def convert_input(
self, entity_id, entity_name, mode='Workspace', project_id=None
):
if entity_id is None:
error = False
error_msg = 'Missing information "{}"'
if mode.lower() == 'workspace':
if entity_id is None and entity_name is None:
if self.workspace_id is not None:
entity_id = self.workspace_id
else:
error = True
else:
entity_id = self.get_workspace_id(entity_name)
else:
if entity_id is None and entity_name is None:
error = True
elif mode.lower() == 'project':
entity_id = self.get_project_id(entity_name)
elif mode.lower() == 'task':
entity_id = self.get_task_id(
task_name=entity_name, project_id=project_id
)
else:
raise TypeError('Unknown type')
# Raise error
if error:
raise ValueError(error_msg.format(mode))
return entity_id
```
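The client above is a thin wrapper around the Clockify REST endpoints. A minimal usage sketch under stated assumptions: the class name `ClockifyAPI` and the `set_api` call are hypothetical glue, while `CLOCKIFY_ENDPOINT`, the rate-limiting `time_check` helper and the secure key storage are expected to live elsewhere in the module.
```python
# Hypothetical usage sketch of the wrapper above; ClockifyAPI and set_api
# are assumptions, the other method names are taken from the code shown.
api = ClockifyAPI()
api.set_api(api.get_api_key())  # build request headers from the stored key

workspace_id = api.get_workspace_id("Studio")
project_id = api.get_project_id("MyProject", workspace_id)
task_id = api.get_task_id("compositing", project_id, workspace_id)

# Start a timer; an identical running entry is reused, any other running
# entry is finished first.
api.start_time_entry(
    description="shot010 compositing",
    project_id=project_id,
    task_id=task_id,
    workspace_id=workspace_id,
)

# ... later, stop whatever entry is in progress.
api.finish_time_entry(workspace_id)
```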
#### File: plugins/publish/submit_aftereffects_deadline.py
```python
import os
import attr
import getpass
import pyblish.api
from avalon import api
from openpype.lib import env_value_to_bool
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
@attr.s
class DeadlinePluginInfo():
Comp = attr.ib(default=None)
SceneFile = attr.ib(default=None)
OutputFilePath = attr.ib(default=None)
Output = attr.ib(default=None)
StartupDirectory = attr.ib(default=None)
Arguments = attr.ib(default=None)
ProjectPath = attr.ib(default=None)
AWSAssetFile0 = attr.ib(default=None)
Version = attr.ib(default=None)
MultiProcess = attr.ib(default=None)
class AfterEffectsSubmitDeadline(
abstract_submit_deadline.AbstractSubmitDeadline
):
label = "Submit AE to Deadline"
order = pyblish.api.IntegratorOrder + 0.1
hosts = ["aftereffects"]
families = ["render.farm"] # cannot be "render' as that is integrated
use_published = True
priority = 50
chunk_size = 1000000
primary_pool = None
secondary_pool = None
group = None
department = None
multiprocess = True
def get_job_info(self):
dln_job_info = DeadlineJobInfo(Plugin="AfterEffects")
context = self._instance.context
dln_job_info.Name = self._instance.data["name"]
dln_job_info.BatchName = os.path.basename(self._instance.data["source"])
dln_job_info.Plugin = "AfterEffects"
dln_job_info.UserName = context.data.get(
"deadlineUser", getpass.getuser())
if self._instance.data["frameEnd"] > self._instance.data["frameStart"]:
# Deadline requires integers in frame range
frame_range = "{}-{}".format(
int(round(self._instance.data["frameStart"])),
int(round(self._instance.data["frameEnd"])))
dln_job_info.Frames = frame_range
dln_job_info.Priority = self.priority
dln_job_info.Pool = self.primary_pool
dln_job_info.SecondaryPool = self.secondary_pool
dln_job_info.Group = self.group
dln_job_info.Department = self.department
dln_job_info.ChunkSize = self.chunk_size
dln_job_info.OutputFilename = \
os.path.basename(self._instance.data["expectedFiles"][0])
dln_job_info.OutputDirectory = \
os.path.dirname(self._instance.data["expectedFiles"][0])
dln_job_info.JobDelay = "00:00:00"
keys = [
"FTRACK_API_KEY",
"FTRACK_API_USER",
"FTRACK_SERVER",
"AVALON_PROJECT",
"AVALON_ASSET",
"AVALON_TASK",
"AVALON_APP_NAME",
"OPENPYPE_DEV",
"OPENPYPE_LOG_NO_COLORS"
]
# Add mongo url if it's enabled
if self._instance.context.data.get("deadlinePassMongoUrl"):
keys.append("OPENPYPE_MONGO")
environment = dict({key: os.environ[key] for key in keys
if key in os.environ}, **api.Session)
for key in keys:
val = environment.get(key)
if val:
dln_job_info.EnvironmentKeyValue = "{key}={value}".format(
key=key,
value=val)
# to recognize job from PYPE for turning Event On/Off
dln_job_info.EnvironmentKeyValue = "OPENPYPE_RENDER_JOB=1"
return dln_job_info
def get_plugin_info(self):
deadline_plugin_info = DeadlinePluginInfo()
context = self._instance.context
script_path = context.data["currentFile"]
render_path = self._instance.data["expectedFiles"][0]
if len(self._instance.data["expectedFiles"]) > 1:
# replace frame ('000001') with Deadline's required '[#######]'
# expects filename in format project_asset_subset_version.FRAME.ext
render_dir = os.path.dirname(render_path)
file_name = os.path.basename(render_path)
arr = file_name.split('.')
assert len(arr) == 3, \
"Unable to parse frames from {}".format(file_name)
hashed = '[{}]'.format(len(arr[1]) * "#")
render_path = os.path.join(render_dir,
'{}.{}.{}'.format(arr[0], hashed,
arr[2]))
deadline_plugin_info.Comp = self._instance.data["comp_name"]
deadline_plugin_info.Version = self._instance.data["app_version"]
# Must be set here because of the Deadline AE plugin.
# Multiprocess can be overridden by an env var; to disable it for a
# specific app variant set MULTIPROCESS:false in Settings, default is True
env_multi = env_value_to_bool("MULTIPROCESS", default=True)
deadline_plugin_info.MultiProcess = env_multi and self.multiprocess
deadline_plugin_info.SceneFile = self.scene_path
deadline_plugin_info.Output = render_path.replace("\\", "/")
return attr.asdict(deadline_plugin_info)
def from_published_scene(self):
""" Do not overwrite expected files.
`use_published` is set to True, so rendering will be triggered
from the published scene (in the 'publish' folder). The default
implementation of the abstract class renames expected (eg. rendered)
files accordingly, which is not needed here.
"""
return super().from_published_scene(False)
```
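For multi-frame renders the plugin above rewrites the first expected file into Deadline's hash-padded form. A standalone sketch of that substitution, assuming file names follow the `name.FRAME.ext` pattern the assert in `get_plugin_info` checks for (the helper name is hypothetical):
```python
import os

def to_deadline_padding(render_path):
    """Replace the frame token in e.g. 'name.00010.exr' with '[#####]'."""
    render_dir = os.path.dirname(render_path)
    name, frame, ext = os.path.basename(render_path).split('.')
    hashed = '[{}]'.format(len(frame) * '#')
    return os.path.join(render_dir, '{}.{}.{}'.format(name, hashed, ext))

# '/out/shot_comp_v001.00010.exr' -> '/out/shot_comp_v001.[#####].exr'
print(to_deadline_padding('/out/shot_comp_v001.00010.exr'))
```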
#### File: plugins/publish/validate_expected_and_rendered_files.py
```python
import os
import requests
import pyblish.api
from openpype.lib.delivery import collect_frames
from openpype_modules.deadline.abstract_submit_deadline import requests_get
class ValidateExpectedFiles(pyblish.api.InstancePlugin):
"""Compare rendered and expected files"""
label = "Validate rendered files from Deadline"
order = pyblish.api.ValidatorOrder
families = ["render"]
targets = ["deadline"]
# Check whether the actual frame range on the render job differs,
# e.g. when an artist wants to render only a subset of frames
allow_user_override = True
def process(self, instance):
self.instance = instance
frame_list = self._get_frame_list(instance.data["render_job_id"])
for repre in instance.data["representations"]:
expected_files = self._get_expected_files(repre)
staging_dir = repre["stagingDir"]
existing_files = self._get_existing_files(staging_dir)
if self.allow_user_override:
# We always check for user override because the user might have
# also overridden the Job frame list to be longer than the
# originally submitted frame range
# todo: We should first check if Job frame range was overridden
# at all so we don't unnecessarily override anything
file_name_template, frame_placeholder = \
self._get_file_name_template_and_placeholder(
expected_files)
if not file_name_template:
raise RuntimeError("Unable to retrieve file_name template"
"from files: {}".format(expected_files))
job_expected_files = self._get_job_expected_files(
file_name_template,
frame_placeholder,
frame_list)
job_files_diff = job_expected_files.difference(expected_files)
if job_files_diff:
self.log.debug(
"Detected difference in expected output files from "
"Deadline job. Assuming an updated frame list by the "
"user. Difference: {}".format(sorted(job_files_diff))
)
# Update the representation expected files
self.log.info("Update range from actual job range "
"to frame list: {}".format(frame_list))
repre["files"] = sorted(job_expected_files)
# Update the expected files
expected_files = job_expected_files
# We don't use set.difference because we do allow other existing
# files to be in the folder that we might not want to use.
missing = expected_files - existing_files
if missing:
raise RuntimeError("Missing expected files: {}".format(
sorted(missing)))
def _get_frame_list(self, original_job_id):
"""Returns list of frame ranges from all render job.
Render job might be re-submitted so job_id in metadata.json could be
invalid. GlobalJobPreload injects current job id to RENDER_JOB_IDS.
Args:
original_job_id (str)
Returns:
(list)
"""
all_frame_lists = []
render_job_ids = os.environ.get("RENDER_JOB_IDS")
if render_job_ids:
render_job_ids = render_job_ids.split(',')
else: # fallback
render_job_ids = [original_job_id]
for job_id in render_job_ids:
job_info = self._get_job_info(job_id)
frame_list = job_info["Props"]["Frames"]
if frame_list:
all_frame_lists.extend(frame_list.split(','))
return all_frame_lists
def _get_job_expected_files(self,
file_name_template,
frame_placeholder,
frame_list):
"""Calculates list of names of expected rendered files.
Might be different from expected files from submission if user
explicitly and manually changed the frame list on the Deadline job.
"""
# no frames in file name at all, eg 'renderCompositingMain.withLut.mov'
if not frame_placeholder:
return set([file_name_template])
real_expected_rendered = set()
src_padding_exp = "%0{}d".format(len(frame_placeholder))
for frames in frame_list:
if '-' not in frames: # single frame
frames = "{}-{}".format(frames, frames)
start, end = frames.split('-')
for frame in range(int(start), int(end) + 1):
ren_name = file_name_template.replace(
frame_placeholder, src_padding_exp % frame)
real_expected_rendered.add(ren_name)
return real_expected_rendered
def _get_file_name_template_and_placeholder(self, files):
"""Returns file name with frame replaced with # and this placeholder"""
sources_and_frames = collect_frames(files)
file_name_template = frame_placeholder = None
for file_name, frame in sources_and_frames.items():
# There might be cases where clique was unable to collect
# collections in `collect_frames` - thus we capture that case
if frame is not None:
frame_placeholder = "#" * len(frame)
file_name_template = os.path.basename(
file_name.replace(frame, frame_placeholder))
else:
file_name_template = file_name
break
return file_name_template, frame_placeholder
def _get_job_info(self, job_id):
"""Calls DL for actual job info for 'job_id'
Might be different than job info saved in metadata.json if user
manually changes job pre/during rendering.
"""
# get default deadline webservice url from deadline module
deadline_url = self.instance.context.data["defaultDeadline"]
# if custom one is set in instance, use that
if self.instance.data.get("deadlineUrl"):
deadline_url = self.instance.data.get("deadlineUrl")
assert deadline_url, "Requires Deadline Webservice URL"
url = "{}/api/jobs?JobID={}".format(deadline_url, job_id)
try:
response = requests_get(url)
except requests.exceptions.ConnectionError:
self.log.error("Deadline is not accessible at "
"{}".format(deadline_url))
return {}
if not response.ok:
self.log.error("Submission failed!")
self.log.error(response.status_code)
self.log.error(response.content)
raise RuntimeError(response.text)
json_content = response.json()
if json_content:
return json_content.pop()
return {}
def _get_existing_files(self, staging_dir):
"""Returns set of existing file names from 'staging_dir'"""
existing_files = set()
for file_name in os.listdir(staging_dir):
existing_files.add(file_name)
return existing_files
def _get_expected_files(self, repre):
"""Returns set of file names in representation['files']
The representations are collected from `CollectRenderedFiles` using
the metadata.json file submitted along with the render job.
Args:
repre (dict): The representation containing 'files'
Returns:
set: Set of expected file_names in the staging directory.
"""
expected_files = set()
files = repre["files"]
if not isinstance(files, list):
files = [files]
for file_name in files:
expected_files.add(file_name)
return expected_files
```
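The validator expands the Deadline job's frame list back into concrete file names before comparing against the staging directory. A simplified sketch of that expansion, assuming a `#`-padded template such as the one produced by `_get_file_name_template_and_placeholder` (the function name here is hypothetical):
```python
def expand_frame_list(file_name_template, frame_placeholder, frame_list):
    """Expand e.g. 'shot.####.exr' + ['1001-1003', '1010'] into file names."""
    if not frame_placeholder:
        # No frame token at all, e.g. a single .mov file.
        return {file_name_template}
    padding = "%0{}d".format(len(frame_placeholder))
    expected = set()
    for frames in frame_list:
        if '-' not in frames:  # single frame such as '1010'
            frames = "{0}-{0}".format(frames)
        start, end = frames.split('-')
        for frame in range(int(start), int(end) + 1):
            expected.add(
                file_name_template.replace(frame_placeholder, padding % frame)
            )
    return expected

# {'shot.1001.exr', 'shot.1002.exr', 'shot.1003.exr', 'shot.1010.exr'}
print(sorted(expand_frame_list("shot.####.exr", "####", ["1001-1003", "1010"])))
```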
#### File: plugins/OpenPypeTileAssembler/OpenPypeTileAssembler.py
```python
import os
import re
import subprocess
import xml.etree.ElementTree
from System.IO import Path
from Deadline.Plugins import DeadlinePlugin
from Deadline.Scripting import (
FileUtils, RepositoryUtils, SystemUtils)
STRING_TAGS = {
"format"
}
INT_TAGS = {
"x", "y", "z",
"width", "height", "depth",
"full_x", "full_y", "full_z",
"full_width", "full_height", "full_depth",
"tile_width", "tile_height", "tile_depth",
"nchannels",
"alpha_channel",
"z_channel",
"deep",
"subimages",
}
XML_CHAR_REF_REGEX_HEX = re.compile(r"&#x?[0-9a-fA-F]+;")
# Regex to parse array attributes
ARRAY_TYPE_REGEX = re.compile(r"^(int|float|string)\[\d+\]$")
def convert_value_by_type_name(value_type, value):
"""Convert value to proper type based on type name.
In some cases value types have custom python class.
"""
# Simple types
if value_type == "string":
return value
if value_type == "int":
return int(value)
if value_type == "float":
return float(value)
# Vectors will probably have more types
if value_type == "vec2f":
return [float(item) for item in value.split(",")]
# Matrix should always have a square size of elements, e.g. 3x3 or 4x4
# - are returned as list of lists
if value_type == "matrix":
output = []
current_index = -1
parts = value.split(",")
parts_len = len(parts)
if parts_len == 1:
divisor = 1
elif parts_len == 4:
divisor = 2
elif parts_len == 9:
divisor = 3
elif parts_len == 16:
divisor = 4
else:
print("Unknown matrix resolution {}. Value: \"{}\"".format(
parts_len, value
))
for part in parts:
output.append(float(part))
return output
for idx, item in enumerate(parts):
list_index = idx % divisor
if list_index > current_index:
current_index = list_index
output.append([])
output[list_index].append(float(item))
return output
if value_type == "rational2i":
parts = value.split("/")
top = float(parts[0])
bottom = 1.0
if len(parts) != 1:
bottom = float(parts[1])
return float(top) / float(bottom)
if value_type == "vector":
parts = [part.strip() for part in value.split(",")]
output = []
for part in parts:
if part == "-nan":
output.append(None)
continue
try:
part = float(part)
except ValueError:
pass
output.append(part)
return output
if value_type == "timecode":
return value
# Array of other types is converted to list
re_result = ARRAY_TYPE_REGEX.findall(value_type)
if re_result:
array_type = re_result[0]
output = []
for item in value.split(","):
output.append(
convert_value_by_type_name(array_type, item)
)
return output
print((
"MISSING IMPLEMENTATION:"
" Unknown attrib type \"{}\". Value: {}"
).format(value_type, value))
return value
def parse_oiio_xml_output(xml_string):
"""Parse xml output from OIIO info command."""
output = {}
if not xml_string:
return output
# Fix values with ampersand (lazy fix)
# - oiiotool exports invalid xml which ElementTree can't handle,
# e.g. character references such as "&#01;"
# WARNING: this will affect even valid character entities. If you need
# those values correctly, this must take care of valid character ranges.
# See https://github.com/pypeclub/OpenPype/pull/2729
matches = XML_CHAR_REF_REGEX_HEX.findall(xml_string)
for match in matches:
new_value = match.replace("&", "&amp;")
xml_string = xml_string.replace(match, new_value)
tree = xml.etree.ElementTree.fromstring(xml_string)
attribs = {}
output["attribs"] = attribs
for child in tree:
tag_name = child.tag
if tag_name == "attrib":
attrib_def = child.attrib
value = convert_value_by_type_name(
attrib_def["type"], child.text
)
attribs[attrib_def["name"]] = value
continue
# Channels are stored as text on each child
if tag_name == "channelnames":
value = []
for channel in child:
value.append(channel.text)
# Convert known integer type tags to int
elif tag_name in INT_TAGS:
value = int(child.text)
# Keep value of known string tags
elif tag_name in STRING_TAGS:
value = child.text
# Keep value as text for unknown tags
# - feel free to add more tags
else:
value = child.text
print((
"MISSING IMPLEMENTATION:"
" Unknown tag \"{}\". Value \"{}\""
).format(tag_name, value))
output[child.tag] = value
return output
def info_about_input(oiiotool_path, filepath):
args = [
oiiotool_path,
"--info",
"-v",
"-i:infoformat=xml",
filepath
]
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
_stdout, _stderr = popen.communicate()
output = ""
if _stdout:
output += _stdout.decode("utf-8")
if _stderr:
output += _stderr.decode("utf-8")
output = output.replace("\r\n", "\n")
xml_started = False
lines = []
for line in output.split("\n"):
if not xml_started:
if not line.startswith("<"):
continue
xml_started = True
if xml_started:
lines.append(line)
if not xml_started:
raise ValueError(
"Failed to read input file \"{}\".\nOutput:\n{}".format(
filepath, output
)
)
xml_text = "\n".join(lines)
return parse_oiio_xml_output(xml_text)
def GetDeadlinePlugin(): # noqa: N802
"""Helper."""
return OpenPypeTileAssembler()
def CleanupDeadlinePlugin(deadlinePlugin): # noqa: N802, N803
"""Helper."""
deadlinePlugin.cleanup()
class OpenPypeTileAssembler(DeadlinePlugin):
"""Deadline plugin for assembling tiles using OIIO."""
def __init__(self):
"""Init."""
self.InitializeProcessCallback += self.initialize_process
self.RenderExecutableCallback += self.render_executable
self.RenderArgumentCallback += self.render_argument
self.PreRenderTasksCallback += self.pre_render_tasks
self.PostRenderTasksCallback += self.post_render_tasks
def cleanup(self):
"""Cleanup function."""
for stdoutHandler in self.StdoutHandlers:
del stdoutHandler.HandleCallback
del self.InitializeProcessCallback
del self.RenderExecutableCallback
del self.RenderArgumentCallback
del self.PreRenderTasksCallback
del self.PostRenderTasksCallback
def initialize_process(self):
"""Initialization."""
self.SingleFramesOnly = True
self.StdoutHandling = True
self.renderer = self.GetPluginInfoEntryWithDefault(
"Renderer", "undefined")
self.AddStdoutHandlerCallback(
".*Error.*").HandleCallback += self.handle_stdout_error
def render_executable(self):
"""Get render executable name.
Get paths from plugin configuration, find executable and return it.
Returns:
(str): Render executable.
"""
oiiotool_exe_list = self.GetConfigEntry("OIIOTool_RenderExecutable")
oiiotool_exe = FileUtils.SearchFileList(oiiotool_exe_list)
if oiiotool_exe == "":
self.FailRender(("No file found in the semicolon separated "
"list \"{}\". The path to the render executable "
"can be configured from the Plugin Configuration "
"in the Deadline Monitor.").format(
oiiotool_exe_list))
return oiiotool_exe
def render_argument(self):
"""Generate command line arguments for render executable.
Returns:
(str): arguments to add to render executable.
"""
# Read tile config file. This file is in compatible format with
# Draft Tile Assembler
data = {}
with open(self.config_file, "rU") as f:
for text in f:
# Parsing key-value pair and removing white-space
# around the entries
info = [x.strip() for x in text.split("=", 1)]
if len(info) > 1:
try:
data[str(info[0])] = info[1]
except Exception as e:
# should never be called
self.FailRender(
"Cannot parse config file: {}".format(e))
# Get output file. We support only EXRs now.
output_file = data["ImageFileName"]
output_file = RepositoryUtils.CheckPathMapping(output_file)
output_file = self.process_path(output_file)
"""
_, ext = os.path.splitext(output_file)
if "exr" not in ext:
self.FailRender(
"[{}] Only EXR format is supported for now.".format(ext))
"""
tile_info = []
for tile in range(int(data["TileCount"])):
tile_info.append({
"filepath": data["Tile{}".format(tile)],
"pos_x": int(data["Tile{}X".format(tile)]),
"pos_y": int(data["Tile{}Y".format(tile)]),
"height": int(data["Tile{}Height".format(tile)]),
"width": int(data["Tile{}Width".format(tile)])
})
# FFMpeg doesn't support tile coordinates at the moment.
# arguments = self.tile_completer_ffmpeg_args(
# int(data["ImageWidth"]), int(data["ImageHeight"]),
# tile_info, output_file)
arguments = self.tile_oiio_args(
int(data["ImageWidth"]), int(data["ImageHeight"]),
tile_info, output_file)
self.LogInfo(
"Using arguments: {}".format(" ".join(arguments)))
self.tiles = tile_info
return " ".join(arguments)
def process_path(self, filepath):
"""Handle slashes in file paths."""
if SystemUtils.IsRunningOnWindows():
filepath = filepath.replace("/", "\\")
if filepath.startswith("\\") and not filepath.startswith("\\\\"):
filepath = "\\" + filepath
else:
filepath = filepath.replace("\\", "/")
return filepath
def pre_render_tasks(self):
"""Load config file and do remapping."""
self.LogInfo("OpenPype Tile Assembler starting...")
scene_filename = self.GetDataFilename()
temp_scene_directory = self.CreateTempDirectory(
"thread" + str(self.GetThreadNumber()))
temp_scene_filename = Path.GetFileName(scene_filename)
self.config_file = Path.Combine(
temp_scene_directory, temp_scene_filename)
if SystemUtils.IsRunningOnWindows():
RepositoryUtils.CheckPathMappingInFileAndReplaceSeparator(
scene_filename, self.config_file, "/", "\\")
else:
RepositoryUtils.CheckPathMappingInFileAndReplaceSeparator(
scene_filename, self.config_file, "\\", "/")
os.chmod(self.config_file, os.stat(self.config_file).st_mode)
def post_render_tasks(self):
"""Cleanup tiles if required."""
if self.GetBooleanPluginInfoEntryWithDefault("CleanupTiles", False):
self.LogInfo("Cleaning up Tiles...")
for tile in self.tiles:
try:
self.LogInfo("Deleting: {}".format(tile["filepath"]))
os.remove(tile["filepath"])
# By this time we would have errored out
# if error on missing was enabled
except KeyError:
pass
except OSError:
self.LogInfo("Failed to delete: {}".format(
tile["filepath"]))
pass
self.LogInfo("OpenPype Tile Assembler Job finished.")
def handle_stdout_error(self):
"""Handle errors in stdout."""
self.FailRender(self.GetRegexMatch(0))
def tile_oiio_args(
self, output_width, output_height, tile_info, output_path):
"""Generate oiio tool arguments for tile assembly.
Args:
output_width (int): Width of output image.
output_height (int): Height of output image.
tile_info (list): List of tile items, each item must be
dictionary with `filepath`, `pos_x` and `pos_y` keys
representing path to file and x, y coordinates on output
image where top-left point of tile item should start.
output_path (str): Path to the file where the output should be stored.
Returns:
(list): oiio tools arguments.
"""
args = []
# Create new image with output resolution, and with same type and
# channels as input
oiiotool_path = self.render_executable()
first_tile_path = tile_info[0]["filepath"]
first_tile_info = info_about_input(oiiotool_path, first_tile_path)
create_arg_template = "--create{} {}x{} {}"
image_type = ""
image_format = first_tile_info.get("format")
if image_format:
image_type = ":type={}".format(image_format)
create_arg = create_arg_template.format(
image_type, output_width,
output_height, first_tile_info["nchannels"]
)
args.append(create_arg)
for tile in tile_info:
path = tile["filepath"]
pos_x = tile["pos_x"]
tile_height = info_about_input(oiiotool_path, path)["height"]
if self.renderer == "vray":
pos_y = tile["pos_y"]
else:
pos_y = output_height - tile["pos_y"] - tile_height
# Add input path and make sure inputs origin is 0, 0
args.append(path)
args.append("--origin +0+0")
# Swap to have input as foreground
args.append("--swap")
# Paste foreground to background
args.append("--paste +{}+{}".format(pos_x, pos_y))
args.append("-o")
args.append(output_path)
return args
def tile_completer_ffmpeg_args(
self, output_width, output_height, tiles_info, output_path):
"""Generate ffmpeg arguments for tile assembly.
Expected inputs are tiled images.
Args:
output_width (int): Width of output image.
output_height (int): Height of output image.
tiles_info (list): List of tile items, each item must be
dictionary with `filepath`, `pos_x` and `pos_y` keys
representing path to file and x, y coordinates on output
image where top-left point of tile item should start.
output_path (str): Path to the file where the output should be stored.
Returns:
(list): ffmpeg arguments.
"""
previous_name = "base"
ffmpeg_args = []
filter_complex_strs = []
filter_complex_strs.append("nullsrc=size={}x{}[{}]".format(
output_width, output_height, previous_name
))
new_tiles_info = {}
for idx, tile_info in enumerate(tiles_info):
# Add input and store input index
filepath = tile_info["filepath"]
ffmpeg_args.append("-i \"{}\"".format(filepath.replace("\\", "/")))
# Prepare initial filter complex arguments
index_name = "input{}".format(idx)
filter_complex_strs.append(
"[{}]setpts=PTS-STARTPTS[{}]".format(idx, index_name)
)
tile_info["index"] = idx
new_tiles_info[index_name] = tile_info
# Set frames to 1
ffmpeg_args.append("-frames 1")
# Concatenation filter complex arguments
global_index = 1
total_index = len(new_tiles_info)
for index_name, tile_info in new_tiles_info.items():
item_str = (
"[{previous_name}][{index_name}]overlay={pos_x}:{pos_y}"
).format(
previous_name=previous_name,
index_name=index_name,
pos_x=tile_info["pos_x"],
pos_y=tile_info["pos_y"]
)
new_previous = "tmp{}".format(global_index)
if global_index != total_index:
item_str += "[{}]".format(new_previous)
filter_complex_strs.append(item_str)
previous_name = new_previous
global_index += 1
joined_parts = ";".join(filter_complex_strs)
filter_complex_str = "-filter_complex \"{}\"".format(joined_parts)
ffmpeg_args.append(filter_complex_str)
ffmpeg_args.append("-y")
ffmpeg_args.append("\"{}\"".format(output_path))
return ffmpeg_args
```
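The assembler reads a Draft Tile Assembler style config made of `key=value` lines. A minimal standalone sketch of that parsing step, mirroring the `TileCount` / `Tile<N>...` keys used in `render_argument` (the function name is hypothetical):
```python
def parse_tile_config(path):
    """Parse 'key=value' lines into a dict and extract per-tile info."""
    data = {}
    with open(path) as config_file:
        for line in config_file:
            parts = [part.strip() for part in line.split("=", 1)]
            if len(parts) > 1:
                data[parts[0]] = parts[1]

    tiles = []
    for idx in range(int(data["TileCount"])):
        tiles.append({
            "filepath": data["Tile{}".format(idx)],
            "pos_x": int(data["Tile{}X".format(idx)]),
            "pos_y": int(data["Tile{}Y".format(idx)]),
            "width": int(data["Tile{}Width".format(idx)]),
            "height": int(data["Tile{}Height".format(idx)]),
        })
    return data, tiles
```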
#### File: example_addons/example_addon/addon.py
```python
import os
import click
from openpype.modules import (
JsonFilesSettingsDef,
OpenPypeAddOn,
ModulesManager
)
# Import interface defined by this addon to be able to find other addons using it
from openpype_interfaces import (
IPluginPaths,
ITrayAction
)
# Settings definition of this addon using `JsonFilesSettingsDef`
# - JsonFilesSettingsDef is a prepared settings definition that uses json
# files to define settings and store default values
class AddonSettingsDef(JsonFilesSettingsDef):
# This will add prefixes to every schema and template from `schemas`
# subfolder.
# - it is not required to fill the prefix, but it is highly
# recommended as schemas and templates may have name clashes across
# multiple addons
# - it is also recommended that the prefix contains the addon name
schema_prefix = "example_addon"
def get_settings_root_path(self):
"""Implemented abstract class of JsonFilesSettingsDef.
Return directory path where json files defining addon settings are
located.
"""
return os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"settings"
)
class ExampleAddon(OpenPypeAddOn, IPluginPaths, ITrayAction):
"""This Addon has defined it's settings and interface.
This example has system settings with an enabled option. And use
few other interfaces:
- `IPluginPaths` to define custom plugin paths
- `ITrayAction` to be shown in tray tool
"""
label = "Example Addon"
name = "example_addon"
def initialize(self, settings):
"""Initialization of addon."""
module_settings = settings[self.name]
# Enabled by settings
self.enabled = module_settings.get("enabled", False)
# Prepare variables that can be used or set afterwards
self._connected_modules = None
# UI which must not be created at this time
self._dialog = None
def tray_init(self):
"""Implementation of abstract method for `ITrayAction`.
We're definitely in the tray tool so we can pre-create the dialog.
"""
self._create_dialog()
def _create_dialog(self):
# Don't recreate dialog if already exists
if self._dialog is not None:
return
from .widgets import MyExampleDialog
self._dialog = MyExampleDialog()
def show_dialog(self):
"""Show dialog with connected modules.
This can be called from anywhere but can also crash in headless mode.
There is no way to prevent an addon from doing invalid operations if
it does not handle them.
"""
# Make sure dialog is created
self._create_dialog()
# Show dialog
self._dialog.open()
def get_connected_modules(self):
"""Custom implementation of addon."""
names = set()
if self._connected_modules is not None:
for module in self._connected_modules:
names.add(module.name)
return names
def on_action_trigger(self):
"""Implementation of abstract method for `ITrayAction`."""
self.show_dialog()
def get_plugin_paths(self):
"""Implementation of abstract method for `IPluginPaths`."""
current_dir = os.path.dirname(os.path.abspath(__file__))
return {
"publish": [os.path.join(current_dir, "plugins", "publish")]
}
def cli(self, click_group):
click_group.add_command(cli_main)
@click.group(ExampleAddon.name, help="Example addon dynamic cli commands.")
def cli_main():
pass
@cli_main.command()
def nothing():
"""Does nothing but print a message."""
print("You've triggered \"nothing\" command.")
@cli_main.command()
def show_dialog():
"""Show ExampleAddon dialog.
We don't have direct access to the addon through the cli, so we have to
create it again.
"""
from openpype.tools.utils.lib import qt_app_context
manager = ModulesManager()
example_addon = manager.modules_by_name[ExampleAddon.name]
with qt_app_context():
example_addon.show_dialog()
```
#### File: ftrack/event_handlers_user/action_applications.py
```python
import os
from uuid import uuid4
from openpype_modules.ftrack.lib import BaseAction
from openpype.lib.applications import (
ApplicationManager,
ApplicationLaunchFailed,
ApplictionExecutableNotFound,
CUSTOM_LAUNCH_APP_GROUPS
)
from avalon.api import AvalonMongoDB
class AppplicationsAction(BaseAction):
"""Applications Action class."""
type = "Application"
label = "Application action"
identifier = "openpype_app"
_launch_identifier_with_id = None
icon_url = os.environ.get("OPENPYPE_STATICS_SERVER")
def __init__(self, *args, **kwargs):
super(AppplicationsAction, self).__init__(*args, **kwargs)
self.application_manager = ApplicationManager()
self.dbcon = AvalonMongoDB()
@property
def discover_identifier(self):
if self._discover_identifier is None:
self._discover_identifier = "{}.{}".format(
self.identifier, self.process_identifier()
)
return self._discover_identifier
@property
def launch_identifier(self):
if self._launch_identifier is None:
self._launch_identifier = "{}.*".format(self.identifier)
return self._launch_identifier
@property
def launch_identifier_with_id(self):
if self._launch_identifier_with_id is None:
self._launch_identifier_with_id = "{}.{}".format(
self.identifier, self.process_identifier()
)
return self._launch_identifier_with_id
def construct_requirements_validations(self):
# Override validation as this action does not need them
return
def register(self):
"""Registers the action, subscribing the discover and launch topics."""
discovery_subscription = (
"topic=ftrack.action.discover and source.user.username={0}"
).format(self.session.api_user)
self.session.event_hub.subscribe(
discovery_subscription,
self._discover,
priority=self.priority
)
launch_subscription = (
"topic=ftrack.action.launch"
" and data.actionIdentifier={0}"
" and source.user.username={1}"
).format(
self.launch_identifier,
self.session.api_user
)
self.session.event_hub.subscribe(
launch_subscription,
self._launch
)
def _discover(self, event):
entities = self._translate_event(event)
items = self.discover(self.session, entities, event)
if items:
return {"items": items}
def discover(self, session, entities, event):
"""Return true if we can handle the selected entities.
Args:
session (ftrack_api.Session): Helps to query necessary data.
entities (list): Object of selected entities.
event (ftrack_api.Event): Ftrack event causing discover callback.
"""
if (
len(entities) != 1
or entities[0].entity_type.lower() != "task"
):
return False
entity = entities[0]
if entity["parent"].entity_type.lower() == "project":
return False
avalon_project_apps = event["data"].get("avalon_project_apps", None)
avalon_project_doc = event["data"].get("avalon_project_doc", None)
if avalon_project_apps is None:
if avalon_project_doc is None:
ft_project = self.get_project_from_entity(entity)
project_name = ft_project["full_name"]
if not self.dbcon.is_installed():
self.dbcon.install()
self.dbcon.Session["AVALON_PROJECT"] = project_name
avalon_project_doc = self.dbcon.find_one({
"type": "project"
}) or False
event["data"]["avalon_project_doc"] = avalon_project_doc
if not avalon_project_doc:
return False
project_apps_config = avalon_project_doc["config"].get("apps", [])
avalon_project_apps = [
app["name"] for app in project_apps_config
] or False
event["data"]["avalon_project_apps"] = avalon_project_apps
if not avalon_project_apps:
return False
items = []
for app_name in avalon_project_apps:
app = self.application_manager.applications.get(app_name)
if not app or not app.enabled:
continue
if app.group.name in CUSTOM_LAUNCH_APP_GROUPS:
continue
app_icon = app.icon
if app_icon and self.icon_url:
try:
app_icon = app_icon.format(self.icon_url)
except Exception:
self.log.warning((
"Couldn't fill icon path. Icon template: \"{}\""
" --- Icon url: \"{}\""
).format(app_icon, self.icon_url))
app_icon = None
items.append({
"label": app.group.label,
"variant": app.label,
"description": None,
"actionIdentifier": "{}.{}".format(
self.launch_identifier_with_id, app_name
),
"icon": app_icon
})
return items
def _launch(self, event):
event_identifier = event["data"]["actionIdentifier"]
# Check if identifier is same
# - show message that action may not be triggered on this machine
if event_identifier.startswith(self.launch_identifier_with_id):
return BaseAction._launch(self, event)
return {
"success": False,
"message": (
"There are running more OpenPype processes"
" where Application can be launched."
)
}
def launch(self, session, entities, event):
"""Callback method for the custom action.
return either a bool (True if successful or False if the action failed)
or a dictionary with the keys `message` and `success`; the message
should be a string and will be displayed as feedback to the user,
success should be a bool, True if successful or False if the action
failed.
*session* is a `ftrack_api.Session` instance
*entities* is a list of tuples each containing the entity type and
the entity id. If the entity is hierarchical you will always get
the entity type TypedContext; once retrieved through a get operation
you will have the "real" entity type, e.g. Shot, Sequence
or Asset Build.
*event* the unmodified original event
"""
identifier = event["data"]["actionIdentifier"]
id_identifier_len = len(self.launch_identifier_with_id) + 1
app_name = identifier[id_identifier_len:]
entity = entities[0]
task_name = entity["name"]
asset_name = entity["parent"]["name"]
project_name = entity["project"]["full_name"]
self.log.info((
"Ftrack launch app: \"{}\" on Project/Asset/Task: {}/{}/{}"
).format(app_name, project_name, asset_name, task_name))
try:
self.application_manager.launch(
app_name,
project_name=project_name,
asset_name=asset_name,
task_name=task_name
)
except ApplictionExecutableNotFound as exc:
self.log.warning(exc.exc_msg)
return {
"success": False,
"message": exc.msg
}
except ApplicationLaunchFailed as exc:
self.log.error(str(exc))
return {
"success": False,
"message": str(exc)
}
except Exception:
msg = "Unexpected failure of application launch {}".format(
self.label
)
self.log.error(msg, exc_info=True)
return {
"success": False,
"message": msg
}
return {
"success": True,
"message": "Launching {0}".format(self.label)
}
def register(session):
"""Register action. Called when used as an event plugin."""
AppplicationsAction(session).register()
```
#### File: plugins/publish/collect_ftrack_api.py
```python
import logging
import pyblish.api
import avalon.api
class CollectFtrackApi(pyblish.api.ContextPlugin):
""" Collects an ftrack session and the current task id. """
order = pyblish.api.CollectorOrder + 0.4999
label = "Collect Ftrack Api"
def process(self, context):
ftrack_log = logging.getLogger('ftrack_api')
ftrack_log.setLevel(logging.WARNING)
ftrack_log = logging.getLogger('ftrack_api_old')
ftrack_log.setLevel(logging.WARNING)
# Collect session
# NOTE Import python module here to know if import was successful
import ftrack_api
session = ftrack_api.Session(auto_connect_event_hub=False)
self.log.debug("Ftrack user: \"{0}\"".format(session.api_user))
# Collect task
project_name = avalon.api.Session["AVALON_PROJECT"]
asset_name = avalon.api.Session["AVALON_ASSET"]
task_name = avalon.api.Session["AVALON_TASK"]
# Find project entity
project_query = 'Project where full_name is "{0}"'.format(project_name)
self.log.debug("Project query: < {0} >".format(project_query))
project_entities = list(session.query(project_query).all())
if len(project_entities) == 0:
raise AssertionError(
"Project \"{0}\" not found in Ftrack.".format(project_name)
)
# QUESTION Is it possible for this to happen?
elif len(project_entities) > 1:
raise AssertionError((
"Found more than one project with name \"{0}\" in Ftrack."
).format(project_name))
project_entity = project_entities[0]
self.log.debug("Project found: {0}".format(project_entity))
asset_entity = None
if asset_name:
# Find asset entity
entity_query = (
'TypedContext where project_id is "{0}"'
' and name is "{1}"'
).format(project_entity["id"], asset_name)
self.log.debug("Asset entity query: < {0} >".format(entity_query))
asset_entities = []
for entity in session.query(entity_query).all():
# Skip tasks
if entity.entity_type.lower() != "task":
asset_entities.append(entity)
if len(asset_entities) == 0:
raise AssertionError((
"Entity with name \"{0}\" not found"
" in Ftrack project \"{1}\"."
).format(asset_name, project_name))
elif len(asset_entities) > 1:
raise AssertionError((
"Found more than one entity with name \"{0}\""
" in Ftrack project \"{1}\"."
).format(asset_name, project_name))
asset_entity = asset_entities[0]
self.log.debug("Asset found: {0}".format(asset_entity))
task_entity = None
# Find task entity if task is set
if not asset_entity:
self.log.warning(
"Asset entity is not set. Skipping query of task entity."
)
elif not task_name:
self.log.warning("Task name is not set.")
else:
task_query = (
'Task where name is "{0}" and parent_id is "{1}"'
).format(task_name, asset_entity["id"])
self.log.debug("Task entity query: < {0} >".format(task_query))
task_entity = session.query(task_query).first()
if not task_entity:
self.log.warning(
"Task entity with name \"{0}\" was not found.".format(
task_name
)
)
else:
self.log.debug("Task entity found: {0}".format(task_entity))
context.data["ftrackSession"] = session
context.data["ftrackPythonModule"] = ftrack_api
context.data["ftrackProject"] = project_entity
context.data["ftrackEntity"] = asset_entity
context.data["ftrackTask"] = task_entity
self.per_instance_process(context, asset_name, task_name)
def per_instance_process(
self, context, context_asset_name, context_task_name
):
instance_by_asset_and_task = {}
for instance in context:
self.log.debug(
"Checking entities of instance \"{}\"".format(str(instance))
)
instance_asset_name = instance.data.get("asset")
instance_task_name = instance.data.get("task")
if not instance_asset_name and not instance_task_name:
self.log.debug("Instance does not have set context keys.")
continue
elif instance_asset_name and instance_task_name:
if (
instance_asset_name == context_asset_name
and instance_task_name == context_task_name
):
self.log.debug((
"Instance's context is same as in publish context."
" Asset: {} | Task: {}"
).format(context_asset_name, context_task_name))
continue
asset_name = instance_asset_name
task_name = instance_task_name
elif instance_task_name:
if instance_task_name == context_task_name:
self.log.debug((
"Instance's context task is same as in publish"
" context. Task: {}"
).format(context_task_name))
continue
asset_name = context_asset_name
task_name = instance_task_name
elif instance_asset_name:
if instance_asset_name == context_asset_name:
self.log.debug((
"Instance's context asset is same as in publish"
" context. Asset: {}"
).format(context_asset_name))
continue
# Do not use context's task name
task_name = instance_task_name
asset_name = instance_asset_name
if asset_name not in instance_by_asset_and_task:
instance_by_asset_and_task[asset_name] = {}
if task_name not in instance_by_asset_and_task[asset_name]:
instance_by_asset_and_task[asset_name][task_name] = []
instance_by_asset_and_task[asset_name][task_name].append(instance)
if not instance_by_asset_and_task:
return
session = context.data["ftrackSession"]
project_entity = context.data["ftrackProject"]
asset_names = set()
for asset_name in instance_by_asset_and_task.keys():
asset_names.add(asset_name)
joined_asset_names = ",".join([
"\"{}\"".format(name)
for name in asset_names
])
entities = session.query((
"TypedContext where project_id is \"{}\" and name in ({})"
).format(project_entity["id"], joined_asset_names)).all()
entities_by_name = {
entity["name"]: entity
for entity in entities
}
for asset_name, by_task_data in instance_by_asset_and_task.items():
entity = entities_by_name.get(asset_name)
task_entity_by_name = {}
if not entity:
self.log.warning((
"Didn't find entity with name \"{}\" in Project \"{}\""
).format(asset_name, project_entity["full_name"]))
else:
task_entities = session.query((
"select id, name from Task where parent_id is \"{}\""
).format(entity["id"])).all()
for task_entity in task_entities:
task_name_low = task_entity["name"].lower()
task_entity_by_name[task_name_low] = task_entity
for task_name, instances in by_task_data.items():
task_entity = None
if task_name and entity:
task_entity = task_entity_by_name.get(task_name.lower())
for instance in instances:
instance.data["ftrackEntity"] = entity
instance.data["ftrackTask"] = task_entity
self.log.debug((
"Instance {} has own ftrack entities"
" as has different context. TypedContext: {} Task: {}"
).format(str(instance), str(entity), str(task_entity)))
```
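The collector resolves the project, asset and task through plain ftrack queries. A trimmed sketch of that lookup chain, assuming valid ftrack credentials are configured and using placeholder names (`MyProject`, `sh010`, `compositing`):
```python
import ftrack_api

session = ftrack_api.Session(auto_connect_event_hub=False)

project = session.query(
    'Project where full_name is "MyProject"'
).one()
asset = session.query(
    'TypedContext where project_id is "{0}" and name is "sh010"'.format(
        project["id"]
    )
).first()
task = session.query(
    'Task where name is "compositing" and parent_id is "{0}"'.format(
        asset["id"]
    )
).first()
```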
#### File: doc/resource/example_plugin_using_session.py
```python
import logging
import ftrack_api.session
def register_with_session_ready(event):
'''Called when session is ready to be used.'''
logger = logging.getLogger('com.example.example-plugin')
logger.debug('Session ready.')
session = event['data']['session']
# Session is now ready and can be used to e.g. query objects.
task = session.query('Task').first()
print(task['name'])
def register(session, **kw):
'''Register plugin. Called when used as an plugin.'''
logger = logging.getLogger('com.example.example-plugin')
# Validate that session is an instance of ftrack_api.Session. If not,
# assume that register is being called from an old or incompatible API and
# return without doing anything.
if not isinstance(session, ftrack_api.session.Session):
logger.debug(
'Not subscribing plugin as passed argument {0!r} is not an '
'ftrack_api.Session instance.'.format(session)
)
return
session.event_hub.subscribe(
'topic=ftrack.api.session.ready',
register_with_session_ready
)
logger.debug('Plugin registered')
```
#### File: resource/plugin/construct_entity_type.py
```python
import logging
import ftrack_api.entity.factory
class Factory(ftrack_api.entity.factory.StandardFactory):
'''Entity class factory.'''
def create(self, schema, bases=None):
'''Create and return entity class from *schema*.'''
# Optionally change bases for class to be generated.
cls = super(Factory, self).create(schema, bases=bases)
# Further customise cls before returning.
return cls
def register(session):
'''Register plugin with *session*.'''
logger = logging.getLogger('ftrack_plugin:construct_entity_type.register')
# Validate that session is an instance of ftrack_api.Session. If not, assume
# that register is being called from an old or incompatible API and return
# without doing anything.
if not isinstance(session, ftrack_api.Session):
logger.debug(
'Not subscribing plugin as passed argument {0!r} is not an '
'ftrack_api.Session instance.'.format(session)
)
return
factory = Factory()
def construct_entity_type(event):
'''Return class to represent entity type specified by *event*.'''
schema = event['data']['schema']
return factory.create(schema)
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
```
#### File: source/ftrack_api/_centralized_storage_scenario.py
```python
from __future__ import absolute_import
import logging
import json
import sys
import os
import ftrack_api
import ftrack_api.structure.standard as _standard
from ftrack_api.logging import LazyLogMessage as L
scenario_name = 'ftrack.centralized-storage'
class ConfigureCentralizedStorageScenario(object):
'''Configure a centralized storage scenario.'''
def __init__(self):
'''Instantiate centralized storage scenario.'''
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
@property
def storage_scenario(self):
'''Return storage scenario setting.'''
return self.session.query(
'select value from Setting '
'where name is "storage_scenario" and group is "STORAGE"'
).one()
@property
def existing_centralized_storage_configuration(self):
'''Return existing centralized storage configuration.'''
storage_scenario = self.storage_scenario
try:
configuration = json.loads(storage_scenario['value'])
except (ValueError, TypeError):
return None
if not isinstance(configuration, dict):
return None
if configuration.get('scenario') != scenario_name:
return None
return configuration.get('data', {})
def _get_confirmation_text(self, configuration):
'''Return confirmation text from *configuration*.'''
configure_location = configuration.get('configure_location')
select_location = configuration.get('select_location')
select_mount_point = configuration.get('select_mount_point')
if configure_location:
location_text = unicode(
'A new location will be created:\n\n'
'* Label: {location_label}\n'
'* Name: {location_name}\n'
'* Description: {location_description}\n'
).format(**configure_location)
else:
location = self.session.get(
'Location', select_location['location_id']
)
location_text = (
u'You have chosen to use an existing location: {0}'.format(
location['label']
)
)
mount_points_text = unicode(
'* Linux: {linux}\n'
'* OS X: {osx}\n'
'* Windows: {windows}\n\n'
).format(
linux=select_mount_point.get('linux_mount_point') or '*Not set*',
osx=select_mount_point.get('osx_mount_point') or '*Not set*',
windows=select_mount_point.get('windows_mount_point') or '*Not set*'
)
mount_points_not_set = []
if not select_mount_point.get('linux_mount_point'):
mount_points_not_set.append('Linux')
if not select_mount_point.get('osx_mount_point'):
mount_points_not_set.append('OS X')
if not select_mount_point.get('windows_mount_point'):
mount_points_not_set.append('Windows')
if mount_points_not_set:
mount_points_text += unicode(
'Please be aware that this location will not be working on '
'{missing} because the mount points are not set up.'
).format(
missing=' and '.join(mount_points_not_set)
)
text = unicode(
'#Confirm storage setup#\n\n'
'Almost there! Please take a moment to verify the settings you '
'are about to save. You can always come back later and update the '
'configuration.\n'
'##Location##\n\n'
'{location}\n'
'##Mount points##\n\n'
'{mount_points}'
).format(
location=location_text,
mount_points=mount_points_text
)
return text
def configure_scenario(self, event):
'''Configure scenario based on *event* and return form items.'''
steps = (
'select_scenario',
'select_location',
'configure_location',
'select_structure',
'select_mount_point',
'confirm_summary',
'save_configuration'
)
warning_message = ''
values = event['data'].get('values', {})
# Calculate previous step and the next.
previous_step = values.get('step', 'select_scenario')
next_step = steps[steps.index(previous_step) + 1]
state = 'configuring'
self.logger.info(L(
u'Configuring scenario, previous step: {0}, next step: {1}. '
u'Values {2!r}.',
previous_step, next_step, values
))
if 'configuration' in values:
configuration = values.pop('configuration')
else:
configuration = {}
if values:
# Update configuration with values from the previous step.
configuration[previous_step] = values
if previous_step == 'select_location':
values = configuration['select_location']
if values.get('location_id') != 'create_new_location':
location_exists = self.session.query(
'Location where id is "{0}"'.format(
values.get('location_id')
)
).first()
if not location_exists:
next_step = 'select_location'
warning_message = (
'**The selected location does not exist. Please choose '
'one from the dropdown or create a new one.**'
)
if next_step == 'select_location':
try:
location_id = (
self.existing_centralized_storage_configuration['location_id']
)
except (KeyError, TypeError):
location_id = None
options = [{
'label': 'Create new location',
'value': 'create_new_location'
}]
for location in self.session.query(
'select name, label, description from Location'
):
if location['name'] not in (
'ftrack.origin', 'ftrack.unmanaged', 'ftrack.connect',
'ftrack.server', 'ftrack.review'
):
options.append({
'label': u'{label} ({name})'.format(
label=location['label'], name=location['name']
),
'description': location['description'],
'value': location['id']
})
warning = ''
if location_id is not None:
# If there is already a location configured we must make the
# user aware that changing the location may be problematic.
warning = (
'\n\n**Be careful if you switch to another location '
'for an existing storage scenario. Components that have '
'already been published to the previous location will be '
'made unavailable for common use.**'
)
default_value = location_id
elif location_id is None and len(options) == 1:
# No location configured and no existing locations to use.
default_value = 'create_new_location'
else:
# There are existing locations to choose from but none of them
# are currently active in the centralized storage scenario.
default_value = None
items = [{
'type': 'label',
'value': (
'#Select location#\n'
'Choose an already existing location or create a new one '
'to represent your centralized storage. {0}'.format(
warning
)
)
}, {
'type': 'enumerator',
'label': 'Location',
'name': 'location_id',
'value': default_value,
'data': options
}]
default_location_name = 'studio.central-storage-location'
default_location_label = 'Studio location'
default_location_description = (
'The studio central location where all components are '
'stored.'
)
if previous_step == 'configure_location':
configure_location = configuration.get(
'configure_location'
)
if configure_location:
try:
existing_location = self.session.query(
u'Location where name is "{0}"'.format(
configure_location.get('location_name')
)
).first()
except UnicodeEncodeError:
next_step = 'configure_location'
warning_message += (
'**The location name contains non-ascii characters. '
'Please change the name and try again.**'
)
values = configuration['select_location']
else:
if existing_location:
next_step = 'configure_location'
warning_message += (
u'**There is already a location named {0}. '
u'Please change the name and try again.**'.format(
configure_location.get('location_name')
)
)
values = configuration['select_location']
if (
not configure_location.get('location_name') or
not configure_location.get('location_label') or
not configure_location.get('location_description')
):
next_step = 'configure_location'
warning_message += (
'**Location name, label and description cannot '
'be empty.**'
)
values = configuration['select_location']
if next_step == 'configure_location':
# Populate form with previous configuration.
default_location_label = configure_location['location_label']
default_location_name = configure_location['location_name']
default_location_description = (
configure_location['location_description']
)
if next_step == 'configure_location':
if values.get('location_id') == 'create_new_location':
# Add options to create a new location.
items = [{
'type': 'label',
'value': (
'#Create location#\n'
'Here you will create a new location to be used '
'with your new Storage scenario. For your '
'convenience we have already filled in some default '
'values. If this is the first time you are configuring '
'a storage scenario in ftrack we recommend that you '
'stick with these settings.'
)
}, {
'label': 'Label',
'name': 'location_label',
'value': default_location_label,
'type': 'text'
}, {
'label': 'Name',
'name': 'location_name',
'value': default_location_name,
'type': 'text'
}, {
'label': 'Description',
'name': 'location_description',
'value': default_location_description,
'type': 'text'
}]
else:
# The user selected an existing location. Move on to next
# step.
next_step = 'select_mount_point'
if next_step == 'select_structure':
# There is only one structure to choose from, go to next step.
next_step = 'select_mount_point'
# items = [
# {
# 'type': 'label',
# 'value': (
# '#Select structure#\n'
# 'Select which structure to use with your location. '
# 'The structure is used to generate the filesystem '
# 'path for components that are added to this location.'
# )
# },
# {
# 'type': 'enumerator',
# 'label': 'Structure',
# 'name': 'structure_id',
# 'value': 'standard',
# 'data': [{
# 'label': 'Standard',
# 'value': 'standard',
# 'description': (
# 'The Standard structure uses the names in your '
# 'project structure to determine the path.'
# )
# }]
# }
# ]
if next_step == 'select_mount_point':
try:
mount_points = (
self.existing_centralized_storage_configuration['accessor']['mount_points']
)
except (KeyError, TypeError):
mount_points = dict()
items = [
{
'value': (
'#Mount points#\n'
'Set mount points for your centralized storage '
'location. For the location to work as expected each '
'platform that you intend to use must have the '
'corresponding mount point set and the storage must '
'be accessible. If not set correctly files will not be '
'saved or read.'
),
'type': 'label'
}, {
'type': 'text',
'label': 'Linux',
'name': 'linux_mount_point',
'empty_text': 'E.g. /usr/mnt/MyStorage ...',
'value': mount_points.get('linux', '')
}, {
'type': 'text',
'label': 'OS X',
'name': 'osx_mount_point',
'empty_text': 'E.g. /Volumes/MyStorage ...',
'value': mount_points.get('osx', '')
}, {
'type': 'text',
'label': 'Windows',
'name': 'windows_mount_point',
'empty_text': 'E.g. \\\\MyStorage ...',
'value': mount_points.get('windows', '')
}
]
if next_step == 'confirm_summary':
items = [{
'type': 'label',
'value': self._get_confirmation_text(configuration)
}]
state = 'confirm'
if next_step == 'save_configuration':
mount_points = configuration['select_mount_point']
select_location = configuration['select_location']
if select_location['location_id'] == 'create_new_location':
configure_location = configuration['configure_location']
location = self.session.create(
'Location',
{
'name': configure_location['location_name'],
'label': configure_location['location_label'],
'description': (
configure_location['location_description']
)
}
)
else:
location = self.session.query(
'Location where id is "{0}"'.format(
select_location['location_id']
)
).one()
setting_value = json.dumps({
'scenario': scenario_name,
'data': {
'location_id': location['id'],
'location_name': location['name'],
'accessor': {
'mount_points': {
'linux': mount_points['linux_mount_point'],
'osx': mount_points['osx_mount_point'],
'windows': mount_points['windows_mount_point']
}
}
}
})
self.storage_scenario['value'] = setting_value
self.session.commit()
# Broadcast an event that storage scenario has been configured.
event = ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.configure-done'
)
self.session.event_hub.publish(event)
items = [{
'type': 'label',
'value': (
'#Done!#\n'
'Your storage scenario is now configured and ready '
'to use. **Note that you may have to restart Connect and '
'other applications to start using it.**'
)
}]
state = 'done'
if warning_message:
items.insert(0, {
'type': 'label',
'value': warning_message
})
items.append({
'type': 'hidden',
'value': configuration,
'name': 'configuration'
})
items.append({
'type': 'hidden',
'value': next_step,
'name': 'step'
})
return {
'items': items,
'state': state
}
def discover_centralized_scenario(self, event):
'''Return action discover dictionary for *event*.'''
return {
'id': scenario_name,
'name': 'Centralized storage scenario',
'description': (
'(Recommended) centralized storage scenario where all files '
'are kept on a storage that is mounted and available to '
'everyone in the studio.'
)
}
def register(self, session):
'''Subscribe to events on *session*.'''
self.session = session
#: TODO: Move these to a separate function.
session.event_hub.subscribe(
unicode(
'topic=ftrack.storage-scenario.discover '
'and source.user.username="{0}"'
).format(
session.api_user
),
self.discover_centralized_scenario
)
session.event_hub.subscribe(
unicode(
'topic=ftrack.storage-scenario.configure '
'and data.scenario_id="{0}" '
'and source.user.username="{1}"'
).format(
scenario_name,
session.api_user
),
self.configure_scenario
)
class ActivateCentralizedStorageScenario(object):
'''Activate a centralized storage scenario.'''
def __init__(self):
'''Instantiate centralized storage scenario.'''
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
def activate(self, event):
'''Activate scenario in *event*.'''
storage_scenario = event['data']['storage_scenario']
try:
location_data = storage_scenario['data']
location_name = location_data['location_name']
location_id = location_data['location_id']
mount_points = location_data['accessor']['mount_points']
except KeyError:
error_message = (
'Unable to read storage scenario data.'
)
self.logger.error(L(error_message))
raise ftrack_api.exception.LocationError(
'Unable to configure location based on scenario.'
)
else:
location = self.session.create(
'Location',
data=dict(
name=location_name,
id=location_id
),
reconstructing=True
)
if sys.platform == 'darwin':
prefix = mount_points['osx']
elif sys.platform == 'linux2':
prefix = mount_points['linux']
elif sys.platform == 'win32':
prefix = mount_points['windows']
else:
raise ftrack_api.exception.LocationError(
(
'Unable to find accessor prefix for platform {0}.'
).format(sys.platform)
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(
prefix=prefix
)
location.structure = _standard.StandardStructure()
location.priority = 1
self.logger.info(L(
u'Storage scenario activated. Configured {0!r} from '
u'{1!r}',
location, storage_scenario
))
def _verify_startup(self, event):
'''Verify the storage scenario configuration.'''
storage_scenario = event['data']['storage_scenario']
location_data = storage_scenario['data']
mount_points = location_data['accessor']['mount_points']
prefix = None
if sys.platform == 'darwin':
prefix = mount_points['osx']
elif sys.platform == 'linux2':
prefix = mount_points['linux']
elif sys.platform == 'win32':
prefix = mount_points['windows']
if not prefix:
return (
u'The storage scenario has not been configured for your '
u'operating system. ftrack may not be able to '
u'store and track files correctly.'
)
if not os.path.isdir(prefix):
return (
unicode(
'The path {0} does not exist. ftrack may not be able to '
'store and track files correctly. \n\nIf the storage is '
'newly setup you may want to create necessary folder '
'structures. If the storage is a network drive you should '
'make sure that it is mounted correctly.'
).format(prefix)
)
def register(self, session):
'''Subscribe to events on *session*.'''
self.session = session
session.event_hub.subscribe(
(
'topic=ftrack.storage-scenario.activate '
'and data.storage_scenario.scenario="{0}"'.format(
scenario_name
)
),
self.activate
)
# Listen to the verify-startup event from ftrack Connect so that we can
# respond with a message if something about this scenario is not working
# correctly and the user should be notified.
self.session.event_hub.subscribe(
(
'topic=ftrack.connect.verify-startup '
'and data.storage_scenario.scenario="{0}"'.format(
scenario_name
)
),
self._verify_startup
)
def register(session):
'''Register storage scenario.'''
scenario = ActivateCentralizedStorageScenario()
scenario.register(session)
def register_configuration(session):
'''Register storage scenario configuration.'''
scenario = ConfigureCentralizedStorageScenario()
scenario.register(session)
```
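For reference, the JSON payload that `save_configuration` writes to the `storage_scenario` setting, and that `ActivateCentralizedStorageScenario.activate` later reads back, has the shape sketched below. This is an illustrative reconstruction only: the scenario name, id, location name and mount points are placeholder values (the mount points simply reuse the hints shown in the configuration form), not output from a real server.
```python
import json

# Illustrative only: placeholder values standing in for a real configuration.
setting_value = json.dumps({
    'scenario': 'centralized-storage-scenario',  # Assumed scenario_name value.
    'data': {
        'location_id': '00000000-0000-0000-0000-000000000000',
        'location_name': 'studio.central-storage-location',
        'accessor': {
            'mount_points': {
                'linux': '/usr/mnt/MyStorage',
                'osx': '/Volumes/MyStorage',
                'windows': '\\\\MyStorage'
            }
        }
    }
})

# activate() expects this structure under event['data']['storage_scenario']
# and picks the mount point matching sys.platform as the accessor prefix.
prefix = json.loads(setting_value)['data']['accessor']['mount_points']['osx']
```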
#### File: ftrack_api/event/subscription.py
```python
import ftrack_api.event.expression
class Subscription(object):
'''Represent a subscription.'''
parser = ftrack_api.event.expression.Parser()
def __init__(self, subscription):
'''Initialise with *subscription*.'''
self._subscription = subscription
self._expression = self.parser.parse(subscription)
def __str__(self):
'''Return string representation.'''
return self._subscription
def includes(self, event):
'''Return whether subscription includes *event*.'''
return self._expression.match(event)
```
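A minimal usage sketch for the `Subscription` class above, assuming a simple `topic=...` expression; it is not part of the original module, just an illustration of how `__str__` and `includes` behave.
```python
import ftrack_api.event.subscription
from ftrack_api.event.base import Event

subscription = ftrack_api.event.subscription.Subscription('topic=ftrack.update')

# The string representation is the original expression.
assert str(subscription) == 'topic=ftrack.update'

# includes() evaluates the parsed expression against an event instance.
assert subscription.includes(Event(topic='ftrack.update'))
assert not subscription.includes(Event(topic='ftrack.unrelated'))
```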
#### File: unit/entity/test_component.py
```python
import os
import pytest
def test_get_availability(new_component):
'''Retrieve availability in locations.'''
session = new_component.session
availability = new_component.get_availability()
# Note: Currently the origin location is also 0.0 as the link is not
# persisted to the server. This may change in future and this test would
# need updating as a result.
assert set(availability.values()) == set([0.0])
# Add to a location.
source_location = session.query(
'Location where name is "ftrack.origin"'
).one()
target_location = session.query(
'Location where name is "ftrack.unmanaged"'
).one()
target_location.add_component(new_component, source_location)
# Recalculate availability.
# Currently have to manually expire the related attribute. This should be
# solved in future by bi-directional relationship updating.
del new_component['component_locations']
availability = new_component.get_availability()
target_availability = availability.pop(target_location['id'])
assert target_availability == 100.0
# All other locations should still be 0.
assert set(availability.values()) == set([0.0])
@pytest.fixture()
def image_path():
'''Return a path to an image file.'''
image_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
'..',
'fixture',
'media',
'image.png'
)
)
return image_path
def test_create_task_thumbnail(task, image_path):
'''Successfully create thumbnail component and set as task thumbnail.'''
component = task.create_thumbnail(image_path)
component.session.commit()
assert component['id'] == task['thumbnail_id']
def test_create_thumbnail_with_data(task, image_path, unique_name):
'''Successfully create thumbnail component with custom data.'''
data = {'name': unique_name}
component = task.create_thumbnail(image_path, data=data)
component.session.commit()
assert component['name'] == unique_name
```
#### File: unit/entity/test_metadata.py
```python
import uuid
import ftrack_api
def test_query_metadata(new_project):
'''Query metadata.'''
session = new_project.session
metadata_key = uuid.uuid1().hex
metadata_value = uuid.uuid1().hex
new_project['metadata'][metadata_key] = metadata_value
session.commit()
results = session.query(
'Project where metadata.key is {0}'.format(metadata_key)
)
assert len(results) == 1
assert new_project['id'] == results[0]['id']
results = session.query(
'Project where metadata.value is {0}'.format(metadata_value)
)
assert len(results) == 1
assert new_project['id'] == results[0]['id']
results = session.query(
'Project where metadata.key is {0} and '
'metadata.value is {1}'.format(metadata_key, metadata_value)
)
assert len(results) == 1
assert new_project['id'] == results[0]['id']
def test_set_get_metadata_from_different_sessions(new_project):
'''Get and set metadata using different sessions.'''
session = new_project.session
metadata_key = uuid.uuid1().hex
metadata_value = uuid.uuid1().hex
new_project['metadata'][metadata_key] = metadata_value
session.commit()
new_session = ftrack_api.Session()
project = new_session.query(
'Project where id is {0}'.format(new_project['id'])
)[0]
assert project['metadata'][metadata_key] == metadata_value
project['metadata'][metadata_key] = uuid.uuid1().hex
new_session.commit()
new_session = ftrack_api.Session()
project = new_session.query(
'Project where id is {0}'.format(project['id'])
)[0]
assert project['metadata'][metadata_key] != metadata_value
def test_get_set_multiple_metadata(new_project):
'''Get and set multiple metadata.'''
session = new_project.session
new_project['metadata'] = {
'key1': 'value1',
'key2': 'value2'
}
session.commit()
assert set(new_project['metadata'].keys()) == set(['key1', 'key2'])
new_session = ftrack_api.Session()
retrieved = new_session.query(
'Project where id is {0}'.format(new_project['id'])
)[0]
assert set(retrieved['metadata'].keys()) == set(['key1', 'key2'])
def test_metadata_parent_type_remains_in_schema_id_format(session, new_project):
'''Metadata parent_type remains in schema id format post commit.'''
entity = session.create('Metadata', {
'key': 'key', 'value': 'value',
'parent_type': new_project.entity_type,
'parent_id': new_project['id']
})
session.commit()
assert entity['parent_type'] == new_project.entity_type
def test_set_metadata_twice(new_project):
'''Set metadata twice in a row.'''
session = new_project.session
new_project['metadata'] = {
'key1': 'value1',
'key2': 'value2'
}
session.commit()
assert set(new_project['metadata'].keys()) == set(['key1', 'key2'])
new_project['metadata'] = {
'key3': 'value3',
'key4': 'value4'
}
session.commit()
def test_set_same_metadata_on_retrieved_entity(new_project):
'''Set same metadata on retrieved entity.'''
session = new_project.session
new_project['metadata'] = {
'key1': 'value1'
}
session.commit()
project = session.get('Project', new_project['id'])
project['metadata'] = {
'key1': 'value1'
}
session.commit()
```
#### File: unit/entity/test_note.py
```python
import ftrack_api
import ftrack_api.inspection
def test_create_reply(session, new_note, user, unique_name):
'''Create reply to a note.'''
reply_text = 'My reply on note'
new_note.create_reply(reply_text, user)
session.commit()
assert len(new_note['replies']) == 1
assert reply_text == new_note['replies'][0]['content']
def test_create_note_on_entity(session, new_task, user, unique_name):
'''Create note attached to an entity.'''
note = new_task.create_note(unique_name, user)
session.commit()
session.reset()
retrieved_task = session.get(*ftrack_api.inspection.identity(new_task))
assert len(retrieved_task['notes']) == 1
assert (
ftrack_api.inspection.identity(retrieved_task['notes'][0])
== ftrack_api.inspection.identity(note)
)
def test_create_note_on_entity_specifying_recipients(
session, new_task, user, unique_name, new_user
):
'''Create note with specified recipients attached to an entity.'''
recipient = new_user
note = new_task.create_note(unique_name, user, recipients=[recipient])
session.commit()
session.reset()
retrieved_note = session.get(*ftrack_api.inspection.identity(note))
# Note: The calling user is automatically added server side so there will be
# 2 recipients.
assert len(retrieved_note['recipients']) == 2
specified_recipient_present = False
for entry in retrieved_note['recipients']:
if entry['resource_id'] == recipient['id']:
specified_recipient_present = True
break
assert specified_recipient_present
def test_create_note_on_entity_specifying_category(
session, new_task, user, unique_name
):
'''Create note with specified category attached to an entity.'''
category = session.query('NoteCategory').first()
note = new_task.create_note(unique_name, user, category=category)
session.commit()
session.reset()
retrieved_note = session.get(*ftrack_api.inspection.identity(note))
assert retrieved_note['category']['id'] == category['id']
```
#### File: unit/event/test_hub.py
```python
import inspect
import json
import os
import time
import subprocess
import sys
import pytest
import ftrack_api.event.hub
import ftrack_api.event.subscriber
from ftrack_api.event.base import Event
import ftrack_api.exception
class MockClass(object):
'''Mock class for testing.'''
def method(self):
'''Mock method for testing.'''
def mockFunction():
'''Mock function for testing.'''
class MockConnection(object):
'''Mock connection for testing.'''
@property
def connected(self):
'''Return whether connected.'''
return True
def close(self):
'''Close mock connection.'''
pass
def assert_callbacks(hub, callbacks):
'''Assert hub has exactly *callbacks* subscribed.'''
# The subscriber list always starts with the internal handle_reply subscriber.
subscribers = hub._subscribers[:]
subscribers.pop(0)
if len(subscribers) != len(callbacks):
raise AssertionError(
'Number of subscribers ({0}) != number of callbacks ({1})'
.format(len(subscribers), len(callbacks))
)
for index, subscriber in enumerate(subscribers):
if subscriber.callback != callbacks[index]:
raise AssertionError(
'Callback at {0} != subscriber callback at same index.'
.format(index)
)
@pytest.fixture()
def event_hub(request, session):
'''Return event hub to test against.
Hub is automatically connected at start of test and disconnected at end.
'''
hub = ftrack_api.event.hub.EventHub(
session.server_url, session.api_user, session.api_key
)
hub.connect()
def cleanup():
'''Cleanup.'''
if hub.connected:
hub.disconnect()
request.addfinalizer(cleanup)
return hub
@pytest.mark.parametrize('server_url, expected', [
('https://test.ftrackapp.com', 'https://test.ftrackapp.com'),
('https://test.ftrackapp.com:9000', 'https://test.ftrackapp.com:9000')
], ids=[
'without port',
'with port'
])
def test_get_server_url(server_url, expected):
'''Return server url.'''
event_hub = ftrack_api.event.hub.EventHub(
server_url, 'user', 'key'
)
assert event_hub.get_server_url() == expected
@pytest.mark.parametrize('server_url, expected', [
('https://test.ftrackapp.com', 'test.ftrackapp.com'),
('https://test.ftrackapp.com:9000', 'test.ftrackapp.com:9000')
], ids=[
'without port',
'with port'
])
def test_get_network_location(server_url, expected):
'''Return network location of server url.'''
event_hub = ftrack_api.event.hub.EventHub(
server_url, 'user', 'key'
)
assert event_hub.get_network_location() == expected
@pytest.mark.parametrize('server_url, expected', [
('https://test.ftrackapp.com', True),
('http://test.ftrackapp.com', False)
], ids=[
'secure',
'not secure'
])
def test_secure_property(server_url, expected, mocker):
'''Return whether secure connection used.'''
event_hub = ftrack_api.event.hub.EventHub(
server_url, 'user', 'key'
)
assert event_hub.secure is expected
def test_connected_property(session):
'''Return connected state.'''
event_hub = ftrack_api.event.hub.EventHub(
session.server_url, session.api_user, session.api_key
)
assert event_hub.connected is False
event_hub.connect()
assert event_hub.connected is True
event_hub.disconnect()
assert event_hub.connected is False
@pytest.mark.parametrize('server_url, expected', [
('https://test.ftrackapp.com', 'https://test.ftrackapp.com'),
('https://test.ftrackapp.com:9000', 'https://test.ftrackapp.com:9000'),
('test.ftrackapp.com', ValueError),
('https://:9000', ValueError),
], ids=[
'without port',
'with port',
'missing scheme',
'missing hostname'
])
def test_initialise_against_server_url(server_url, expected):
'''Initialise against server url.'''
if inspect.isclass(expected) and issubclass(expected, Exception):
with pytest.raises(expected):
ftrack_api.event.hub.EventHub(
server_url, 'user', 'key'
)
else:
event_hub = ftrack_api.event.hub.EventHub(
server_url, 'user', 'key'
)
assert event_hub.get_server_url() == expected
def test_connect(session):
'''Connect.'''
event_hub = ftrack_api.event.hub.EventHub(
session.server_url, session.api_user, session.api_key
)
event_hub.connect()
assert event_hub.connected is True
event_hub.disconnect()
def test_connect_when_already_connected(event_hub):
'''Fail to connect when already connected'''
assert event_hub.connected is True
with pytest.raises(ftrack_api.exception.EventHubConnectionError) as error:
event_hub.connect()
assert 'Already connected' in str(error)
def test_connect_failure(session, mocker):
'''Fail to connect to server.'''
event_hub = ftrack_api.event.hub.EventHub(
session.server_url, session.api_user, session.api_key
)
def force_fail(*args, **kwargs):
'''Force connection failure.'''
raise Exception('Forced fail.')
mocker.patch('websocket.create_connection', force_fail)
with pytest.raises(ftrack_api.exception.EventHubConnectionError):
event_hub.connect()
def test_connect_missing_required_transport(session, mocker, caplog):
'''Fail to connect to server that does not provide correct transport.'''
event_hub = ftrack_api.event.hub.EventHub(
session.server_url, session.api_user, session.api_key
)
original_get_socket_io_session = event_hub._get_socket_io_session
def _get_socket_io_session():
'''Patched to return no transports.'''
session = original_get_socket_io_session()
return ftrack_api.event.hub.SocketIoSession(
session[0], session[1], []
)
mocker.patch.object(
event_hub, '_get_socket_io_session', _get_socket_io_session
)
with pytest.raises(ftrack_api.exception.EventHubConnectionError):
event_hub.connect()
logs = caplog.records()
assert (
'Server does not support websocket sessions.' in str(logs[-1].exc_info)
)
def test_disconnect(event_hub):
'''Disconnect and unsubscribe all subscribers.'''
event_hub.disconnect()
assert len(event_hub._subscribers) == 0
assert event_hub.connected is False
def test_disconnect_without_unsubscribing(event_hub):
'''Disconnect without unsubscribing all subscribers.'''
event_hub.disconnect(unsubscribe=False)
assert len(event_hub._subscribers) > 0
assert event_hub.connected is False
def test_close_connection_from_manually_connected_hub(session_no_autoconnect_hub):
'''Close connection from manually connected hub.'''
session_no_autoconnect_hub.event_hub.connect()
session_no_autoconnect_hub.close()
assert session_no_autoconnect_hub.event_hub.connected is False
def test_disconnect_when_not_connected(session):
'''Fail to disconnect when not connected'''
event_hub = ftrack_api.event.hub.EventHub(
session.server_url, session.api_user, session.api_key
)
with pytest.raises(ftrack_api.exception.EventHubConnectionError) as error:
event_hub.disconnect()
assert 'Not currently connected' in str(error)
def test_reconnect(event_hub):
'''Reconnect successfully.'''
assert event_hub.connected is True
event_hub.reconnect()
assert event_hub.connected is True
def test_reconnect_when_not_connected(session):
'''Reconnect successfully even if not already connected.'''
event_hub = ftrack_api.event.hub.EventHub(
session.server_url, session.api_user, session.api_key
)
assert event_hub.connected is False
event_hub.reconnect()
assert event_hub.connected is True
event_hub.disconnect()
def test_fail_to_reconnect(session, mocker):
'''Fail to reconnect.'''
event_hub = ftrack_api.event.hub.EventHub(
session.server_url, session.api_user, session.api_key
)
event_hub.connect()
assert event_hub.connected is True
def force_fail(*args, **kwargs):
'''Force connection failure.'''
raise Exception('Forced fail.')
mocker.patch('websocket.create_connection', force_fail)
attempts = 2
with pytest.raises(ftrack_api.exception.EventHubConnectionError) as error:
event_hub.reconnect(attempts=attempts, delay=0.5)
assert 'Failed to reconnect to event server' in str(error)
assert 'after {} attempts'.format(attempts) in str(error)
def test_wait(event_hub):
'''Wait for event and handle as they arrive.'''
called = {'callback': False}
def callback(event):
called['callback'] = True
event_hub.subscribe('topic=test-subscribe', callback)
event_hub.publish(Event(topic='test-subscribe'))
# Until wait, the event should not have been processed even if received.
time.sleep(1)
assert called == {'callback': False}
event_hub.wait(2)
assert called == {'callback': True}
def test_wait_interrupted_by_disconnect(event_hub):
'''Interrupt wait loop with disconnect event.'''
wait_time = 5
start = time.time()
# Inject event directly for test purposes.
event = Event(topic='ftrack.meta.disconnected')
event_hub._event_queue.put(event)
event_hub.wait(wait_time)
assert time.time() - start < wait_time
@pytest.mark.parametrize('identifier, registered', [
('registered-test-subscriber', True),
('unregistered-test-subscriber', False)
], ids=[
'registered',
'missing'
])
def test_get_subscriber_by_identifier(event_hub, identifier, registered):
'''Return subscriber by identifier.'''
def callback(event):
pass
subscriber = {
'id': 'registered-test-subscriber'
}
event_hub.subscribe('topic=test-subscribe', callback, subscriber)
retrieved = event_hub.get_subscriber_by_identifier(identifier)
if registered:
assert isinstance(retrieved, ftrack_api.event.subscriber.Subscriber)
assert retrieved.metadata.get('id') == subscriber['id']
else:
assert retrieved is None
def test_subscribe(event_hub):
'''Subscribe to topics.'''
called = {'a': False, 'b': False}
def callback_a(event):
called['a'] = True
def callback_b(event):
called['b'] = True
event_hub.subscribe('topic=test-subscribe', callback_a)
event_hub.subscribe('topic=test-subscribe-other', callback_b)
event_hub.publish(Event(topic='test-subscribe'))
event_hub.wait(2)
assert called == {'a': True, 'b': False}
def test_subscribe_before_connected(session):
'''Subscribe to topic before connected.'''
event_hub = ftrack_api.event.hub.EventHub(
session.server_url, session.api_user, session.api_key
)
called = {'callback': False}
def callback(event):
called['callback'] = True
identifier = 'test-subscriber'
event_hub.subscribe(
'topic=test-subscribe', callback, subscriber={'id': identifier}
)
assert event_hub.get_subscriber_by_identifier(identifier) is not None
event_hub.connect()
try:
event_hub.publish(Event(topic='test-subscribe'))
event_hub.wait(2)
finally:
event_hub.disconnect()
assert called == {'callback': True}
def test_duplicate_subscriber(event_hub):
'''Fail to subscribe same subscriber more than once.'''
subscriber = {'id': 'test-subscriber'}
event_hub.subscribe('topic=test', None, subscriber=subscriber)
with pytest.raises(ftrack_api.exception.NotUniqueError) as error:
event_hub.subscribe('topic=test', None, subscriber=subscriber)
assert '{0} already exists'.format(subscriber['id']) in str(error)
def test_unsubscribe(event_hub):
'''Unsubscribe a specific callback.'''
def callback_a(event):
pass
def callback_b(event):
pass
identifier_a = event_hub.subscribe('topic=test', callback_a)
identifier_b = event_hub.subscribe('topic=test', callback_b)
assert_callbacks(event_hub, [callback_a, callback_b])
event_hub.unsubscribe(identifier_a)
# Unsubscribe requires a confirmation event, so wait here to give the
# event a chance to be processed.
time.sleep(5)
assert_callbacks(event_hub, [callback_b])
def test_unsubscribe_whilst_disconnected(event_hub):
'''Unsubscribe whilst disconnected.'''
identifier = event_hub.subscribe('topic=test', None)
event_hub.disconnect(unsubscribe=False)
event_hub.unsubscribe(identifier)
assert_callbacks(event_hub, [])
def test_unsubscribe_missing_subscriber(event_hub):
'''Fail to unsubscribe a non-subscribed subscriber.'''
identifier = 'non-subscribed-subscriber'
with pytest.raises(ftrack_api.exception.NotFoundError) as error:
event_hub.unsubscribe(identifier)
assert (
'missing subscriber with identifier {}'.format(identifier)
in str(error)
)
@pytest.mark.parametrize('event_data', [
dict(source=dict(id='1', user=dict(username='auto'))),
dict(source=dict(user=dict(username='auto'))),
dict(source=dict(id='1')),
dict()
], ids=[
'pre-prepared',
'missing id',
'missing user',
'no source'
])
def test_prepare_event(session, event_data):
'''Prepare event.'''
# Replace username `auto` in event data with API user.
try:
if event_data['source']['user']['username'] == 'auto':
event_data['source']['user']['username'] = session.api_user
except KeyError:
pass
event_hub = ftrack_api.event.hub.EventHub(
session.server_url, session.api_user, session.api_key
)
event_hub.id = '1'
event = Event('test', id='event-id', **event_data)
expected = Event(
'test', id='event-id', source=dict(id='1', user=dict(username=session.api_user))
)
event_hub._prepare_event(event)
assert event == expected
def test_prepare_reply_event(session):
'''Prepare reply event.'''
event_hub = ftrack_api.event.hub.EventHub(
session.server_url, session.api_user, session.api_key
)
source_event = Event('source', source=dict(id='source-id'))
reply_event = Event('reply')
event_hub._prepare_reply_event(reply_event, source_event)
assert source_event['source']['id'] in reply_event['target']
assert reply_event['in_reply_to_event'] == source_event['id']
event_hub._prepare_reply_event(reply_event, source_event, {'id': 'source'})
assert reply_event['source'] == {'id': 'source'}
def test_publish(event_hub):
'''Publish asynchronous event.'''
called = {'callback': False}
def callback(event):
called['callback'] = True
event_hub.subscribe('topic=test-subscribe', callback)
event_hub.publish(Event(topic='test-subscribe'))
event_hub.wait(2)
assert called == {'callback': True}
def test_publish_raising_error(event_hub):
'''Raise error, when configured, on failed publish.'''
# Note that the event hub currently only fails publish when not connected.
# All other errors are inconsistently swallowed.
event_hub.disconnect()
event = Event(topic='a-topic', data=dict(status='fail'))
with pytest.raises(Exception):
event_hub.publish(event, on_error='raise')
def test_publish_ignoring_error(event_hub):
'''Ignore error, when configured, on failed publish.'''
# Note that the event hub currently only fails publish when not connected.
# All other errors are inconsistently swallowed.
event_hub.disconnect()
event = Event(topic='a-topic', data=dict(status='fail'))
event_hub.publish(event, on_error='ignore')
def test_publish_logs_other_errors(event_hub, caplog, mocker):
'''Log publish errors other than connection error.'''
# Mock connection to force error.
mocker.patch.object(event_hub, '_connection', MockConnection())
event = Event(topic='a-topic', data=dict(status='fail'))
event_hub.publish(event)
expected = 'Error sending event {0}.'.format(event)
messages = [record.getMessage().strip() for record in caplog.records()]
assert expected in messages, 'Expected log message missing in output.'
def test_synchronous_publish(event_hub):
'''Publish event synchronously and collect results.'''
def callback_a(event):
return 'A'
def callback_b(event):
return 'B'
def callback_c(event):
return 'C'
event_hub.subscribe('topic=test', callback_a, priority=50)
event_hub.subscribe('topic=test', callback_b, priority=60)
event_hub.subscribe('topic=test', callback_c, priority=70)
results = event_hub.publish(Event(topic='test'), synchronous=True)
assert results == ['A', 'B', 'C']
def test_publish_with_reply(event_hub):
'''Publish asynchronous event with on reply handler.'''
def replier(event):
'''Replier.'''
return 'Replied'
event_hub.subscribe('topic=test', replier)
called = {'callback': None}
def on_reply(event):
called['callback'] = event['data']
event_hub.publish(Event(topic='test'), on_reply=on_reply)
event_hub.wait(2)
assert called['callback'] == 'Replied'
def test_publish_with_multiple_replies(event_hub):
'''Publish asynchronous event and retrieve multiple replies.'''
def replier_one(event):
'''Replier.'''
return 'One'
def replier_two(event):
'''Replier.'''
return 'Two'
event_hub.subscribe('topic=test', replier_one)
event_hub.subscribe('topic=test', replier_two)
called = {'callback': []}
def on_reply(event):
called['callback'].append(event['data'])
event_hub.publish(Event(topic='test'), on_reply=on_reply)
event_hub.wait(2)
assert sorted(called['callback']) == ['One', 'Two']
@pytest.mark.slow
def test_server_heartbeat_response():
'''Maintain connection by responding to server heartbeat request.'''
test_script = os.path.join(
os.path.dirname(__file__), 'event_hub_server_heartbeat.py'
)
# Start subscriber that will listen for all three messages.
subscriber = subprocess.Popen([sys.executable, test_script, 'subscribe'])
# Give subscriber time to connect to server.
time.sleep(10)
# Start publisher to publish three messages.
publisher = subprocess.Popen([sys.executable, test_script, 'publish'])
publisher.wait()
subscriber.wait()
assert subscriber.returncode == 0
def test_stop_event(event_hub):
'''Stop processing of subsequent local handlers when stop flag set.'''
called = {
'a': False,
'b': False,
'c': False
}
def callback_a(event):
called['a'] = True
def callback_b(event):
called['b'] = True
event.stop()
def callback_c(event):
called['c'] = True
event_hub.subscribe('topic=test', callback_a, priority=50)
event_hub.subscribe('topic=test', callback_b, priority=60)
event_hub.subscribe('topic=test', callback_c, priority=70)
event_hub.publish(Event(topic='test'))
event_hub.wait(2)
assert called == {
'a': True,
'b': True,
'c': False
}
def test_encode(session):
'''Encode event data.'''
encoded = session.event_hub._encode(
dict(name='ftrack.event', args=[Event('test')])
)
assert 'inReplyToEvent' in encoded
assert 'in_reply_to_event' not in encoded
def test_decode(session):
'''Decode event data.'''
decoded = session.event_hub._decode(
json.dumps({
'inReplyToEvent': 'id'
})
)
assert 'in_reply_to_event' in decoded
assert 'inReplyToEvent' not in decoded
```
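The hub tests above repeatedly exercise the same publish/subscribe/wait cycle; the sketch below condenses that pattern into a single snippet. It assumes a reachable ftrack server with credentials in the usual `FTRACK_*` environment variables, so treat it as an illustration of the API surface rather than a drop-in script.
```python
import os

import ftrack_api.event.hub
from ftrack_api.event.base import Event

# Assumed environment variables; substitute real credentials as needed.
hub = ftrack_api.event.hub.EventHub(
    os.environ['FTRACK_SERVER'],
    os.environ['FTRACK_API_USER'],
    os.environ['FTRACK_API_KEY']
)
hub.connect()

received = []

def on_event(event):
    '''Collect data from matching events.'''
    received.append(event['data'])

# Subscribe with an expression, publish a matching event and pump the hub
# for a couple of seconds so handlers run, mirroring the tests above.
hub.subscribe('topic=my-example-topic', on_event)
hub.publish(Event(topic='my-example-topic', data={'message': 'hello'}))
hub.wait(2)

hub.disconnect()
```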
#### File: unit/structure/test_id.py
```python
import inspect
import pytest
import ftrack_api
import ftrack_api.structure.id
@pytest.fixture(scope='session')
def structure():
'''Return structure.'''
return ftrack_api.structure.id.IdStructure(prefix='path')
# Note: When it is possible to use indirect=True on just a few arguments, the
# called functions here can change to standard fixtures.
# https://github.com/pytest-dev/pytest/issues/579
def file_component(container=None):
'''Return file component.'''
session = ftrack_api.Session()
entity = session.create('FileComponent', {
'id': 'f6cd40cb-d1c0-469f-a2d5-10369be8a724',
'name': '0001',
'file_type': '.png',
'container': container
})
return entity
def sequence_component(padding=0):
'''Return sequence component with *padding*.'''
session = ftrack_api.Session()
entity = session.create('SequenceComponent', {
'id': 'ff17edad-2129-483b-8b59-d1a654c8497b',
'name': 'sequence_component',
'file_type': '.png',
'padding': padding
})
return entity
def container_component():
'''Return container component.'''
session = ftrack_api.Session()
entity = session.create('ContainerComponent', {
'id': '03ab9967-f86c-4b55-8252-cd187d0c244a',
'name': 'container_component'
})
return entity
def unsupported_entity():
'''Return an unsupported entity.'''
session = ftrack_api.Session()
entity = session.create('User', {
'username': 'martin'
})
return entity
@pytest.mark.parametrize('entity, context, expected', [
(
file_component(), {},
'path/f/6/c/d/40cb-d1c0-469f-a2d5-10369be8a724.png'
),
(
file_component(container_component()), {},
'path/0/3/a/b/9967-f86c-4b55-8252-cd187d0c244a/'
'f6cd40cb-d1c0-469f-a2d5-10369be8a724.png'
),
(
file_component(sequence_component()), {},
'path/f/f/1/7/edad-2129-483b-8b59-d1a654c8497b/file.0001.png'
),
(
sequence_component(padding=0), {},
'path/f/f/1/7/edad-2129-483b-8b59-d1a654c8497b/file.%d.png'
),
(
sequence_component(padding=4), {},
'path/f/f/1/7/edad-2129-483b-8b59-d1a654c8497b/file.%04d.png'
),
(
container_component(), {},
'path/0/3/a/b/9967-f86c-4b55-8252-cd187d0c244a'
),
(unsupported_entity(), {}, NotImplementedError)
], ids=[
'file-component',
'file-component-in-container',
'file-component-in-sequence',
'unpadded-sequence-component',
'padded-sequence-component',
'container-component',
'unsupported-entity'
])
def test_get_resource_identifier(structure, entity, context, expected):
'''Get resource identifier.'''
if inspect.isclass(expected) and issubclass(expected, Exception):
with pytest.raises(expected):
structure.get_resource_identifier(entity, context)
else:
assert structure.get_resource_identifier(entity, context) == expected
```
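As a quick reference for the parametrised expectations above: `IdStructure` fans the leading characters of a component id out into nested directories beneath the configured prefix. The sketch below mirrors the helper functions in the test and therefore assumes a reachable server for `ftrack_api.Session()`; the id is the same fixed value used by `file_component`.
```python
import ftrack_api
import ftrack_api.structure.id

structure = ftrack_api.structure.id.IdStructure(prefix='path')

session = ftrack_api.Session()
component = session.create('FileComponent', {
    'id': 'f6cd40cb-d1c0-469f-a2d5-10369be8a724',
    'name': '0001',
    'file_type': '.png'
})

# The first four characters of the id become nested directories under the
# prefix; the remainder plus the file type form the file name.
assert structure.get_resource_identifier(component, {}) == (
    'path/f/6/c/d/40cb-d1c0-469f-a2d5-10369be8a724.png'
)
```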
#### File: test/unit/test_cache.py
```python
import os
import uuid
import tempfile
import pytest
import ftrack_api.cache
@pytest.fixture(params=['proxy', 'layered', 'memory', 'file', 'serialised'])
def cache(request):
'''Return cache.'''
if request.param == 'proxy':
cache = ftrack_api.cache.ProxyCache(
ftrack_api.cache.MemoryCache()
)
elif request.param == 'layered':
cache = ftrack_api.cache.LayeredCache(
[ftrack_api.cache.MemoryCache()]
)
elif request.param == 'memory':
cache = ftrack_api.cache.MemoryCache()
elif request.param == 'file':
cache_path = os.path.join(
tempfile.gettempdir(), '{0}.dbm'.format(uuid.uuid4().hex)
)
cache = ftrack_api.cache.FileCache(cache_path)
def cleanup():
'''Cleanup.'''
try:
os.remove(cache_path)
except OSError:
# BSD DB (Mac OSX) implementation of the interface will append
# a .db extension.
os.remove(cache_path + '.db')
request.addfinalizer(cleanup)
elif request.param == 'serialised':
cache = ftrack_api.cache.SerialisedCache(
ftrack_api.cache.MemoryCache(),
encode=lambda value: value,
decode=lambda value: value
)
else:
raise ValueError(
'Unrecognised cache fixture type {0!r}'.format(request.param)
)
return cache
class Class(object):
'''Class for testing.'''
def method(self, key):
'''Method for testing.'''
def function(mutable, x, y=2):
'''Function for testing.'''
mutable['called'] = True
return {'result': x + y}
def assert_memoised_call(
memoiser, function, expected, args=None, kw=None, memoised=True
):
'''Assert *function* call via *memoiser* was *memoised*.'''
mapping = {'called': False}
if args is not None:
args = (mapping,) + args
else:
args = (mapping,)
result = memoiser.call(function, args, kw)
assert result == expected
assert mapping['called'] is not memoised
def test_get(cache):
'''Retrieve item from cache.'''
cache.set('key', 'value')
assert cache.get('key') == 'value'
def test_get_missing_key(cache):
'''Fail to retrieve missing item from cache.'''
with pytest.raises(KeyError):
cache.get('key')
def test_set(cache):
'''Set item in cache.'''
with pytest.raises(KeyError):
cache.get('key')
cache.set('key', 'value')
assert cache.get('key') == 'value'
def test_remove(cache):
'''Remove item from cache.'''
cache.set('key', 'value')
cache.remove('key')
with pytest.raises(KeyError):
cache.get('key')
def test_remove_missing_key(cache):
'''Fail to remove missing key.'''
with pytest.raises(KeyError):
cache.remove('key')
def test_keys(cache):
'''Retrieve keys of items in cache.'''
assert cache.keys() == []
cache.set('a', 'a_value')
cache.set('b', 'b_value')
cache.set('c', 'c_value')
assert sorted(cache.keys()) == sorted(['a', 'b', 'c'])
def test_clear(cache):
'''Remove items from cache.'''
cache.set('a', 'a_value')
cache.set('b', 'b_value')
cache.set('c', 'c_value')
assert cache.keys()
cache.clear()
assert not cache.keys()
def test_clear_using_pattern(cache):
'''Remove items that match pattern from cache.'''
cache.set('matching_key', 'value')
cache.set('another_matching_key', 'value')
cache.set('key_not_matching', 'value')
assert cache.keys()
cache.clear(pattern='.*matching_key$')
assert cache.keys() == ['key_not_matching']
def test_clear_encountering_missing_key(cache, mocker):
'''Clear missing key.'''
# Force reporting keys that are not actually valid for test purposes.
mocker.patch.object(cache, 'keys', lambda: ['missing'])
assert cache.keys() == ['missing']
# Should not error even though key not valid.
cache.clear()
# The key was not successfully removed so should still be present.
assert cache.keys() == ['missing']
def test_layered_cache_propagates_value_on_get():
'''Layered cache propagates value on get.'''
caches = [
ftrack_api.cache.MemoryCache(),
ftrack_api.cache.MemoryCache(),
ftrack_api.cache.MemoryCache()
]
cache = ftrack_api.cache.LayeredCache(caches)
# Set item on second level cache only.
caches[1].set('key', 'value')
# Retrieving key via layered cache should propagate it automatically to
# higher level caches only.
assert cache.get('key') == 'value'
assert caches[0].get('key') == 'value'
with pytest.raises(KeyError):
caches[2].get('key')
def test_layered_cache_remove_at_depth():
'''Remove key that only exists at depth in LayeredCache.'''
caches = [
ftrack_api.cache.MemoryCache(),
ftrack_api.cache.MemoryCache()
]
cache = ftrack_api.cache.LayeredCache(caches)
# Set item on second level cache only.
caches[1].set('key', 'value')
# Removing key that only exists at depth should not raise key error.
cache.remove('key')
# Ensure key was removed.
assert not cache.keys()
def test_expand_references():
'''Test that references are expanded from serialized cache.'''
cache_path = os.path.join(
tempfile.gettempdir(), '{0}.dbm'.format(uuid.uuid4().hex)
)
def make_cache(session, cache_path):
'''Create a serialised file cache.'''
serialized_file_cache = ftrack_api.cache.SerialisedCache(
ftrack_api.cache.FileCache(cache_path),
encode=session.encode,
decode=session.decode
)
return serialized_file_cache
# Populate the serialized file cache.
session = ftrack_api.Session(
cache=lambda session, cache_path=cache_path:make_cache(
session, cache_path
)
)
expanded_results = dict()
query_string = 'select asset.parent from AssetVersion where asset is_not None limit 10'
for sequence in session.query(query_string):
asset = sequence.get('asset')
expanded_results.setdefault(
asset.get('id'), asset.get('parent')
)
# Fetch the data from cache.
new_session = ftrack_api.Session(
cache=lambda session, cache_path=cache_path:make_cache(
session, cache_path
)
)
new_session_two = ftrack_api.Session(
cache=lambda session, cache_path=cache_path:make_cache(
session, cache_path
)
)
# Make sure references are merged.
for sequence in new_session.query(query_string):
asset = sequence.get('asset')
assert (
asset.get('parent') == expanded_results[asset.get('id')]
)
# Use for fetching directly using get.
assert (
new_session_two.get(asset.entity_type, asset.get('id')).get('parent') ==
expanded_results[asset.get('id')]
)
@pytest.mark.parametrize('items, key', [
(({},), '{}'),
(({}, {}), '{}{}')
], ids=[
'single object',
'multiple objects'
])
def test_string_key_maker_key(items, key):
'''Generate key using string key maker.'''
key_maker = ftrack_api.cache.StringKeyMaker()
assert key_maker.key(*items) == key
@pytest.mark.parametrize('items, key', [
(
({},),
'\x01\x01'
),
(
({'a': 'b'}, [1, 2]),
'\x01'
'\x80\x02U\x01a.' '\x02' '\x80\x02U\x01b.'
'\x01'
'\x00'
'\x03'
'\x80\x02K\x01.' '\x00' '\x80\x02K\x02.'
'\x03'
),
(
(function,),
'\x04function\x00unit.test_cache'
),
(
(Class,),
'\x04Class\x00unit.test_cache'
),
(
(Class.method,),
'\x04method\x00Class\x00unit.test_cache'
),
(
(callable,),
'\x04callable'
)
], ids=[
'single mapping',
'multiple objects',
'function',
'class',
'method',
'builtin'
])
def test_object_key_maker_key(items, key):
'''Generate key using object key maker.'''
key_maker = ftrack_api.cache.ObjectKeyMaker()
assert key_maker.key(*items) == key
def test_memoised_call():
'''Call memoised function.'''
memoiser = ftrack_api.cache.Memoiser()
# Initial call should not be memoised so function is executed.
assert_memoised_call(
memoiser, function, args=(1,), expected={'result': 3}, memoised=False
)
# Identical call should be memoised so function is not executed again.
assert_memoised_call(
memoiser, function, args=(1,), expected={'result': 3}, memoised=True
)
# Differing call is not memoised so function is executed.
assert_memoised_call(
memoiser, function, args=(3,), expected={'result': 5}, memoised=False
)
def test_memoised_call_variations():
'''Call memoised function with identical arguments passed in varying formats.'''
memoiser = ftrack_api.cache.Memoiser()
expected = {'result': 3}
# Call function once to ensure is memoised.
assert_memoised_call(
memoiser, function, args=(1,), expected=expected, memoised=False
)
# Each of the following calls should equate to the same key and make
# use of the memoised value.
for args, kw in [
((), {'x': 1}),
((), {'x': 1, 'y': 2}),
((1,), {'y': 2}),
((1,), {})
]:
assert_memoised_call(
memoiser, function, args=args, kw=kw, expected=expected
)
# The following calls should all be treated as new variations and so
# not use any memoised value.
assert_memoised_call(
memoiser, function, kw={'x': 2}, expected={'result': 4}, memoised=False
)
assert_memoised_call(
memoiser, function, kw={'x': 3, 'y': 2}, expected={'result': 5},
memoised=False
)
assert_memoised_call(
memoiser, function, args=(4, ), kw={'y': 2}, expected={'result': 6},
memoised=False
)
assert_memoised_call(
memoiser, function, args=(5, ), expected={'result': 7}, memoised=False
)
def test_memoised_mutable_return_value():
'''Avoid side effects for returned mutable arguments when memoising.'''
memoiser = ftrack_api.cache.Memoiser()
arguments = ({'called': False}, 1)
result_a = memoiser.call(function, arguments)
assert result_a == {'result': 3}
assert arguments[0]['called']
# Modify mutable externally and check that stored memoised value is
# unchanged.
del result_a['result']
arguments[0]['called'] = False
result_b = memoiser.call(function, arguments)
assert result_b == {'result': 3}
assert not arguments[0]['called']
```
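A self-contained sketch of the two caching behaviours the tests above focus on, value propagation in `LayeredCache` and result reuse in `Memoiser`; it only uses in-memory caches, so no server is needed. The helper function and counter are made up for illustration.
```python
import ftrack_api.cache

# Layered cache: a value found in a lower layer is propagated upwards on get.
layers = [ftrack_api.cache.MemoryCache(), ftrack_api.cache.MemoryCache()]
layered = ftrack_api.cache.LayeredCache(layers)
layers[1].set('key', 'value')
assert layered.get('key') == 'value'
assert layers[0].get('key') == 'value'

# Memoiser: calls with equivalent arguments reuse the stored result.
calls = {'count': 0}

def expensive(x, y=2):
    '''Pretend expensive function.'''
    calls['count'] += 1
    return {'result': x + y}

memoiser = ftrack_api.cache.Memoiser()
assert memoiser.call(expensive, (1,), None) == {'result': 3}
assert memoiser.call(expensive, (), {'x': 1, 'y': 2}) == {'result': 3}
assert calls['count'] == 1
```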
#### File: test/unit/test_collection.py
```python
import copy
import uuid
import mock
import pytest
import ftrack_api.collection
import ftrack_api.symbol
import ftrack_api.inspection
import ftrack_api.exception
import ftrack_api.operation
def create_mock_entity(session):
'''Return new mock entity for *session*.'''
entity = mock.MagicMock()
entity.session = session
entity.primary_key_attributes = ['id']
entity['id'] = str(uuid.uuid4())
return entity
@pytest.fixture
def mock_entity(session):
'''Return mock entity.'''
return create_mock_entity(session)
@pytest.fixture
def mock_entities(session):
'''Return list of two mock entities.'''
return [
create_mock_entity(session),
create_mock_entity(session)
]
@pytest.fixture
def mock_attribute():
'''Return mock attribute.'''
attribute = mock.MagicMock()
attribute.name = 'test'
return attribute
def test_collection_initialisation_does_not_modify_entity_state(
mock_entity, mock_attribute, mock_entities
):
'''Initialising collection does not modify entity state.'''
ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities
)
assert ftrack_api.inspection.state(mock_entity) is ftrack_api.symbol.NOT_SET
def test_immutable_collection_initialisation(
mock_entity, mock_attribute, mock_entities
):
'''Initialise immutable collection.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities, mutable=False
)
assert list(collection) == mock_entities
assert collection.mutable is False
def test_collection_shallow_copy(
mock_entity, mock_attribute, mock_entities, session
):
'''Shallow copying collection should avoid indirect mutation.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities
)
with mock_entity.session.operation_recording(False):
collection_copy = copy.copy(collection)
new_entity = create_mock_entity(session)
collection_copy.append(new_entity)
assert list(collection) == mock_entities
assert list(collection_copy) == mock_entities + [new_entity]
def test_collection_insert(
mock_entity, mock_attribute, mock_entities, session
):
'''Insert a value into collection.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities
)
new_entity = create_mock_entity(session)
collection.insert(0, new_entity)
assert list(collection) == [new_entity] + mock_entities
def test_collection_insert_duplicate(
mock_entity, mock_attribute, mock_entities
):
'''Fail to insert a duplicate value into collection.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities
)
with pytest.raises(ftrack_api.exception.DuplicateItemInCollectionError):
collection.insert(0, mock_entities[1])
def test_immutable_collection_insert(
mock_entity, mock_attribute, mock_entities, session
):
'''Fail to insert a value into immutable collection.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities, mutable=False
)
with pytest.raises(ftrack_api.exception.ImmutableCollectionError):
collection.insert(0, create_mock_entity(session))
def test_collection_set_item(
mock_entity, mock_attribute, mock_entities, session
):
'''Set item at index in collection.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities
)
new_entity = create_mock_entity(session)
collection[0] = new_entity
assert list(collection) == [new_entity, mock_entities[1]]
def test_collection_re_set_item(
mock_entity, mock_attribute, mock_entities
):
'''Re-set value at exact same index in collection.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities
)
collection[0] = mock_entities[0]
assert list(collection) == mock_entities
def test_collection_set_duplicate_item(
mock_entity, mock_attribute, mock_entities
):
'''Fail to set a duplicate value into collection at different index.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities
)
with pytest.raises(ftrack_api.exception.DuplicateItemInCollectionError):
collection[0] = mock_entities[1]
def test_immutable_collection_set_item(
mock_entity, mock_attribute, mock_entities
):
'''Fail to set item at index in immutable collection.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities, mutable=False
)
with pytest.raises(ftrack_api.exception.ImmutableCollectionError):
collection[0] = mock_entities[0]
def test_collection_delete_item(
mock_entity, mock_attribute, mock_entities
):
'''Remove item at index from collection.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities
)
del collection[0]
assert list(collection) == [mock_entities[1]]
def test_collection_delete_item_at_invalid_index(
mock_entity, mock_attribute, mock_entities
):
'''Fail to remove item at missing index from collection.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities
)
with pytest.raises(IndexError):
del collection[4]
def test_immutable_collection_delete_item(
mock_entity, mock_attribute, mock_entities
):
'''Fail to remove item at index from immutable collection.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities, mutable=False
)
with pytest.raises(ftrack_api.exception.ImmutableCollectionError):
del collection[0]
def test_collection_count(
mock_entity, mock_attribute, mock_entities, session
):
'''Count items in collection.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities
)
assert len(collection) == 2
collection.append(create_mock_entity(session))
assert len(collection) == 3
del collection[0]
assert len(collection) == 2
@pytest.mark.parametrize('other, expected', [
([], False),
([1, 2], True),
([1, 2, 3], False),
([1], False)
], ids=[
'empty',
'same',
'additional',
'missing'
])
def test_collection_equal(mocker, mock_entity, mock_attribute, other, expected):
'''Determine collection equality against another collection.'''
# Temporarily override determination of entity identity so that it works
# against simple scalar values for purpose of test.
mocker.patch.object(
ftrack_api.inspection, 'identity', lambda entity: str(entity)
)
collection_a = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=[1, 2]
)
collection_b = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=other
)
assert (collection_a == collection_b) is expected
def test_collection_not_equal_to_non_collection(
mocker, mock_entity, mock_attribute
):
'''Collection not equal to a non-collection.'''
# Temporarily override determination of entity identity so that it works
# against simple scalar values for purpose of test.
mocker.patch.object(
ftrack_api.inspection, 'identity', lambda entity: str(entity)
)
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=[1, 2]
)
assert (collection != {}) is True
def test_collection_notify_on_modification(
mock_entity, mock_attribute, mock_entities, session
):
'''Record UpdateEntityOperation on collection modification.'''
collection = ftrack_api.collection.Collection(
mock_entity, mock_attribute, data=mock_entities
)
assert len(session.recorded_operations) == 0
collection.append(create_mock_entity(session))
assert len(session.recorded_operations) == 1
operation = session.recorded_operations.pop()
assert isinstance(operation, ftrack_api.operation.UpdateEntityOperation)
assert operation.new_value == collection
def test_mapped_collection_proxy_shallow_copy(new_project, unique_name):
'''Shallow copying mapped collection proxy avoids indirect mutation.'''
metadata = new_project['metadata']
with new_project.session.operation_recording(False):
metadata_copy = copy.copy(metadata)
metadata_copy[unique_name] = True
assert unique_name not in metadata
assert unique_name in metadata_copy
def test_mapped_collection_proxy_mutable_property(new_project):
'''Mapped collection mutable property maps to underlying collection.'''
metadata = new_project['metadata']
assert metadata.mutable is True
assert metadata.collection.mutable is True
metadata.mutable = False
assert metadata.collection.mutable is False
def test_mapped_collection_proxy_attribute_property(
new_project, mock_attribute
):
'''Mapped collection attribute property maps to underlying collection.'''
metadata = new_project['metadata']
assert metadata.attribute is metadata.collection.attribute
metadata.attribute = mock_attribute
assert metadata.collection.attribute is mock_attribute
def test_mapped_collection_proxy_get_item(new_project, unique_name):
'''Retrieve item in mapped collection proxy.'''
session = new_project.session
# Prepare data.
metadata = new_project['metadata']
value = 'value'
metadata[unique_name] = value
session.commit()
# Check in clean session retrieval of value.
session.reset()
retrieved = session.get(*ftrack_api.inspection.identity(new_project))
assert retrieved is not new_project
assert retrieved['metadata'].keys() == [unique_name]
assert retrieved['metadata'][unique_name] == value
def test_mapped_collection_proxy_set_item(new_project, unique_name):
'''Set new item in mapped collection proxy.'''
session = new_project.session
metadata = new_project['metadata']
assert unique_name not in metadata
value = 'value'
metadata[unique_name] = value
assert metadata[unique_name] == value
# Check change persisted correctly.
session.commit()
session.reset()
retrieved = session.get(*ftrack_api.inspection.identity(new_project))
assert retrieved is not new_project
assert retrieved['metadata'].keys() == [unique_name]
assert retrieved['metadata'][unique_name] == value
def test_mapped_collection_proxy_update_item(new_project, unique_name):
'''Update existing item in mapped collection proxy.'''
session = new_project.session
# Prepare a pre-existing value.
metadata = new_project['metadata']
value = 'value'
metadata[unique_name] = value
session.commit()
# Set new value.
new_value = 'new_value'
metadata[unique_name] = new_value
# Confirm change persisted correctly.
session.commit()
session.reset()
retrieved = session.get(*ftrack_api.inspection.identity(new_project))
assert retrieved is not new_project
assert retrieved['metadata'].keys() == [unique_name]
assert retrieved['metadata'][unique_name] == new_value
def test_mapped_collection_proxy_delete_item(new_project, unique_name):
'''Remove existing item from mapped collection proxy.'''
session = new_project.session
# Prepare a pre-existing value to remove.
metadata = new_project['metadata']
value = 'value'
metadata[unique_name] = value
session.commit()
# Now remove value.
del new_project['metadata'][unique_name]
assert unique_name not in new_project['metadata']
# Confirm change persisted correctly.
session.commit()
session.reset()
retrieved = session.get(*ftrack_api.inspection.identity(new_project))
assert retrieved is not new_project
assert retrieved['metadata'].keys() == []
assert unique_name not in retrieved['metadata']
def test_mapped_collection_proxy_delete_missing_item(new_project, unique_name):
'''Fail to remove item for missing key from mapped collection proxy.'''
metadata = new_project['metadata']
assert unique_name not in metadata
with pytest.raises(KeyError):
del metadata[unique_name]
def test_mapped_collection_proxy_iterate_keys(new_project, unique_name):
'''Iterate over keys in mapped collection proxy.'''
metadata = new_project['metadata']
metadata.update({
'a': 'value-a',
'b': 'value-b',
'c': 'value-c'
})
# Commit here as otherwise cleanup operation will fail because transaction
# will include updating metadata to refer to a deleted entity.
new_project.session.commit()
iterated = set()
for key in metadata:
iterated.add(key)
assert iterated == set(['a', 'b', 'c'])
def test_mapped_collection_proxy_count(new_project, unique_name):
'''Count items in mapped collection proxy.'''
metadata = new_project['metadata']
metadata.update({
'a': 'value-a',
'b': 'value-b',
'c': 'value-c'
})
# Commit here as otherwise cleanup operation will fail because transaction
# will include updating metadata to refer to a deleted entity.
new_project.session.commit()
assert len(metadata) == 3
def test_mapped_collection_on_create(session, unique_name, project):
'''Test that it is possible to set relational attributes on create'''
metadata = {
'a': 'value-a',
'b': 'value-b',
'c': 'value-c'
}
task_id = session.create(
'Task', {
'name': unique_name,
'parent': project,
'metadata': metadata,
}
).get('id')
session.commit()
# Reset the session and check that we have the expected
# values.
session.reset()
task = session.get(
'Task', task_id
)
for key, value in metadata.items():
assert value == task['metadata'][key]
def test_collection_refresh(new_asset_version, new_component):
'''Test collection reload.'''
session_two = ftrack_api.Session(auto_connect_event_hub=False)
query_string = 'select components from AssetVersion where id is "{0}"'.format(
new_asset_version.get('id')
)
# Fetch the new asset version in a new session.
new_asset_version_two = session_two.query(
query_string
).one()
# Modify our asset version
new_asset_version.get('components').append(
new_component
)
new_asset_version.session.commit()
# Query the same asset version again and make sure we get the newly
# populated data.
session_two.query(
query_string
).all()
assert (
new_asset_version.get('components') == new_asset_version_two.get('components')
)
# Make a local change to our asset version
new_asset_version_two.get('components').pop()
# Query the same asset version again and make sure our local changes
# are not overwritten.
session_two.query(
query_string
).all()
assert len(new_asset_version_two.get('components')) == 0
def test_mapped_collection_reload(new_asset_version):
'''Test mapped collection reload.'''
session_two = ftrack_api.Session(auto_connect_event_hub=False)
query_string = 'select metadata from AssetVersion where id is "{0}"'.format(
new_asset_version.get('id')
)
# Fetch the new asset version in a new session.
new_asset_version_two = session_two.query(
query_string
).one()
# Modify our asset version
new_asset_version['metadata']['test'] = str(uuid.uuid4())
new_asset_version.session.commit()
# Query the same asset version again and make sure we get the newly
# populated data.
session_two.query(
query_string
).all()
assert (
new_asset_version['metadata']['test'] == new_asset_version_two['metadata']['test']
)
local_data = str(uuid.uuid4())
new_asset_version_two['metadata']['test'] = local_data
# Modify our asset version again
new_asset_version['metadata']['test'] = str(uuid.uuid4())
new_asset_version.session.commit()
# Query the same asset version again and make sure our local changes
# are not overwritten.
session_two.query(
query_string
).all()
assert (
new_asset_version_two['metadata']['test'] == local_data
)
```
#### File: test/unit/test_data.py
```python
import os
import tempfile
import pytest
import ftrack_api.data
@pytest.fixture()
def content():
'''Return initial content.'''
return 'test data'
@pytest.fixture(params=['file', 'file_wrapper', 'string'])
def data(request, content):
'''Return cache.'''
if request.param == 'string':
data_object = ftrack_api.data.String(content)
elif request.param == 'file':
file_handle, path = tempfile.mkstemp()
file_object = os.fdopen(file_handle, 'r+')
file_object.write(content)
file_object.flush()
file_object.close()
data_object = ftrack_api.data.File(path, 'r+')
def cleanup():
'''Cleanup.'''
data_object.close()
os.remove(path)
request.addfinalizer(cleanup)
elif request.param == 'file_wrapper':
file_handle, path = tempfile.mkstemp()
file_object = os.fdopen(file_handle, 'r+')
file_object.write(content)
file_object.seek(0)
data_object = ftrack_api.data.FileWrapper(file_object)
def cleanup():
'''Cleanup.'''
data_object.close()
os.remove(path)
request.addfinalizer(cleanup)
else:
raise ValueError('Unrecognised parameter: {0}'.format(request.param))
return data_object
def test_read(data, content):
'''Return content from current position up to *limit*.'''
assert data.read(5) == content[:5]
assert data.read() == content[5:]
def test_write(data, content):
'''Write content at current position.'''
assert data.read() == content
data.write('more test data')
data.seek(0)
assert data.read() == content + 'more test data'
def test_flush(data):
'''Flush buffers ensuring data written.'''
# TODO: Implement better test than just calling function.
data.flush()
def test_seek(data, content):
'''Move internal pointer to *position*.'''
data.seek(5)
assert data.read() == content[5:]
def test_tell(data):
'''Return current position of internal pointer.'''
assert data.tell() == 0
data.seek(5)
assert data.tell() == 5
def test_close(data):
'''Flush buffers and prevent further access.'''
data.close()
with pytest.raises(ValueError) as error:
data.read()
assert 'I/O operation on closed file' in str(error.value)
class Dummy(ftrack_api.data.Data):
'''Dummy string.'''
def read(self, limit=None):
'''Return content from current position up to *limit*.'''
def write(self, content):
'''Write content at current position.'''
def test_unsupported_tell():
'''Fail when tell unsupported.'''
data = Dummy()
with pytest.raises(NotImplementedError) as error:
data.tell()
assert 'Tell not supported' in str(error.value)
def test_unsupported_seek():
'''Fail when seek unsupported.'''
data = Dummy()
with pytest.raises(NotImplementedError) as error:
data.seek(5)
assert 'Seek not supported' in str(error.value)
```
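Outside the fixtures, the data wrappers can be used directly. A short sketch mirroring the assertions above with `ftrack_api.data.String`:
```python
import ftrack_api.data

# Wrap an in-memory string; read respects the *limit* argument.
data = ftrack_api.data.String('test data')
print(data.read(4))   # 'test'
print(data.read())    # ' data'
print(data.tell())    # 9
data.close()          # further reads raise ValueError
```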
#### File: test/unit/test_operation.py
```python
import ftrack_api.operation
def test_operations_initialise():
'''Initialise empty operations stack.'''
operations = ftrack_api.operation.Operations()
assert len(operations) == 0
def test_operations_push():
'''Push new operation onto stack.'''
operations = ftrack_api.operation.Operations()
assert len(operations) == 0
operation = ftrack_api.operation.Operation()
operations.push(operation)
assert list(operations)[-1] is operation
def test_operations_pop():
'''Pop and return operation from stack.'''
operations = ftrack_api.operation.Operations()
assert len(operations) == 0
operations.push(ftrack_api.operation.Operation())
operations.push(ftrack_api.operation.Operation())
operation = ftrack_api.operation.Operation()
operations.push(operation)
assert len(operations) == 3
popped = operations.pop()
assert popped is operation
assert len(operations) == 2
def test_operations_count():
'''Count operations in stack.'''
operations = ftrack_api.operation.Operations()
assert len(operations) == 0
operations.push(ftrack_api.operation.Operation())
assert len(operations) == 1
operations.pop()
assert len(operations) == 0
def test_operations_clear():
'''Clear operations stack.'''
operations = ftrack_api.operation.Operations()
operations.push(ftrack_api.operation.Operation())
operations.push(ftrack_api.operation.Operation())
operations.push(ftrack_api.operation.Operation())
assert len(operations) == 3
operations.clear()
assert len(operations) == 0
def test_operations_iter():
'''Iterate over operations stack.'''
operations = ftrack_api.operation.Operations()
operation_a = ftrack_api.operation.Operation()
operation_b = ftrack_api.operation.Operation()
operation_c = ftrack_api.operation.Operation()
operations.push(operation_a)
operations.push(operation_b)
operations.push(operation_c)
assert len(operations) == 3
for operation, expected in zip(
operations, [operation_a, operation_b, operation_c]
):
assert operation is expected
```
#### File: test/unit/test_query.py
```python
import math
import pytest
import ftrack_api
import ftrack_api.query
import ftrack_api.exception
def test_index(session):
'''Index into query result.'''
results = session.query('User')
assert isinstance(results[2], session.types['User'])
def test_len(session):
'''Return count of results using len.'''
results = session.query('User where username is jenkins')
assert len(results) == 1
def test_all(session):
'''Return all results using convenience method.'''
results = session.query('User').all()
assert isinstance(results, list)
assert len(results)
def test_implicit_iteration(session):
'''Implicitly iterate through query result.'''
results = session.query('User')
assert isinstance(results, ftrack_api.query.QueryResult)
records = []
for record in results:
records.append(record)
assert len(records) == len(results)
def test_one(session):
'''Return single result using convenience method.'''
user = session.query('User where username is jenkins').one()
assert user['username'] == 'jenkins'
def test_one_fails_for_no_results(session):
'''Fail to fetch single result when no results available.'''
with pytest.raises(ftrack_api.exception.NoResultFoundError):
session.query('User where username is does_not_exist').one()
def test_one_fails_for_multiple_results(session):
'''Fail to fetch single result when multiple results available.'''
with pytest.raises(ftrack_api.exception.MultipleResultsFoundError):
session.query('User').one()
def test_one_with_existing_limit(session):
'''Fail to return single result when existing limit in expression.'''
with pytest.raises(ValueError):
session.query('User where username is jenkins limit 0').one()
def test_one_with_existing_offset(session):
'''Fail to return single result when existing offset in expression.'''
with pytest.raises(ValueError):
session.query('User where username is jenkins offset 2').one()
def test_one_with_prefetched_data(session):
'''Return single result ignoring prefetched data.'''
query = session.query('User where username is jenkins')
query.all()
user = query.one()
assert user['username'] == 'jenkins'
def test_first(session):
'''Return first result using convenience method.'''
users = session.query('User').all()
user = session.query('User').first()
assert user == users[0]
def test_first_returns_none_when_no_results(session):
'''Return None when no results available.'''
user = session.query('User where username is does_not_exist').first()
assert user is None
def test_first_with_existing_limit(session):
'''Fail to return first result when existing limit in expression.'''
with pytest.raises(ValueError):
session.query('User where username is jenkins limit 0').first()
def test_first_with_existing_offset(session):
'''Return first result whilst respecting custom offset.'''
users = session.query('User').all()
user = session.query('User offset 2').first()
assert user == users[2]
def test_first_with_prefetched_data(session):
'''Return first result ignoring prefetched data.'''
query = session.query('User where username is jenkins')
query.all()
user = query.first()
assert user['username'] == 'jenkins'
def test_paging(session, mocker):
'''Page through results.'''
mocker.patch.object(session, 'call', wraps=session.call)
page_size = 5
query = session.query('User limit 50', page_size=page_size)
records = query.all()
assert session.call.call_count == (
math.ceil(len(records) / float(page_size))
)
def test_paging_respects_offset_and_limit(session, mocker):
'''Page through results respecting offset and limit.'''
users = session.query('User').all()
mocker.patch.object(session, 'call', wraps=session.call)
page_size = 6
query = session.query('User offset 2 limit 8', page_size=page_size)
records = query.all()
assert session.call.call_count == 2
assert len(records) == 8
assert records == users[2:10]
def test_paging_respects_limit_smaller_than_page_size(session, mocker):
'''Use initial limit when less than page size.'''
mocker.patch.object(session, 'call', wraps=session.call)
page_size = 100
query = session.query('User limit 10', page_size=page_size)
records = query.all()
assert session.call.call_count == 1
session.call.assert_called_once_with(
[{
'action': 'query',
'expression': 'select id from User offset 0 limit 10'
}]
)
assert len(records) == 10
```
#### File: job_queue/job_server/job_queue_route.py
```python
import json
from aiohttp.web_response import Response
class JobQueueResource:
def __init__(self, job_queue, server_manager):
self.server_manager = server_manager
self._prefix = "/api"
self._job_queue = job_queue
self.endpoint_defs = (
("POST", "/jobs", self.post_job),
("GET", "/jobs", self.get_jobs),
("GET", "/jobs/{job_id}", self.get_job)
)
self.register()
def register(self):
for methods, url, callback in self.endpoint_defs:
final_url = self._prefix + url
self.server_manager.add_route(
methods, final_url, callback
)
async def get_jobs(self, request):
jobs_data = []
for job in self._job_queue.get_jobs():
jobs_data.append(job.status())
return Response(status=200, body=self.encode(jobs_data))
async def post_job(self, request):
data = await request.json()
host_name = data.get("host_name")
if not host_name:
return Response(
                status=400, text="Key \"host_name\" not filled."
)
job = self._job_queue.create_job(host_name, data)
return Response(status=201, text=job.id)
async def get_job(self, request):
job_id = request.match_info["job_id"]
content = self._job_queue.get_job_status(job_id)
if content is None:
content = {}
return Response(
status=200,
body=self.encode(content),
content_type="application/json"
)
@classmethod
def encode(cls, data):
return json.dumps(
data,
indent=4
).encode("utf-8")
```
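For orientation, a minimal client sketch against these routes, assuming the job server from the sibling `utils.py` listens on its default `localhost:8079`; the host name value is illustrative:
```python
import requests

BASE_URL = "http://localhost:8079/api"

# Create a job; `post_job` expects a JSON body with "host_name"
# and returns the new job id as plain text with status 201.
response = requests.post(BASE_URL + "/jobs", json={"host_name": "maya"})
response.raise_for_status()
job_id = response.text

# Fetch the job status; `get_job` always answers with a JSON body.
status = requests.get("{}/jobs/{}".format(BASE_URL, job_id)).json()
print(status)
```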
#### File: job_queue/job_server/utils.py
```python
import sys
import signal
import time
import socket
from .server import WebServerManager
class SharedObjects:
stopped = False
@classmethod
def stop(cls):
cls.stopped = True
def main(port=None, host=None):
def signal_handler(sig, frame):
print("Signal to kill process received. Termination starts.")
SharedObjects.stop()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
port = int(port or 8079)
host = str(host or "localhost")
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as con:
result_of_check = con.connect_ex((host, port))
if result_of_check == 0:
print((
"Server {}:{} is already running or address is occupied."
).format(host, port))
return 1
print("Running server {}:{}".format(host, port))
manager = WebServerManager(port, host)
manager.start_server()
stopped = False
while manager.is_running:
if not stopped and SharedObjects.stopped:
stopped = True
manager.stop_server()
time.sleep(0.1)
return 0
if __name__ == "__main__":
sys.exit(main())
```
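The server can also be started programmatically on a non-default port; a short sketch (the import path is assumed from the folder layout above, the port is arbitrary):
```python
import sys

# Assumed package path based on "job_queue/job_server/utils.py".
from openpype.modules.job_queue.job_server import utils

sys.exit(utils.main(port=8080, host="0.0.0.0"))
```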
#### File: job_queue/job_server/workers_rpc_route.py
```python
import asyncio
import aiohttp
from aiohttp_json_rpc import JsonRpc
from aiohttp_json_rpc.protocol import (
encode_error, decode_msg, JsonRpcMsgTyp
)
from aiohttp_json_rpc.exceptions import RpcError
from .workers import Worker
class WorkerRpc(JsonRpc):
def __init__(self, job_queue, manager, **kwargs):
super().__init__(**kwargs)
self._job_queue = job_queue
self._manager = manager
self._stopped = False
# Register methods
self.add_methods(
("", self.register_worker),
("", self.job_done)
)
asyncio.ensure_future(self._rpc_loop(), loop=self.loop)
self._manager.add_route(
"*", "/ws", self.handle_request
)
# Panel routes for tools
async def register_worker(self, request, host_name):
worker = Worker(host_name, request.http_request)
self._job_queue.add_worker(worker)
return worker.id
async def _rpc_loop(self):
while self.loop.is_running():
if self._stopped:
break
for worker in tuple(self._job_queue.workers()):
if not worker.connection_is_alive():
self._job_queue.remove_worker(worker)
self._job_queue.assign_jobs()
await self.send_jobs()
await asyncio.sleep(5)
async def job_done(self, worker_id, job_id, success, message, data):
worker = self._job_queue.get_worker(worker_id)
if worker is not None:
worker.set_current_job(None)
job = self._job_queue.get_job(job_id)
if job is not None:
job.set_done(success, message, data)
return True
async def send_jobs(self):
invalid_workers = []
for worker in self._job_queue.workers():
if worker.job_assigned() and not worker.is_working():
try:
await worker.send_job()
except ConnectionResetError:
invalid_workers.append(worker)
for worker in invalid_workers:
self._job_queue.remove_worker(worker)
async def handle_websocket_request(self, http_request):
"""Override this method to catch CLOSING messages."""
http_request.msg_id = 0
http_request.pending = {}
# prepare and register websocket
ws = aiohttp.web_ws.WebSocketResponse()
await ws.prepare(http_request)
http_request.ws = ws
self.clients.append(http_request)
while not ws.closed:
self.logger.debug('waiting for messages')
raw_msg = await ws.receive()
if raw_msg.type == aiohttp.WSMsgType.TEXT:
self.logger.debug('raw msg received: %s', raw_msg.data)
self.loop.create_task(
self._handle_rpc_msg(http_request, raw_msg)
)
elif raw_msg.type == aiohttp.WSMsgType.CLOSING:
break
self.clients.remove(http_request)
return ws
async def _handle_rpc_msg(self, http_request, raw_msg):
        # This duplicates code from the parent class, but there is no other
        # way to handle server -> client requests.
try:
_raw_message = raw_msg.data
msg = decode_msg(_raw_message)
except RpcError as error:
await self._ws_send_str(http_request, encode_error(error))
return
if msg.type in (JsonRpcMsgTyp.RESULT, JsonRpcMsgTyp.ERROR):
request_id = msg.data["id"]
if request_id in http_request.pending_requests:
future = http_request.pending_requests[request_id]
future.set_result(msg.data["result"])
return
return await super()._handle_rpc_msg(http_request, raw_msg)
async def stop(self):
self._stopped = True
for worker in tuple(self._job_queue.workers()):
await worker.close()
```
#### File: modules/royalrender/api.py
```python
import sys
import os
from openpype.settings import get_project_settings
from openpype.lib.local_settings import OpenPypeSettingsRegistry
from openpype.lib import PypeLogger, run_subprocess
from .rr_job import RRJob, SubmitFile, SubmitterParameter
log = PypeLogger.get_logger("RoyalRender")
class Api:
_settings = None
RR_SUBMIT_CONSOLE = 1
RR_SUBMIT_API = 2
def __init__(self, settings, project=None):
self._settings = settings
self._initialize_rr(project)
def _initialize_rr(self, project=None):
# type: (str) -> None
"""Initialize RR Path.
Args:
project (str, Optional): Project name to set RR api in
context.
"""
if project:
project_settings = get_project_settings(project)
rr_path = (
project_settings
["royalrender"]
["rr_paths"]
)
else:
rr_path = (
self._settings
["modules"]
["royalrender"]
["rr_path"]
["default"]
)
os.environ["RR_ROOT"] = rr_path
self._rr_path = rr_path
def _get_rr_bin_path(self, rr_root=None):
# type: (str) -> str
"""Get path to RR bin folder."""
rr_root = rr_root or self._rr_path
is_64bit_python = sys.maxsize > 2 ** 32
rr_bin_path = ""
if sys.platform.lower() == "win32":
rr_bin_path = "/bin/win64"
if not is_64bit_python:
                # we are using 32bit python
rr_bin_path = "/bin/win"
rr_bin_path = rr_bin_path.replace(
"/", os.path.sep
)
if sys.platform.lower() == "darwin":
rr_bin_path = "/bin/mac64"
if not is_64bit_python:
rr_bin_path = "/bin/mac"
if sys.platform.lower() == "linux":
rr_bin_path = "/bin/lx64"
return os.path.join(rr_root, rr_bin_path)
def _initialize_module_path(self):
# type: () -> None
"""Set RR modules for Python."""
# default for linux
rr_bin = self._get_rr_bin_path()
rr_module_path = os.path.join(rr_bin, "lx64/lib")
if sys.platform.lower() == "win32":
rr_module_path = rr_bin
rr_module_path = rr_module_path.replace(
"/", os.path.sep
)
if sys.platform.lower() == "darwin":
rr_module_path = os.path.join(rr_bin, "lib/python/27")
sys.path.append(os.path.join(self._rr_path, rr_module_path))
def create_submission(self, jobs, submitter_attributes, file_name=None):
# type: (list[RRJob], list[SubmitterParameter], str) -> SubmitFile
"""Create jobs submission file.
Args:
jobs (list): List of :class:`RRJob`
submitter_attributes (list): List of submitter attributes
:class:`SubmitterParameter` for whole submission batch.
            file_name (str, optional): File path to write data to.
Returns:
str: XML data of job submission files.
"""
raise NotImplementedError
def submit_file(self, file, mode=RR_SUBMIT_CONSOLE):
# type: (SubmitFile, int) -> None
if mode == self.RR_SUBMIT_CONSOLE:
            self._submit_using_console(file)
            return
# RR v7 supports only Python 2.7 so we bail out in fear
# until there is support for Python 3 😰
raise NotImplementedError(
"Submission via RoyalRender API is not supported yet")
# self._submit_using_api(file)
def _submit_using_console(self, file):
# type: (SubmitFile) -> bool
rr_console = os.path.join(
self._get_rr_bin_path(),
"rrSubmitterconsole"
)
if sys.platform.lower() == "darwin":
if "/bin/mac64" in rr_console:
rr_console = rr_console.replace("/bin/mac64", "/bin/mac")
if sys.platform.lower() == "win32":
if "/bin/win64" in rr_console:
rr_console = rr_console.replace("/bin/win64", "/bin/win")
rr_console += ".exe"
args = [rr_console, file]
run_subprocess(" ".join(args), logger=log)
def _submit_using_api(self, file):
# type: (SubmitFile) -> None
"""Use RR API to submit jobs.
Args:
file (SubmitFile): Submit jobs definition.
Throws:
RoyalRenderException: When something fails.
"""
self._initialize_module_path()
import libpyRR2 as rrLib # noqa
from rrJob import getClass_JobBasics # noqa
import libpyRR2 as _RenderAppBasic # noqa
tcp = rrLib._rrTCP("") # noqa
rr_server = tcp.getRRServer()
if len(rr_server) == 0:
log.info("Got RR IP address {}".format(rr_server))
# TODO: Port is hardcoded in RR? If not, move it to Settings
if not tcp.setServer(rr_server, 7773):
log.error(
"Can not set RR server: {}".format(tcp.errorMessage()))
raise RoyalRenderException(tcp.errorMessage())
# TODO: This need UI and better handling of username/password.
# We can't store password in keychain as it is pulled multiple
# times and users on linux must enter keychain password every time.
# Probably best way until we setup our own user management would be
# to encrypt password and save it to json locally. Not bulletproof
# but at least it is not stored in plaintext.
reg = OpenPypeSettingsRegistry()
try:
rr_user = reg.get_item("rr_username")
rr_password = reg.get_item("rr_password")
except ValueError:
# user has no rr credentials set
pass
else:
# login to RR
tcp.setLogin(rr_user, rr_password)
job = getClass_JobBasics()
renderer = _RenderAppBasic()
# iterate over SubmitFile, set _JobBasic (job) and renderer
# and feed it to jobSubmitNew()
# not implemented yet
job.renderer = renderer
tcp.jobSubmitNew(job)
class RoyalRenderException(Exception):
"""Exception used in various error states coming from RR."""
pass
```
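A minimal sketch of constructing the wrapper outside of a running OpenPype instance; the nested settings keys mirror what `_initialize_rr` reads, the path itself is an assumption:
```python
# Illustrative only - real system settings come from OpenPype.
settings = {
    "modules": {
        "royalrender": {
            "rr_path": {"default": "C:/RoyalRender"}
        }
    }
}

rr_api = Api(settings)
# RR_ROOT is now exported and the bin folder can be resolved, e.g.
# "C:/RoyalRender/bin/win64" on 64bit Windows.
print(rr_api._get_rr_bin_path())
```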
#### File: sync_server/tray/app.py
```python
from Qt import QtWidgets, QtCore, QtGui
from openpype.tools.settings import style
from openpype.lib import PypeLogger
from openpype import resources
from .widgets import (
SyncProjectListWidget,
SyncRepresentationSummaryWidget
)
log = PypeLogger().get_logger("SyncServer")
class SyncServerWindow(QtWidgets.QDialog):
"""
Main window that contains list of synchronizable projects and summary
view with all synchronizable representations for first project
"""
def __init__(self, sync_server, parent=None):
super(SyncServerWindow, self).__init__(parent)
self.sync_server = sync_server
self.setWindowFlags(QtCore.Qt.Window)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setStyleSheet(style.load_stylesheet())
self.setWindowIcon(QtGui.QIcon(resources.get_openpype_icon_filepath()))
self.resize(1450, 700)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self._hide_message)
body = QtWidgets.QWidget(self)
footer = QtWidgets.QWidget(self)
footer.setFixedHeight(20)
left_column = QtWidgets.QWidget(body)
left_column_layout = QtWidgets.QVBoxLayout(left_column)
self.projects = SyncProjectListWidget(sync_server, self)
self.projects.refresh() # force selection of default
left_column_layout.addWidget(self.projects)
self.pause_btn = QtWidgets.QPushButton("Pause server")
left_column_layout.addWidget(self.pause_btn)
repres = SyncRepresentationSummaryWidget(
sync_server,
project=self.projects.current_project,
parent=self)
container = QtWidgets.QWidget()
container_layout = QtWidgets.QHBoxLayout(container)
container_layout.setContentsMargins(0, 0, 0, 0)
split = QtWidgets.QSplitter()
split.addWidget(left_column)
split.addWidget(repres)
split.setSizes([180, 950, 200])
container_layout.addWidget(split)
body_layout = QtWidgets.QHBoxLayout(body)
body_layout.addWidget(container)
body_layout.setContentsMargins(0, 0, 0, 0)
self.message = QtWidgets.QLabel(footer)
self.message.hide()
footer_layout = QtWidgets.QVBoxLayout(footer)
footer_layout.addWidget(self.message)
footer_layout.setContentsMargins(20, 0, 0, 0)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(body)
layout.addWidget(footer)
self.setWindowTitle("Sync Queue")
self.projects.project_changed.connect(
self._on_project_change
)
self.pause_btn.clicked.connect(self._pause)
self.pause_btn.setAutoDefault(False)
self.pause_btn.setDefault(False)
repres.message_generated.connect(self._update_message)
self.projects.message_generated.connect(self._update_message)
self.representationWidget = repres
def _on_project_change(self):
if self.projects.current_project is None:
return
self.representationWidget.table_view.model().set_project(
self.projects.current_project
)
project_name = self.projects.current_project
if not self.sync_server.get_sync_project_setting(project_name):
self.projects.message_generated.emit(
"Project {} not active anymore".format(project_name))
self.projects.refresh()
return
def showEvent(self, event):
self.representationWidget.model.set_project(
self.projects.current_project)
self.projects.refresh()
self._set_running(True)
super().showEvent(event)
def closeEvent(self, event):
self._set_running(False)
super().closeEvent(event)
def _set_running(self, running):
self.representationWidget.model.is_running = running
self.representationWidget.model.timer.setInterval(0)
def _pause(self):
if self.sync_server.is_paused():
self.sync_server.unpause_server()
self.pause_btn.setText("Pause server")
else:
self.sync_server.pause_server()
self.pause_btn.setText("Unpause server")
self.projects.refresh()
def _update_message(self, value):
"""
Update and show message in the footer
"""
self.message.setText(value)
if self.message.isVisible():
self.message.repaint()
else:
self.message.show()
msec_delay = 3000
self.timer.start(msec_delay)
def _hide_message(self):
"""
Hide message in footer
Called automatically by self.timer after a while
"""
self.message.setText("")
self.message.hide()
```
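A minimal sketch of opening the window standalone; the `sync_server` module object is assumed to be obtained elsewhere (for example from the OpenPype modules manager):
```python
from Qt import QtWidgets

app = QtWidgets.QApplication([])
window = SyncServerWindow(sync_server)  # `sync_server` assumed to exist
window.show()
app.exec_()
```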
#### File: modules/webserver/base_routes.py
```python
import inspect
from aiohttp.http_exceptions import HttpBadRequest
from aiohttp.web_exceptions import HTTPMethodNotAllowed
from aiohttp.web_request import Request
DEFAULT_METHODS = ("GET", "POST", "PUT", "DELETE")
class RestApiEndpoint:
"""Helper endpoint class for single endpoint.
Class can define `get`, `post`, `put` or `delete` async methods for the
endpoint.
"""
def __init__(self):
methods = {}
for method_name in DEFAULT_METHODS:
method = getattr(self, method_name.lower(), None)
if method:
methods[method_name.upper()] = method
self.methods = methods
async def dispatch(self, request: Request):
method = self.methods.get(request.method.upper())
if not method:
raise HTTPMethodNotAllowed("", DEFAULT_METHODS)
wanted_args = list(inspect.signature(method).parameters.keys())
available_args = request.match_info.copy()
available_args["request"] = request
unsatisfied_args = set(wanted_args) - set(available_args.keys())
if unsatisfied_args:
# Expected match info that doesn't exist
raise HttpBadRequest("")
return await method(**{
arg_name: available_args[arg_name]
for arg_name in wanted_args
})
```
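A minimal sketch of an endpoint built on `RestApiEndpoint`; the route path and the handler body are illustrative only:
```python
from aiohttp import web


class ProjectEndpoint(RestApiEndpoint):
    """Hypothetical endpoint echoing the project name from the URL."""

    async def get(self, project_name, request):
        # Both `project_name` (from match_info) and `request` are injected
        # by `dispatch` based on this method's signature.
        return web.json_response({"project": project_name})


app = web.Application()
app.router.add_route(
    "*", "/api/projects/{project_name}", ProjectEndpoint().dispatch
)
```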
#### File: plugins/publish/cleanup_explicit.py
```python
import os
import shutil
import pyblish.api
class ExplicitCleanUp(pyblish.api.ContextPlugin):
"""Cleans up the files and folder defined to be deleted.
plugin is looking for 2 keys into context data:
- `cleanupFullPaths` - full paths that should be removed not matter if
is path to file or to directory
- `cleanupEmptyDirs` - full paths to directories that should be removed
only if do not contain any file in it but will be removed if contain
sub-folders
"""
order = pyblish.api.IntegratorOrder + 10
label = "Explicit Clean Up"
optional = True
active = True
def process(self, context):
cleanup_full_paths = context.data.get("cleanupFullPaths")
cleanup_empty_dirs = context.data.get("cleanupEmptyDirs")
self._remove_full_paths(cleanup_full_paths)
self._remove_empty_dirs(cleanup_empty_dirs)
def _remove_full_paths(self, full_paths):
"""Remove files and folders from disc.
Folders are removed with whole content.
"""
if not full_paths:
self.log.debug("No full paths to cleanup were collected.")
return
# Separate paths into files and directories
filepaths = set()
dirpaths = set()
for path in full_paths:
# Skip empty items
if not path:
continue
# Normalize path
normalized = os.path.normpath(path)
# Check if path exists
if not os.path.exists(normalized):
continue
if os.path.isfile(normalized):
filepaths.add(normalized)
else:
dirpaths.add(normalized)
# Store failed paths with exception
failed = []
# Store removed filepaths for logging
succeded_files = set()
# Remove file by file
for filepath in filepaths:
try:
os.remove(filepath)
succeded_files.add(filepath)
except Exception as exc:
failed.append((filepath, exc))
if succeded_files:
self.log.info(
"Removed files:\n{}".format("\n".join(succeded_files))
)
# Delete folders with it's content
succeded_dirs = set()
for dirpath in dirpaths:
# Check if directory still exists
# - it is possible that directory was already deleted with
# different dirpath to delete
if os.path.exists(dirpath):
try:
shutil.rmtree(dirpath)
succeded_dirs.add(dirpath)
except Exception:
failed.append(dirpath)
if succeded_dirs:
self.log.info(
"Removed direcoties:\n{}".format("\n".join(succeded_dirs))
)
        # Prepare lines for report of failed removals
lines = []
for filepath, exc in failed:
lines.append("{}: {}".format(filepath, str(exc)))
if lines:
self.log.warning(
"Failed to remove filepaths:\n{}".format("\n".join(lines))
)
def _remove_empty_dirs(self, empty_dirpaths):
"""Remove directories if do not contain any files."""
if not empty_dirpaths:
self.log.debug("No empty dirs to cleanup were collected.")
return
# First filtering of directories and making sure those are
# existing directories
filtered_dirpaths = set()
for path in empty_dirpaths:
if (
path
and os.path.exists(path)
and os.path.isdir(path)
):
filtered_dirpaths.add(os.path.normpath(path))
to_delete_dirpaths = set()
to_skip_dirpaths = set()
# Check if contain any files (or it's subfolders contain files)
for dirpath in filtered_dirpaths:
valid = True
for _, _, filenames in os.walk(dirpath):
if filenames:
valid = False
break
if valid:
to_delete_dirpaths.add(dirpath)
else:
to_skip_dirpaths.add(dirpath)
if to_skip_dirpaths:
self.log.debug(
"Skipped directories because contain files:\n{}".format(
"\n".join(to_skip_dirpaths)
)
)
        # Remove empty directories
for dirpath in to_delete_dirpaths:
if os.path.exists(dirpath):
shutil.rmtree(dirpath)
if to_delete_dirpaths:
self.log.debug(
"Deleted empty directories:\n{}".format(
"\n".join(to_delete_dirpaths)
)
)
```
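Other plugins feed this cleanup by filling the two context keys; a sketch of a hypothetical collector marking a temporary staging folder for removal (the paths are made up):
```python
import pyblish.api


class CollectCleanupPaths(pyblish.api.ContextPlugin):
    """Hypothetical collector feeding ExplicitCleanUp."""

    order = pyblish.api.CollectorOrder

    def process(self, context):
        # Both keys are the ones ExplicitCleanUp reads above.
        context.data.setdefault("cleanupFullPaths", []).append(
            "/tmp/openpype_staging/render_tmp"
        )
        context.data.setdefault("cleanupEmptyDirs", []).append(
            "/tmp/openpype_staging"
        )
```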
#### File: plugins/publish/extract_jpeg_exr.py
```python
import os
import pyblish.api
from openpype.lib import (
get_ffmpeg_tool_path,
run_subprocess,
path_to_subprocess_arg,
get_transcode_temp_directory,
convert_for_ffmpeg,
should_convert_for_ffmpeg
)
import shutil
class ExtractJpegEXR(pyblish.api.InstancePlugin):
"""Create jpg thumbnail from sequence using ffmpeg"""
label = "Extract Jpeg EXR"
order = pyblish.api.ExtractorOrder
families = [
"imagesequence", "render", "render2d",
"source", "plate", "take"
]
hosts = ["shell", "fusion", "resolve"]
enabled = False
# presetable attribute
ffmpeg_args = None
def process(self, instance):
self.log.info("subset {}".format(instance.data['subset']))
# skip crypto passes.
# TODO: This is just a quick fix and has its own side-effects - it is
# affecting every subset name with `crypto` in its name.
# This must be solved properly, maybe using tags on
# representation that can be determined much earlier and
# with better precision.
if 'crypto' in instance.data['subset'].lower():
self.log.info("Skipping crypto passes.")
return
# Skip if review not set.
if not instance.data.get("review", True):
self.log.info("Skipping - no review set on instance.")
return
filtered_repres = self._get_filtered_repres(instance)
for repre in filtered_repres:
repre_files = repre["files"]
if not isinstance(repre_files, (list, tuple)):
input_file = repre_files
else:
file_index = int(float(len(repre_files)) * 0.5)
input_file = repre_files[file_index]
stagingdir = os.path.normpath(repre["stagingDir"])
full_input_path = os.path.join(stagingdir, input_file)
self.log.info("input {}".format(full_input_path))
do_convert = should_convert_for_ffmpeg(full_input_path)
# If result is None the requirement of conversion can't be
# determined
if do_convert is None:
self.log.info((
"Can't determine if representation requires conversion."
" Skipped."
))
continue
# Do conversion if needed
# - change staging dir of source representation
# - must be set back after output definitions processing
convert_dir = None
if do_convert:
convert_dir = get_transcode_temp_directory()
filename = os.path.basename(full_input_path)
convert_for_ffmpeg(
full_input_path,
convert_dir,
None,
None,
self.log
)
full_input_path = os.path.join(convert_dir, filename)
filename = os.path.splitext(input_file)[0]
if not filename.endswith('.'):
filename += "."
jpeg_file = filename + "jpg"
full_output_path = os.path.join(stagingdir, jpeg_file)
self.log.info("output {}".format(full_output_path))
ffmpeg_path = get_ffmpeg_tool_path("ffmpeg")
ffmpeg_args = self.ffmpeg_args or {}
jpeg_items = []
jpeg_items.append(path_to_subprocess_arg(ffmpeg_path))
            # overwrite file if it already exists
jpeg_items.append("-y")
# use same input args like with mov
jpeg_items.extend(ffmpeg_args.get("input") or [])
# input file
jpeg_items.append("-i {}".format(
path_to_subprocess_arg(full_input_path)
))
# output arguments from presets
jpeg_items.extend(ffmpeg_args.get("output") or [])
# If its a movie file, we just want one frame.
if repre["ext"] == "mov":
jpeg_items.append("-vframes 1")
# output file
jpeg_items.append(path_to_subprocess_arg(full_output_path))
subprocess_command = " ".join(jpeg_items)
# run subprocess
self.log.debug("{}".format(subprocess_command))
try: # temporary until oiiotool is supported cross platform
run_subprocess(
subprocess_command, shell=True, logger=self.log
)
except RuntimeError as exp:
if "Compression" in str(exp):
self.log.debug(
"Unsupported compression on input files. Skipping!!!"
)
return
self.log.warning("Conversion crashed", exc_info=True)
raise
new_repre = {
"name": "thumbnail",
"ext": "jpg",
"files": jpeg_file,
"stagingDir": stagingdir,
"thumbnail": True,
"tags": ["thumbnail"]
}
# adding representation
self.log.debug("Adding: {}".format(new_repre))
instance.data["representations"].append(new_repre)
# Cleanup temp folder
if convert_dir is not None and os.path.exists(convert_dir):
shutil.rmtree(convert_dir)
def _get_filtered_repres(self, instance):
filtered_repres = []
src_repres = instance.data.get("representations") or []
for repre in src_repres:
self.log.debug(repre)
tags = repre.get("tags") or []
valid = "review" in tags or "thumb-nuke" in tags
if not valid:
continue
if not repre.get("files"):
self.log.info((
"Representation \"{}\" don't have files. Skipping"
).format(repre["name"]))
continue
filtered_repres.append(repre)
return filtered_repres
```
#### File: plugins/publish/validate_aseset_docs.py
```python
import pyblish.api
from openpype.pipeline import PublishValidationError
class ValidateContainers(pyblish.api.InstancePlugin):
"""Validate existence of asset asset documents on instances.
Without asset document it is not possible to publish the instance.
If context has set asset document the validation is skipped.
Plugin was added because there are cases when context asset is not defined
e.g. in tray publisher.
"""
label = "Validate Asset docs"
order = pyblish.api.ValidatorOrder
def process(self, instance):
context_asset_doc = instance.context.data.get("assetEntity")
if context_asset_doc:
return
if instance.data.get("assetEntity"):
self.log.info("Instance have set asset document in it's data.")
else:
raise PublishValidationError((
"Instance \"{}\" don't have set asset"
" document which is needed for publishing."
).format(instance.data["name"]))
```
#### File: plugins/publish/validate_filesequences.py
```python
import pyblish.api
class ValidateFileSequences(pyblish.api.ContextPlugin):
"""Validates whether any file sequences were collected."""
order = pyblish.api.ValidatorOrder
# Keep "filesequence" for backwards compatibility of older jobs
targets = ["filesequence", "farm"]
label = "Validate File Sequences"
def process(self, context):
assert context, "Nothing collected."
```
#### File: plugins/publish/validate_intent.py
```python
import os
import pyblish.api
from openpype.lib import filter_profiles
class ValidateIntent(pyblish.api.ContextPlugin):
"""Validate intent of the publish.
    It is required to fill in the intent of this publish. Check the log
    for more details.
"""
order = pyblish.api.ValidatorOrder
label = "Validate Intent"
enabled = False
# Can be modified by settings
profiles = [{
"hosts": [],
"task_types": [],
"tasks": [],
"validate": False
}]
def process(self, context):
# Skip if there are no profiles
validate = True
if self.profiles:
# Collect data from context
task_name = context.data.get("task")
task_type = context.data.get("taskType")
host_name = context.data.get("hostName")
filter_data = {
"hosts": host_name,
"task_types": task_type,
"tasks": task_name
}
matching_profile = filter_profiles(
self.profiles, filter_data, logger=self.log
)
if matching_profile:
validate = matching_profile["validate"]
if not validate:
self.log.debug((
"Validation of intent was skipped."
" Matching profile for current context disabled validation."
))
return
msg = (
"Please make sure that you select the intent of this publish."
)
intent = context.data.get("intent") or {}
self.log.debug(str(intent))
intent_value = intent.get("value")
if not intent_value:
raise AssertionError(msg)
```
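The `profiles` attribute is normally filled from project settings; a sketch of a profile that would enable validation only for compositing tasks in Nuke (the values are illustrative):
```python
# Illustrative only - real values come from OpenPype settings.
ValidateIntent.enabled = True
ValidateIntent.profiles = [
    {
        "hosts": ["nuke"],
        "task_types": ["Compositing"],
        "tasks": [],
        "validate": True
    }
]
```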
#### File: settings/entities/item_entities.py
```python
import re
import six
from .lib import (
NOT_SET,
STRING_TYPE,
OverrideState
)
from .exceptions import (
DefaultsNotDefined,
StudioDefaultsNotDefined,
EntitySchemaError
)
from .base_entity import ItemEntity
class PathEntity(ItemEntity):
schema_types = ["path"]
platforms = ("windows", "darwin", "linux")
platform_labels_mapping = {
"windows": "Windows",
"darwin": "MacOS",
"linux": "Linux"
}
path_item_type_error = "Got invalid path value type {}. Expected: {}"
attribute_error_msg = (
"'PathEntity' has no attribute '{}' if is not set as multiplatform"
)
def __setitem__(self, *args, **kwargs):
return self.child_obj.__setitem__(*args, **kwargs)
def __getitem__(self, *args, **kwargs):
return self.child_obj.__getitem__(*args, **kwargs)
def __iter__(self):
return self.child_obj.__iter__()
def keys(self):
if not self.multiplatform:
raise AttributeError(self.attribute_error_msg.format("keys"))
return self.child_obj.keys()
def values(self):
if not self.multiplatform:
raise AttributeError(self.attribute_error_msg.format("values"))
return self.child_obj.values()
def items(self):
if not self.multiplatform:
raise AttributeError(self.attribute_error_msg.format("items"))
return self.child_obj.items()
def has_child_with_key(self, key):
return self.child_obj.has_child_with_key(key)
def _item_initialization(self):
if self.group_item is None and not self.is_group:
self.is_group = True
self.multiplatform = self.schema_data.get("multiplatform", False)
self.multipath = self.schema_data.get("multipath", False)
placeholder_text = self.schema_data.get("placeholder")
# Create child object
if not self.multiplatform and not self.multipath:
valid_value_types = (STRING_TYPE, )
item_schema = {
"type": "path-input",
"key": self.key,
"placeholder": placeholder_text
}
elif not self.multiplatform:
valid_value_types = (list, )
item_schema = {
"type": "list",
"key": self.key,
"object_type": {
"type": "path-input",
"placeholder": placeholder_text
}
}
else:
valid_value_types = (dict, )
item_schema = {
"type": "dict",
"key": self.key,
"show_borders": False,
"children": []
}
for platform_key in self.platforms:
platform_label = self.platform_labels_mapping[platform_key]
child_item = {
"key": platform_key,
"label": platform_label
}
if self.multipath:
child_item["type"] = "list"
child_item["object_type"] = {
"type": "path-input",
"placeholder": placeholder_text
}
else:
child_item["type"] = "path-input"
child_item["placeholder"] = placeholder_text
item_schema["children"].append(child_item)
self.valid_value_types = valid_value_types
self.child_obj = self.create_schema_object(item_schema, self)
def collect_static_entities_by_path(self):
return self.child_obj.collect_static_entities_by_path()
def get_child_path(self, _child_obj):
return self.path
def set(self, value):
self.child_obj.set(value)
def collect_dynamic_schema_entities(self, *args, **kwargs):
self.child_obj.collect_dynamic_schema_entities(*args, **kwargs)
def settings_value(self):
if self._override_state is OverrideState.NOT_DEFINED:
return NOT_SET
if self.is_group:
if self._override_state is OverrideState.STUDIO:
if not self.has_studio_override:
return NOT_SET
elif self._override_state is OverrideState.PROJECT:
if not self.has_project_override:
return NOT_SET
return self.child_obj.settings_value()
def on_change(self):
for callback in self.on_change_callbacks:
callback()
self.parent.on_child_change(self)
def on_child_change(self, _child_obj):
self.on_change()
@property
def has_unsaved_changes(self):
return self.child_obj.has_unsaved_changes
@property
def has_studio_override(self):
return self.child_obj.has_studio_override
@property
def has_project_override(self):
return self.child_obj.has_project_override
@property
def value(self):
return self.child_obj.value
def set_override_state(self, state, ignore_missing_defaults):
# Trigger override state change of root if is not same
if self.root_item.override_state is not state:
self.root_item.set_override_state(state)
return
self._override_state = state
self._ignore_missing_defaults = ignore_missing_defaults
self.child_obj.set_override_state(state, ignore_missing_defaults)
def update_default_value(self, value):
self.child_obj.update_default_value(value)
def update_project_value(self, value):
self.child_obj.update_project_value(value)
def update_studio_value(self, value):
self.child_obj.update_studio_value(value)
def _discard_changes(self, *args, **kwargs):
self.child_obj.discard_changes(*args, **kwargs)
def _add_to_studio_default(self, *args, **kwargs):
self.child_obj.add_to_studio_default(*args, **kwargs)
def _remove_from_studio_default(self, *args, **kwargs):
self.child_obj.remove_from_studio_default(*args, **kwargs)
def _add_to_project_override(self, *args, **kwargs):
self.child_obj.add_to_project_override(*args, **kwargs)
def _remove_from_project_override(self, *args, **kwargs):
self.child_obj.remove_from_project_override(*args, **kwargs)
def reset_callbacks(self):
super(PathEntity, self).reset_callbacks()
self.child_obj.reset_callbacks()
class ListStrictEntity(ItemEntity):
schema_types = ["list-strict"]
_key_regex = re.compile(r"[0-9]+")
def __getitem__(self, idx):
if not isinstance(idx, int):
idx = int(idx)
return self.children[idx]
def __setitem__(self, idx, value):
if not isinstance(idx, int):
idx = int(idx)
self.children[idx].set(value)
def get(self, idx, default=None):
if not isinstance(idx, int):
idx = int(idx)
if idx < len(self.children):
return self.children[idx]
return default
def has_child_with_key(self, key):
if (
key
and isinstance(key, six.string_types)
and self._key_regex.match(key)
):
key = int(key)
if not isinstance(key, int):
return False
return 0 <= key < len(self.children)
def _item_initialization(self):
self.valid_value_types = (list, )
self.require_key = True
self.initial_value = None
self._ignore_child_changes = False
# Child items
self.object_types = self.schema_data["object_types"]
self.children = []
for children_schema in self.object_types:
child_obj = self.create_schema_object(children_schema, self, True)
self.children.append(child_obj)
# GUI attribute
self.is_horizontal = self.schema_data.get("horizontal", True)
if self.group_item is None and not self.is_group:
self.is_group = True
def schema_validations(self):
# List entity must have file parent.
if (
not self.is_dynamic_schema_node
and not self.is_in_dynamic_schema_node
and not self.is_file
and self.file_item is None
):
raise EntitySchemaError(
self, "Missing file entity in hierarchy."
)
super(ListStrictEntity, self).schema_validations()
def collect_static_entities_by_path(self):
output = {}
if self.is_dynamic_item or self.is_in_dynamic_item:
return output
output[self.path] = self
for child_obj in self.children:
result = child_obj.collect_static_entities_by_path()
if result:
output.update(result)
return output
def get_child_path(self, child_obj):
result_idx = None
for idx, _child_obj in enumerate(self.children):
if _child_obj is child_obj:
result_idx = idx
break
if result_idx is None:
raise ValueError("Didn't found child {}".format(child_obj))
return "/".join([self.path, str(result_idx)])
@property
def value(self):
output = []
for child_obj in self.children:
output.append(child_obj.value)
return output
def set(self, value):
new_value = self.convert_to_valid_type(value)
for idx, item in enumerate(new_value):
self.children[idx].set(item)
def collect_dynamic_schema_entities(self, collector):
if self.is_dynamic_schema_node:
collector.add_entity(self)
def settings_value(self):
if self._override_state is OverrideState.NOT_DEFINED:
return NOT_SET
if (
self.is_group
and self._override_state is not OverrideState.DEFAULTS
):
if self._override_state is OverrideState.STUDIO:
if not self.has_studio_override:
return NOT_SET
elif self._override_state is OverrideState.PROJECT:
if not self.has_project_override:
return NOT_SET
output = []
for child_obj in self.children:
output.append(child_obj.settings_value())
return output
def on_change(self):
for callback in self.on_change_callbacks:
callback()
self.parent.on_child_change(self)
def on_child_change(self, _child_obj):
if self._ignore_child_changes:
return
if self._override_state is OverrideState.STUDIO:
self._has_studio_override = self._child_has_studio_override
elif self._override_state is OverrideState.PROJECT:
self._has_project_override = self._child_has_project_override
self.on_change()
@property
def has_unsaved_changes(self):
if self._override_state is OverrideState.NOT_DEFINED:
return False
if self._override_state is OverrideState.DEFAULTS:
if not self.has_default_value:
return True
elif self._override_state is OverrideState.STUDIO:
if self.had_studio_override != self._has_studio_override:
return True
if not self._has_studio_override and not self.has_default_value:
return True
elif self._override_state is OverrideState.PROJECT:
if self.had_project_override != self._has_project_override:
return True
if (
not self._has_project_override
and not self._has_studio_override
and not self.has_default_value
):
return True
if self._child_has_unsaved_changes:
return True
if self.settings_value() != self.initial_value:
return True
return False
@property
def has_studio_override(self):
return self._has_studio_override or self._child_has_studio_override
@property
def has_project_override(self):
return self._has_project_override or self._child_has_project_override
@property
def _child_has_unsaved_changes(self):
for child_obj in self.children:
if child_obj.has_unsaved_changes:
return True
return False
@property
def _child_has_studio_override(self):
for child_obj in self.children:
if child_obj.has_studio_override:
return True
return False
@property
def _child_has_project_override(self):
for child_obj in self.children:
if child_obj.has_project_override:
return True
return False
def set_override_state(self, state, ignore_missing_defaults):
# Trigger override state change of root if is not same
if self.root_item.override_state is not state:
self.root_item.set_override_state(state)
return
self._override_state = state
self._ignore_missing_defaults = ignore_missing_defaults
# Ignore if is dynamic item and use default in that case
if not self.is_dynamic_item and not self.is_in_dynamic_item:
if state > OverrideState.DEFAULTS:
if (
not self.has_default_value
and not ignore_missing_defaults
):
raise DefaultsNotDefined(self)
elif state > OverrideState.STUDIO:
if (
not self.had_studio_override
and not ignore_missing_defaults
):
raise StudioDefaultsNotDefined(self)
for child_entity in self.children:
child_entity.set_override_state(state, ignore_missing_defaults)
self.initial_value = self.settings_value()
def _discard_changes(self, on_change_trigger):
for child_obj in self.children:
child_obj.discard_changes(on_change_trigger)
def _add_to_studio_default(self, _on_change_trigger):
self._has_studio_override = True
self.on_change()
def _remove_from_studio_default(self, on_change_trigger):
self._ignore_child_changes = True
for child_obj in self.children:
child_obj.remove_from_studio_default(on_change_trigger)
self._ignore_child_changes = False
self._has_studio_override = False
def _add_to_project_override(self, _on_change_trigger):
self._has_project_override = True
self.on_change()
def _remove_from_project_override(self, on_change_trigger):
self._ignore_child_changes = True
for child_obj in self.children:
child_obj.remove_from_project_override(on_change_trigger)
self._ignore_child_changes = False
self._has_project_override = False
def _check_update_value(self, value, value_type):
value = super(ListStrictEntity, self)._check_update_value(
value, value_type
)
if value is NOT_SET:
return value
child_len = len(self.children)
value_len = len(value)
if value_len == child_len:
return value
self.log.warning(
(
"{} Amount of strict list items in {} values is"
" not same as expected. Expected {} items. Got {} items. {}"
).format(
self.path, value_type,
child_len, value_len, str(value)
)
)
if value_len < child_len:
# Fill missing values with NOT_SET
for _ in range(child_len - value_len):
value.append(NOT_SET)
else:
            # Pop extra values beyond the expected count
for _ in range(value_len - child_len):
value.pop(child_len)
return value
def update_default_value(self, value):
value = self._check_update_value(value, "default")
self.has_default_value = value is not NOT_SET
if value is NOT_SET:
for child_obj in self.children:
child_obj.update_default_value(value)
else:
for idx, item_value in enumerate(value):
self.children[idx].update_default_value(item_value)
def update_studio_value(self, value):
value = self._check_update_value(value, "studio override")
if value is NOT_SET:
for child_obj in self.children:
child_obj.update_studio_value(value)
else:
for idx, item_value in enumerate(value):
self.children[idx].update_studio_value(item_value)
def update_project_value(self, value):
value = self._check_update_value(value, "project override")
if value is NOT_SET:
for child_obj in self.children:
child_obj.update_project_value(value)
else:
for idx, item_value in enumerate(value):
self.children[idx].update_project_value(item_value)
def reset_callbacks(self):
super(ListStrictEntity, self).reset_callbacks()
for child_obj in self.children:
child_obj.reset_callbacks()
```
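For reference, sketches of schema data that would exercise the two entity types above; the key names match what `_item_initialization` reads, while the concrete keys and child types are illustrative:
```python
# PathEntity: one path input per platform, single value each.
path_schema = {
    "type": "path",
    "key": "ffmpeg_path",          # illustrative key
    "multiplatform": True,
    "multipath": False,
    "placeholder": "Path to ffmpeg executable"
}

# ListStrictEntity: fixed pair of children laid out horizontally.
resolution_schema = {
    "type": "list-strict",
    "key": "resolution",           # illustrative key
    "horizontal": True,
    "object_types": [
        {"type": "number", "key": "width", "label": "Width"},
        {"type": "number", "key": "height", "label": "Height"}
    ]
}
```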
#### File: openpype/settings/exceptions.py
```python
class SaveSettingsValidation(Exception):
pass
class SaveWarningExc(SaveSettingsValidation):
def __init__(self, warnings):
if isinstance(warnings, str):
warnings = [warnings]
self.warnings = warnings
msg = " | ".join(warnings)
super(SaveWarningExc, self).__init__(msg)
```
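A short sketch of the intended usage: saving succeeds but the caller is informed about non-fatal issues collected along the way (the save routine is hypothetical):
```python
def save_settings_sketch(values):
    """Hypothetical save routine demonstrating SaveWarningExc."""
    warnings = []
    if "deprecated_key" in values:
        warnings.append("'deprecated_key' is deprecated and was ignored.")
    # ... perform the actual save here ...
    if warnings:
        raise SaveWarningExc(warnings)


try:
    save_settings_sketch({"deprecated_key": True})
except SaveWarningExc as exc:
    for warning in exc.warnings:
        print(warning)
```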
#### File: openpype/style/__init__.py
```python
import os
import json
import collections
import six
from openpype import resources
from .color_defs import parse_color
current_dir = os.path.dirname(os.path.abspath(__file__))
class _Cache:
stylesheet = None
font_ids = None
tools_icon_color = None
default_entity_icon_color = None
disabled_entity_icon_color = None
deprecated_entity_font_color = None
def get_style_image_path(image_name):
# All filenames are lowered
image_name = image_name.lower()
    # Make sure filename has png extension
if not image_name.endswith(".png"):
image_name += ".png"
filepath = os.path.join(current_dir, "images", image_name)
if os.path.exists(filepath):
return filepath
return None
def _get_colors_raw_data():
"""Read data file with stylesheet fill values.
Returns:
dict: Loaded data for stylesheet.
"""
data_path = os.path.join(current_dir, "data.json")
with open(data_path, "r") as data_stream:
data = json.load(data_stream)
return data
def get_colors_data():
"""Only color data from stylesheet data."""
data = _get_colors_raw_data()
return data.get("color") or {}
def _convert_color_values_to_objects(value):
"""Parse all string values in dictionary to Color definitions.
    Recursive function that calls itself if the value is a dictionary.
Args:
value (dict, str): String is parsed into color definition object and
dictionary is passed into this function.
Raises:
        TypeError: If a value in the color data is not a string or a dictionary.
"""
if isinstance(value, dict):
output = {}
for _key, _value in value.items():
output[_key] = _convert_color_values_to_objects(_value)
return output
if not isinstance(value, six.string_types):
raise TypeError((
"Unexpected type in colors data '{}'. Expected 'str' or 'dict'."
).format(str(type(value))))
return parse_color(value)
def get_objected_colors():
"""Colors parsed from stylesheet data into color definitions.
Returns:
dict: Parsed color objects by keys in data.
"""
colors_data = get_colors_data()
output = {}
for key, value in colors_data.items():
output[key] = _convert_color_values_to_objects(value)
return output
def _load_stylesheet():
"""Load strylesheet and trigger all related callbacks.
Style require more than a stylesheet string. Stylesheet string
contains paths to resources which must be registered into Qt application
and load fonts used in stylesheets.
Also replace values from stylesheet data into stylesheet text.
"""
from . import qrc_resources
qrc_resources.qInitResources()
style_path = os.path.join(current_dir, "style.css")
with open(style_path, "r") as style_file:
stylesheet = style_file.read()
data = _get_colors_raw_data()
data_deque = collections.deque()
for item in data.items():
data_deque.append(item)
fill_data = {}
while data_deque:
key, value = data_deque.popleft()
if isinstance(value, dict):
for sub_key, sub_value in value.items():
new_key = "{}:{}".format(key, sub_key)
data_deque.append((new_key, sub_value))
continue
fill_data[key] = value
for key, value in fill_data.items():
replacement_key = "{" + key + "}"
stylesheet = stylesheet.replace(replacement_key, value)
return stylesheet
def _load_font():
"""Load and register fonts into Qt application."""
from Qt import QtGui
# Check if font ids are still loaded
if _Cache.font_ids is not None:
for font_id in tuple(_Cache.font_ids):
font_families = QtGui.QFontDatabase.applicationFontFamilies(
font_id
)
# Reset font if font id is not available
if not font_families:
_Cache.font_ids = None
break
if _Cache.font_ids is None:
_Cache.font_ids = []
fonts_dirpath = os.path.join(current_dir, "fonts")
font_dirs = []
font_dirs.append(os.path.join(fonts_dirpath, "Noto_Sans"))
font_dirs.append(os.path.join(
fonts_dirpath,
"Noto_Sans_Mono",
"static",
"NotoSansMono"
))
loaded_fonts = []
for font_dir in font_dirs:
for filename in os.listdir(font_dir):
if os.path.splitext(filename)[1] not in [".ttf"]:
continue
full_path = os.path.join(font_dir, filename)
font_id = QtGui.QFontDatabase.addApplicationFont(full_path)
_Cache.font_ids.append(font_id)
font_families = QtGui.QFontDatabase.applicationFontFamilies(
font_id
)
loaded_fonts.extend(font_families)
print("Registered font families: {}".format(", ".join(loaded_fonts)))
def load_stylesheet():
"""Load and return OpenPype Qt stylesheet."""
if _Cache.stylesheet is None:
_Cache.stylesheet = _load_stylesheet()
_load_font()
return _Cache.stylesheet
def get_app_icon_path():
"""Path to OpenPype icon."""
return resources.get_openpype_icon_filepath()
def app_icon_path():
# Backwards compatibility
return get_app_icon_path()
def get_default_tools_icon_color():
"""Default color used in tool icons.
Color must be possible to parse using QColor.
Returns:
str: Color as a string.
"""
if _Cache.tools_icon_color is None:
color_data = get_colors_data()
_Cache.tools_icon_color = color_data["icon-tools"]
return _Cache.tools_icon_color
def get_default_entity_icon_color():
"""Default color of entities icons.
Color must be possible to parse using QColor.
Returns:
str: Color as a string.
"""
if _Cache.default_entity_icon_color is None:
color_data = get_colors_data()
_Cache.default_entity_icon_color = color_data["icon-entity-default"]
return _Cache.default_entity_icon_color
def get_disabled_entity_icon_color():
"""Default color of entities icons.
TODO: Find more suitable function name.
Color must be possible to parse using QColor.
Returns:
str: Color as a string.
"""
if _Cache.disabled_entity_icon_color is None:
color_data = get_colors_data()
_Cache.disabled_entity_icon_color = color_data["icon-entity-disabled"]
return _Cache.disabled_entity_icon_color
def get_deprecated_entity_font_color():
"""Font color for deprecated entities.
Color must be possible to parse using QColor.
Returns:
str: Color as a string.
"""
if _Cache.deprecated_entity_font_color is None:
color_data = get_colors_data()
_Cache.deprecated_entity_font_color = (
color_data["font-entity-deprecated"]
)
return _Cache.deprecated_entity_font_color
```
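Typical consumption of this module is applying the cached stylesheet to a Qt widget; a minimal sketch (requires a Qt binding available to `Qt.py`):
```python
from Qt import QtWidgets, QtGui

from openpype.style import load_stylesheet, get_app_icon_path

app = QtWidgets.QApplication([])
window = QtWidgets.QMainWindow()
window.setWindowIcon(QtGui.QIcon(get_app_icon_path()))
window.setStyleSheet(load_stylesheet())
window.show()
app.exec_()
```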
#### File: openpype/style/qrc_resources.py
```python
import Qt
initialized = False
resources = None
if Qt.__binding__ == "PySide2":
from . import pyside2_resources as resources
elif Qt.__binding__ == "PyQt5":
from . import pyqt5_resources as resources
def qInitResources():
global resources
global initialized
if resources is not None and not initialized:
initialized = True
resources.qInitResources()
def qCleanupResources():
global resources
global initialized
if resources is not None:
initialized = False
resources.qCleanupResources()
__all__ = (
"resources",
"qInitResources",
"qCleanupResources"
)
```
#### File: openpype/tests/mongo_performance.py
```python
import pymongo
import bson
import random
from datetime import datetime
import os
class TestPerformance():
'''
    Class for testing performance of representations and their 'files'
parts.
Discussion is if embedded array:
'files' : [ {'_id': '1111', 'path':'....},
{'_id'...}]
OR documents:
'files' : {
'1111': {'path':'....'},
'2222': {'path':'...'}
}
is faster.
Current results:
        without an additional partial index the document version is 3x faster
        with the index the array version is 50x faster than the document one
Partial index something like:
db.getCollection('performance_test').createIndex
({'files._id': 1},
            {partialFilterExpression: {'files': {'$exists': true}}})
    !DIDN'T work for me, had to create it manually in Compass
'''
MONGO_URL = 'mongodb://localhost:27017'
MONGO_DB = 'performance_test'
MONGO_COLLECTION = 'performance_test'
MAX_FILE_SIZE_B = 5000
MAX_NUMBER_OF_SITES = 50
ROOT_DIR = "C:/projects"
inserted_ids = []
def __init__(self, version='array'):
'''
It creates and fills collection, based on value of 'version'.
:param version: 'array' - files as embedded array,
'doc' - as document
'''
self.client = pymongo.MongoClient(self.MONGO_URL)
self.db = self.client[self.MONGO_DB]
self.collection_name = self.MONGO_COLLECTION
self.version = version
if self.version != 'array':
self.collection_name = self.MONGO_COLLECTION + '_doc'
self.collection = self.db[self.collection_name]
self.ids = [] # for testing
self.inserted_ids = []
def prepare(self, no_of_records=100000, create_files=False):
'''
        Produces 'no_of_records' representations with a 'files' segment.
        Its shape depends on the 'version' value from the constructor: 'array' or 'doc'.
:return:
'''
print('Purging {} collection'.format(self.collection_name))
self.collection.delete_many({})
id = bson.objectid.ObjectId()
insert_recs = []
for i in range(no_of_records):
file_id = bson.objectid.ObjectId()
file_id2 = bson.objectid.ObjectId()
file_id3 = bson.objectid.ObjectId()
self.inserted_ids.extend([file_id, file_id2, file_id3])
version_str = "v{:03d}".format(i + 1)
file_name = "test_Cylinder_workfileLookdev_{}.mb".\
format(version_str)
document = {"files": self.get_files(self.version, i + 1,
file_id, file_id2, file_id3,
create_files)
,
"context": {
"subset": "workfileLookdev",
"username": "petrk",
"task": "lookdev",
"family": "workfile",
"hierarchy": "Assets",
"project": {"code": "test", "name": "Test"},
"version": i + 1,
"asset": "Cylinder",
"representation": "mb",
"root": self.ROOT_DIR
},
"dependencies": [],
"name": "mb",
"parent": {"oid": '{}'.format(id)},
"data": {
"path": "C:\\projects\\test_performance\\Assets\\Cylinder\\publish\\workfile\\workfileLookdev\\{}\\{}".format(version_str, file_name), # noqa: E501
"template": "{root[work]}\\{project[name]}\\{hierarchy}\\{asset}\\publish\\{family}\\{subset}\\v{version:0>3}\\{project[code]}_{asset}_{subset}_v{version:0>3}<_{output}><.{frame:0>4}>.{representation}" # noqa: E501
},
"type": "representation",
"schema": "openpype:representation-2.0"
}
insert_recs.append(document)
print('Prepared {} records in {} collection'.
format(no_of_records, self.collection_name))
self.collection.insert_many(insert_recs)
        # TODO refactor to produce a real array instead of needing the ugly regex
self.collection.insert_one({"inserted_id": self.inserted_ids})
print('-' * 50)
def run(self, queries=1000, loops=3):
'''
        Runs 'queries' searches against the collection, repeated 'loops' times.
        :param queries: how many .find(...) calls to issue per loop
        :param loops: how many loops of 'queries' searches to run
        :return: None
'''
print('Testing version {} on {}'.format(self.version,
self.collection_name))
        print('Queries run {} in {} loops'.format(queries, loops))
inserted_ids = list(self.collection.
find({"inserted_id": {"$exists": True}}))
import re
self.ids = re.findall("'[0-9a-z]*'", str(inserted_ids))
import time
found_cnt = 0
for _ in range(loops):
print('Starting loop {}'.format(_))
start = time.time()
for _ in range(queries):
# val = random.choice(self.ids)
# val = val.replace("'", '')
val = random.randint(0, 50)
print(val)
if (self.version == 'array'):
# prepared for partial index, without 'files': exists
# wont engage
found = self.collection.\
find({'files': {"$exists": True},
'files.sites.name': "local_{}".format(val)}).\
count()
else:
key = "files.{}".format(val)
found = self.collection.find_one({key: {"$exists": True}})
print("found {} records".format(found))
# if found:
# found_cnt += len(list(found))
end = time.time()
print('duration per loop {}'.format(end - start))
print("found_cnt {}".format(found_cnt))
def get_files(self, mode, i, file_id, file_id2, file_id3,
create_files=False):
'''
Wrapper to decide if 'array' or document version should be used
:param mode: 'array'|'doc'
:param i: step number
:param file_id: ObjectId of first dummy file
:param file_id2: ..
:param file_id3: ..
:return:
'''
if mode == 'array':
return self.get_files_array(i, file_id, file_id2, file_id3,
create_files)
else:
return self.get_files_doc(i, file_id, file_id2, file_id3)
def get_files_array(self, i, file_id, file_id2, file_id3,
create_files=False):
ret = [
{
"path": "{root[work]}" + "{root[work]}/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/v{:03d}/test_Cylinder_A_workfileLookdev_v{:03d}.dat".format(i, i), # noqa: E501
"_id": '{}'.format(file_id),
"hash": "temphash",
"sites": self.get_sites(self.MAX_NUMBER_OF_SITES),
"size": random.randint(0, self.MAX_FILE_SIZE_B)
},
{
"path": "{root[work]}" + "/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/v{:03d}/test_Cylinder_B_workfileLookdev_v{:03d}.dat".format(i, i), # noqa: E501
"_id": '{}'.format(file_id2),
"hash": "temphash",
"sites": self.get_sites(self.MAX_NUMBER_OF_SITES),
"size": random.randint(0, self.MAX_FILE_SIZE_B)
},
{
"path": "{root[work]}" + "/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/v{:03d}/test_Cylinder_C_workfileLookdev_v{:03d}.dat".format(i, i), # noqa: E501
"_id": '{}'.format(file_id3),
"hash": "temphash",
"sites": self.get_sites(self.MAX_NUMBER_OF_SITES),
"size": random.randint(0, self.MAX_FILE_SIZE_B)
}
]
if create_files:
for f in ret:
path = f.get("path").replace("{root[work]}", self.ROOT_DIR)
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'wb') as fp:
fp.write(os.urandom(f.get("size")))
return ret
def get_files_doc(self, i, file_id, file_id2, file_id3):
ret = {}
ret['{}'.format(file_id)] = {
"path": "{root[work]}" +
"/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/" # noqa: E501
"v{:03d}/test_CylinderA_workfileLookdev_v{:03d}.mb".format(i, i), # noqa: E501
"hash": "temphash",
"sites": ["studio"],
"size": 87236
}
ret['{}'.format(file_id2)] = {
"path": "{root[work]}" +
"/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/" # noqa: E501
"v{:03d}/test_CylinderB_workfileLookdev_v{:03d}.mb".format(i, i), # noqa: E501
"hash": "temphash",
"sites": ["studio"],
"size": 87236
}
ret['{}'.format(file_id3)] = {
"path": "{root[work]}" +
"/test_performance/Assets/Cylinder/publish/workfile/workfileLookdev/" # noqa: E501
"v{:03d}/test_CylinderC_workfileLookdev_v{:03d}.mb".format(i, i), # noqa: E501
"hash": "temphash",
"sites": ["studio"],
"size": 87236
}
return ret
def get_sites(self, number_of_sites=50):
"""
        Returns an array of site declarations.
        Currently only the 1st site has "created_dt" filled, which should
        trigger an upload to the 'gdrive' site.
        The 'gdrive' site is appended as the destination the Sync Server
        syncs to.
Args:
number_of_sites:
Returns:
"""
sites = []
for i in range(number_of_sites):
site = {'name': "local_{}".format(i)}
            # do not create a null 'created_dt' field, Mongo doesn't like it
if i == 0:
site['created_dt'] = datetime.now()
sites.append(site)
sites.append({'name': "gdrive"})
return sites
if __name__ == '__main__':
tp = TestPerformance('array')
tp.prepare(no_of_records=10000, create_files=True)
# tp.run(10, 3)
# print('-'*50)
#
# tp = TestPerformance('doc')
# tp.prepare() # enable to prepare data
# tp.run(1000, 3)
```
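A hedged sketch of how both storage variants could be compared in a single session; it assumes a MongoDB instance is listening on `MONGO_URL` and that the module is importable as `openpype.tests.mongo_performance`:

```python
from openpype.tests.mongo_performance import TestPerformance

# Compare the embedded-array layout against the keyed-document layout.
for variant in ("array", "doc"):
    tester = TestPerformance(variant)
    # A smaller record count than the default keeps the comparison quick;
    # create_files=False avoids writing dummy files to ROOT_DIR.
    tester.prepare(no_of_records=1000, create_files=False)
    tester.run(queries=100, loops=3)
```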
#### File: tools/assetlinks/widgets.py
```python
from Qt import QtWidgets
class SimpleLinkView(QtWidgets.QWidget):
def __init__(self, dbcon, parent=None):
super(SimpleLinkView, self).__init__(parent=parent)
self.dbcon = dbcon
# TODO: display selected target
in_text = QtWidgets.QLabel("Inputs")
in_view = QtWidgets.QListWidget(parent=self)
out_text = QtWidgets.QLabel("Outputs")
out_view = QtWidgets.QListWidget(parent=self)
layout = QtWidgets.QGridLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(in_text, 0, 0)
layout.addWidget(in_view, 1, 0)
layout.addWidget(out_text, 0, 1)
layout.addWidget(out_view, 1, 1)
self._in_view = in_view
self._out_view = out_view
def clear(self):
self._in_view.clear()
self._out_view.clear()
def set_version(self, version_doc):
self.clear()
if not version_doc or not self.isVisible():
return
# inputs
#
for link in version_doc["data"].get("inputLinks", []):
# Backwards compatibility for "input" key used as "id"
if "id" not in link:
link_id = link["input"]
else:
link_id = link["id"]
version = self.dbcon.find_one(
{"_id": link_id, "type": "version"},
projection={"name": 1, "parent": 1}
)
if not version:
continue
subset = self.dbcon.find_one(
{"_id": version["parent"], "type": "subset"},
projection={"name": 1, "parent": 1}
)
if not subset:
continue
asset = self.dbcon.find_one(
{"_id": subset["parent"], "type": "asset"},
projection={"name": 1}
)
self._in_view.addItem("{asset} {subset} v{version:0>3}".format(
asset=asset["name"],
subset=subset["name"],
version=version["name"],
))
# outputs
#
outputs = self.dbcon.find(
{"type": "version", "data.inputLinks.input": version_doc["_id"]},
projection={"name": 1, "parent": 1}
)
for version in outputs or []:
subset = self.dbcon.find_one(
{"_id": version["parent"], "type": "subset"},
projection={"name": 1, "parent": 1}
)
if not subset:
continue
asset = self.dbcon.find_one(
{"_id": subset["parent"], "type": "asset"},
projection={"name": 1}
)
self._out_view.addItem("{asset} {subset} v{version:0>3}".format(
asset=asset["name"],
subset=subset["name"],
version=version["name"],
))
```
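A minimal embedding sketch for the widget above; `dbcon` is assumed to be an AvalonMongoDB-style connection with the active project selected, `version_doc` a version document fetched elsewhere, and the import path is assumed to be `openpype.tools.assetlinks.widgets`:

```python
from Qt import QtWidgets

from openpype.tools.assetlinks.widgets import SimpleLinkView

def show_links(dbcon, version_doc):
    """Open a standalone window listing input/output links of a version."""
    app = QtWidgets.QApplication.instance() or QtWidgets.QApplication([])
    view = SimpleLinkView(dbcon)
    view.show()
    # set_version() returns early for hidden widgets, so call it after show().
    view.set_version(version_doc)
    app.exec_()
```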
#### File: project_manager/project_manager/model.py
```python
import collections
import copy
import json
from uuid import uuid4
from pymongo import UpdateOne, DeleteOne
from Qt import QtCore, QtGui
from .constants import (
IDENTIFIER_ROLE,
ITEM_TYPE_ROLE,
DUPLICATED_ROLE,
HIERARCHY_CHANGE_ABLE_ROLE,
REMOVED_ROLE,
EDITOR_OPENED_ROLE,
PROJECT_NAME_ROLE
)
from .style import ResourceCache
from openpype.lib import CURRENT_DOC_SCHEMAS
class ProjectModel(QtGui.QStandardItemModel):
"""Load possible projects to modify from MongoDB.
    Mongo collection must contain a project document with "type" "project"
    and a "name" value matching the name of the collection.
"""
def __init__(self, dbcon, *args, **kwargs):
self.dbcon = dbcon
self._items_by_name = {}
super(ProjectModel, self).__init__(*args, **kwargs)
def refresh(self):
"""Reload projects."""
self.dbcon.Session["AVALON_PROJECT"] = None
new_project_items = []
if None not in self._items_by_name:
none_project = QtGui.QStandardItem("< Select Project >")
self._items_by_name[None] = none_project
new_project_items.append(none_project)
project_docs = self.dbcon.projects(
projection={"name": 1},
only_active=True
)
project_names = set()
for project_doc in project_docs:
project_name = project_doc.get("name")
if not project_name:
continue
project_names.add(project_name)
if project_name not in self._items_by_name:
project_item = QtGui.QStandardItem(project_name)
project_item.setData(project_name, PROJECT_NAME_ROLE)
self._items_by_name[project_name] = project_item
new_project_items.append(project_item)
root_item = self.invisibleRootItem()
for project_name in tuple(self._items_by_name.keys()):
if project_name is None or project_name in project_names:
continue
project_item = self._items_by_name.pop(project_name)
root_item.removeRow(project_item.row())
if new_project_items:
root_item.appendRows(new_project_items)
class ProjectProxyFilter(QtCore.QSortFilterProxyModel):
"""Filters default project item."""
def __init__(self, *args, **kwargs):
super(ProjectProxyFilter, self).__init__(*args, **kwargs)
self._filter_default = False
def set_filter_default(self, enabled=True):
"""Set if filtering of default item is enabled."""
if enabled == self._filter_default:
return
self._filter_default = enabled
self.invalidateFilter()
def filterAcceptsRow(self, row, parent):
if not self._filter_default:
return True
model = self.sourceModel()
source_index = model.index(row, self.filterKeyColumn(), parent)
return source_index.data(PROJECT_NAME_ROLE) is not None
class HierarchySelectionModel(QtCore.QItemSelectionModel):
"""Selection model with defined allowed multiselection columns.
This model allows to select multiple rows and enter one of their
editors to edit value of all selected rows.
"""
def __init__(self, multiselection_columns, *args, **kwargs):
super(HierarchySelectionModel, self).__init__(*args, **kwargs)
self.multiselection_columns = multiselection_columns
def setCurrentIndex(self, index, command):
if index.column() in self.multiselection_columns:
if (
command & QtCore.QItemSelectionModel.Clear
and command & QtCore.QItemSelectionModel.Select
):
command = QtCore.QItemSelectionModel.NoUpdate
super(HierarchySelectionModel, self).setCurrentIndex(index, command)
class HierarchyModel(QtCore.QAbstractItemModel):
"""Main model for hierarchy modification and value changes.
Main part of ProjectManager.
Model should be able to load existing entities, create new, handle their
validations like name duplication and validate if is possible to save its
data.
Args:
        dbcon (AvalonMongoDB): Connection to MongoDB with AVALON_PROJECT in
            its Session set to the current project.
"""
# Definition of all possible columns with their labels in default order
# - order is important as column names are used as keys for column indexes
_columns_def = [
("name", "Name"),
("type", "Type"),
("fps", "FPS"),
("frameStart", "Frame start"),
("frameEnd", "Frame end"),
("handleStart", "Handle start"),
("handleEnd", "Handle end"),
("resolutionWidth", "Width"),
("resolutionHeight", "Height"),
("clipIn", "Clip in"),
("clipOut", "Clip out"),
("pixelAspect", "Pixel aspect"),
("tools_env", "Tools")
]
# Columns allowing multiselection in edit mode
# - gives ability to set all of keys below on multiple items at once
multiselection_columns = {
"frameStart",
"frameEnd",
"fps",
"resolutionWidth",
"resolutionHeight",
"handleStart",
"handleEnd",
"clipIn",
"clipOut",
"pixelAspect",
"tools_env"
}
columns = [
item[0]
for item in _columns_def
]
columns_len = len(columns)
column_labels = {
idx: item[1]
for idx, item in enumerate(_columns_def)
}
index_moved = QtCore.Signal(QtCore.QModelIndex)
project_changed = QtCore.Signal()
def __init__(self, dbcon, parent=None):
super(HierarchyModel, self).__init__(parent)
self.multiselection_column_indexes = {
self.columns.index(key)
for key in self.multiselection_columns
}
# TODO Reset them on project change
self._current_project = None
self._root_item = None
self._items_by_id = {}
self._asset_items_by_name = collections.defaultdict(set)
self.dbcon = dbcon
self._reset_root_item()
@property
def items_by_id(self):
return self._items_by_id
def _reset_root_item(self):
"""Removes all previous content related to model."""
self._root_item = RootItem(self)
def refresh_project(self):
"""Reload project data and discard unsaved changes."""
self.set_project(self._current_project, True)
@property
def project_item(self):
"""Access to current project item.
Model can have 0-1 ProjectItems at once.
"""
output = None
for row in range(self._root_item.rowCount()):
item = self._root_item.child(row)
if isinstance(item, ProjectItem):
output = item
break
return output
def set_project(self, project_name, force=False):
"""Change project and discard unsaved changes.
Args:
project_name(str): New project name. Or None if just clearing
content.
force(bool): Force to change project even if project name is same
as current project.
"""
if self._current_project == project_name and not force:
return
# Reset attributes
self._items_by_id.clear()
self._asset_items_by_name.clear()
self.clear()
self._current_project = project_name
# Skip if project is None
if not project_name:
return
        # Find the project's document
project_doc = self.dbcon.database[project_name].find_one(
{"type": "project"},
ProjectItem.query_projection
)
# Skip if project document does not exist
# - this shouldn't happen using only UI elements
if not project_doc:
return
# Create project item
project_item = ProjectItem(project_doc)
self.add_item(project_item)
# Query all assets of the project
asset_docs = self.dbcon.database[project_name].find(
{"type": "asset"},
AssetItem.query_projection
)
asset_docs_by_id = {
asset_doc["_id"]: asset_doc
for asset_doc in asset_docs
}
        # Check if assets have published content and prepare booleans
        # telling whether an asset item can be modified (name and hierarchy change)
        # - the same must be applied to all of its parents
asset_ids = list(asset_docs_by_id.keys())
result = []
if asset_ids:
result = self.dbcon.database[project_name].aggregate([
{
"$match": {
"type": "subset",
"parent": {"$in": asset_ids}
}
},
{
"$group": {
"_id": "$parent",
"count": {"$sum": 1}
}
}
])
asset_modifiable = {
asset_id: True
for asset_id in asset_docs_by_id.keys()
}
for item in result:
asset_id = item["_id"]
count = item["count"]
asset_modifiable[asset_id] = count < 1
        # Store assets by their visual parent to be able to create their hierarchy
asset_docs_by_parent_id = collections.defaultdict(list)
for asset_doc in asset_docs_by_id.values():
parent_id = asset_doc["data"].get("visualParent")
asset_docs_by_parent_id[parent_id].append(asset_doc)
appending_queue = collections.deque()
appending_queue.append((None, project_item))
asset_items_by_id = {}
non_modifiable_items = set()
while appending_queue:
parent_id, parent_item = appending_queue.popleft()
asset_docs = asset_docs_by_parent_id.get(parent_id) or []
new_items = []
for asset_doc in sorted(asset_docs, key=lambda item: item["name"]):
# Create new Item
new_item = AssetItem(asset_doc)
# Store item to be added under parent in bulk
new_items.append(new_item)
# Store item by id for task processing
asset_id = asset_doc["_id"]
if not asset_modifiable[asset_id]:
non_modifiable_items.add(new_item.id)
asset_items_by_id[asset_id] = new_item
# Add item to appending queue
appending_queue.append((asset_id, new_item))
if new_items:
self.add_items(new_items, parent_item)
        # Handle assets that are not modifiable
        # - pass the information to all of their parents
non_modifiable_queue = collections.deque()
for item_id in non_modifiable_items:
non_modifiable_queue.append(item_id)
while non_modifiable_queue:
item_id = non_modifiable_queue.popleft()
item = self._items_by_id[item_id]
item.setData(False, HIERARCHY_CHANGE_ABLE_ROLE)
parent = item.parent()
if (
isinstance(parent, AssetItem)
and parent.id not in non_modifiable_items
):
non_modifiable_items.add(parent.id)
non_modifiable_queue.append(parent.id)
# Add task items
for asset_id, asset_item in asset_items_by_id.items():
asset_doc = asset_docs_by_id[asset_id]
asset_tasks = asset_doc["data"]["tasks"]
if not asset_tasks:
continue
task_items = []
for task_name in sorted(asset_tasks.keys()):
_task_data = copy.deepcopy(asset_tasks[task_name])
_task_data["name"] = task_name
task_item = TaskItem(_task_data)
task_items.append(task_item)
self.add_items(task_items, asset_item)
# Emit that project was successfully changed
self.project_changed.emit()
def rowCount(self, parent=None):
"""Number of rows for passed parent."""
if parent is None or not parent.isValid():
parent_item = self._root_item
else:
parent_item = parent.internalPointer()
return parent_item.rowCount()
def columnCount(self, *args, **kwargs):
"""Number of columns is static for this model."""
return self.columns_len
def data(self, index, role):
"""Access data for passed index and it's role.
Model is using principles implemented in BaseItem so converts passed
index column into key and ask item to return value for passed role.
"""
if not index.isValid():
return None
column = index.column()
key = self.columns[column]
item = index.internalPointer()
return item.data(role, key)
def setData(self, index, value, role=QtCore.Qt.EditRole):
"""Store data to passed index under role.
        Passes values to the corresponding item and behaves based on its result.
"""
if not index.isValid():
return False
item = index.internalPointer()
column = index.column()
key = self.columns[column]
        # Capture asset name changes for duplicated asset name validation.
if (
key == "name"
and role in (QtCore.Qt.EditRole, QtCore.Qt.DisplayRole)
):
self._rename_asset(item, value)
        # Pass values to the item and emit dataChanged signal based on the result
result = item.setData(value, role, key)
if result:
self.dataChanged.emit(index, index, [role])
return result
def headerData(self, section, orientation, role):
"""Header labels."""
if role == QtCore.Qt.DisplayRole:
if section < self.columnCount():
return self.column_labels[section]
return super(HierarchyModel, self).headerData(
section, orientation, role
)
def flags(self, index):
"""Index flags are defined by corresponding item."""
item = index.internalPointer()
if item is None:
return QtCore.Qt.NoItemFlags
column = index.column()
key = self.columns[column]
return item.flags(key)
def parent(self, index=None):
"""Parent for passed index as QModelIndex.
Args:
            index(QModelIndex): Index whose parent should be returned.
"""
if not index.isValid():
return QtCore.QModelIndex()
item = index.internalPointer()
parent_item = item.parent()
# If it has no parents we return invalid
if not parent_item or parent_item is self._root_item:
return QtCore.QModelIndex()
return self.createIndex(parent_item.row(), 0, parent_item)
def index(self, row, column, parent=None):
"""Return index for row/column under parent.
Args:
row(int): Row number.
column(int): Column number.
parent(QModelIndex): Parent index. Root item is used if not passed.
"""
parent_item = None
if parent is not None and parent.isValid():
parent_item = parent.internalPointer()
return self.index_from_item(row, column, parent_item)
def index_for_item(self, item, column=0):
"""Index for passed item.
        This is for cases where index operations are required on a specific item.
Args:
item(BaseItem): Item from model that will be converted to
corresponding QModelIndex.
column(int): Which column will be part of returned index. By
default is used column 0.
"""
return self.index_from_item(
item.row(), column, item.parent()
)
def index_from_item(self, row, column, parent=None):
"""Index for passed row, column and parent item.
Same implementation as `index` method but "parent" is one of
BaseItem objects instead of QModelIndex.
Args:
row(int): Row number.
column(int): Column number.
parent(BaseItem): Parent item. Root item is used if not passed.
"""
if parent is None:
parent = self._root_item
child_item = parent.child(row)
if child_item:
return self.createIndex(row, column, child_item)
return QtCore.QModelIndex()
def add_new_asset(self, source_index):
"""Create new asset item in hierarchy.
Args:
source_index(QModelIndex): Parent under which new asset will be
added.
"""
item_id = source_index.data(IDENTIFIER_ROLE)
item = self.items_by_id[item_id]
if isinstance(item, TaskItem):
item = item.parent()
if isinstance(item, (RootItem, ProjectItem)):
name = "ep"
new_row = None
elif isinstance(item, AssetItem):
name = None
new_row = item.rowCount()
else:
return
asset_data = {}
if name:
asset_data["name"] = name
new_child = AssetItem(asset_data)
result = self.add_item(new_child, item, new_row)
if result is not None:
# WARNING Expecting result is index for column 0 ("name")
new_name = result.data(QtCore.Qt.EditRole)
self._validate_asset_duplicity(new_name)
return result
def add_new_task(self, parent_index):
"""Create new TaskItem under passed parent index or it's parent.
Args:
parent_index(QModelIndex): Index of parent AssetItem under which
will be task added. If index represents TaskItem it's parent is
used as parent.
"""
item_id = parent_index.data(IDENTIFIER_ROLE)
item = self.items_by_id[item_id]
if isinstance(item, TaskItem):
parent = item.parent()
else:
parent = item
if not isinstance(parent, AssetItem):
return None
new_child = TaskItem()
return self.add_item(new_child, parent)
def add_items(self, items, parent=None, start_row=None):
"""Add new items with definition of QAbstractItemModel.
Trigger `beginInsertRows` and `endInsertRows` to trigger proper
callbacks in view or proxy model.
Args:
items(list[BaseItem]): List of item that will be inserted in model.
parent(RootItem, ProjectItem, AssetItem): Parent of items under
which will be items added. Root item is used if not passed.
start_row(int): Define to which row will be items added. Next
available row of parent is used if not passed.
"""
if parent is None:
parent = self._root_item
if parent.data(REMOVED_ROLE):
return []
if start_row is None:
start_row = parent.rowCount()
end_row = start_row + len(items) - 1
parent_index = self.index_from_item(parent.row(), 0, parent.parent())
self.beginInsertRows(parent_index, start_row, end_row)
for idx, item in enumerate(items):
row = start_row + idx
if item.parent() is not parent:
item.set_parent(parent)
parent.add_child(item, row)
if isinstance(item, AssetItem):
name = item.data(QtCore.Qt.EditRole, "name")
self._asset_items_by_name[name].add(item.id)
if item.id not in self._items_by_id:
self._items_by_id[item.id] = item
self.endInsertRows()
indexes = []
for row in range(start_row, end_row + 1):
indexes.append(
self.index_from_item(row, 0, parent)
)
return indexes
def add_item(self, item, parent=None, row=None):
"""Add single item into model."""
result = self.add_items([item], parent, row)
if result:
return result[0]
return None
def remove_delete_flag(self, item_ids, with_children=True):
"""Remove deletion flag from items with matching ids.
        The flag is also removed from all parents of the passed items as it
        wouldn't make sense not to do so.
        Children of passed item ids are by default also unset for deletion.
        Args:
            item_ids(list[uuid4]): Ids of model items where the removed flag
                should be unset.
with_children(bool): Unset remove flag also on all children of
passed items.
"""
items_by_id = {}
for item_id in item_ids:
if item_id in items_by_id:
continue
item = self.items_by_id[item_id]
if isinstance(item, (AssetItem, TaskItem)):
items_by_id[item_id] = item
for item in tuple(items_by_id.values()):
parent = item.parent()
while True:
if not isinstance(parent, (AssetItem, TaskItem)):
break
if parent.id not in items_by_id:
items_by_id[parent.id] = parent
parent = parent.parent()
if not with_children:
continue
def _children_recursion(_item):
if not isinstance(_item, AssetItem):
return
for row in range(_item.rowCount()):
_child_item = _item.child(row)
if _child_item.id in items_by_id:
continue
items_by_id[_child_item.id] = _child_item
_children_recursion(_child_item)
_children_recursion(item)
for item in items_by_id.values():
if item.data(REMOVED_ROLE):
item.setData(False, REMOVED_ROLE)
if isinstance(item, AssetItem):
name = item.data(QtCore.Qt.EditRole, "name")
self._asset_items_by_name[name].add(item.id)
self._validate_asset_duplicity(name)
def delete_index(self, index):
"""Delete item of the index from model."""
return self.delete_indexes([index])
def delete_indexes(self, indexes):
"""Delete items from model."""
items_by_id = {}
processed_ids = set()
for index in indexes:
if not index.isValid():
continue
item_id = index.data(IDENTIFIER_ROLE)
# There may be indexes for multiple columns
if item_id not in processed_ids:
processed_ids.add(item_id)
item = self._items_by_id[item_id]
if isinstance(item, (TaskItem, AssetItem)):
items_by_id[item_id] = item
if not items_by_id:
return
for item in items_by_id.values():
self._remove_item(item)
def _remove_item(self, item):
"""Remove item from model or mark item for deletion.
        Deleted items use the QAbstractItemModel mechanism of calling
        `beginRemoveRows` and `endRemoveRows` to trigger proper view and proxy
        model callbacks.
        The item is not always removed directly; it is checked whether it can
        be removed from the model or should only be marked for deletion until
        save.
        First all related children are found and, based on their attributes,
        it is decided whether the item can be removed.
"""
# Skip if item is already marked for deletion
is_removed = item.data(REMOVED_ROLE)
if is_removed:
return
parent = item.parent()
# Find all descendants and store them by parent id
all_descendants = collections.defaultdict(dict)
all_descendants[parent.id][item.id] = item
def _fill_children(_all_descendants, cur_item, parent_item=None):
if parent_item is not None:
_all_descendants[parent_item.id][cur_item.id] = cur_item
if isinstance(cur_item, TaskItem):
was_removed = cur_item.data(REMOVED_ROLE)
task_removed = True
if not was_removed and parent_item is not None:
task_removed = parent_item.data(REMOVED_ROLE)
if not was_removed:
cur_item.setData(task_removed, REMOVED_ROLE)
return task_removed
remove_item = True
task_children = []
for row in range(cur_item.rowCount()):
child_item = cur_item.child(row)
if isinstance(child_item, TaskItem):
task_children.append(child_item)
continue
if not _fill_children(_all_descendants, child_item, cur_item):
remove_item = False
if remove_item:
cur_item.setData(True, REMOVED_ROLE)
if isinstance(cur_item, AssetItem):
self._rename_asset(cur_item, None)
            # Process tasks last because their logic is based on the parent
            # - tasks could otherwise be processed before the parent checked
            #   all of its asset children
for task_item in task_children:
_fill_children(_all_descendants, task_item, cur_item)
return remove_item
_fill_children(all_descendants, item)
modified_children = []
while all_descendants:
for parent_id in tuple(all_descendants.keys()):
children = all_descendants[parent_id]
if not children:
all_descendants.pop(parent_id)
continue
parent_children = {}
all_without_children = True
for child_id in tuple(children.keys()):
if child_id in all_descendants:
all_without_children = False
break
parent_children[child_id] = children[child_id]
if not all_without_children:
continue
# Row ranges of items to remove
# - store tuples of row "start", "end" (can be the same)
row_ranges = []
# Predefine start, end variables
start_row = end_row = None
chilren_by_row = {}
parent_item = self._items_by_id[parent_id]
for row in range(parent_item.rowCount()):
child_item = parent_item.child(row)
child_id = child_item.id
# Not sure if this can happen
# TODO validate this line it seems dangerous as start/end
# row is not changed
if child_id not in children:
continue
chilren_by_row[row] = child_item
children.pop(child_item.id)
removed_mark = child_item.data(REMOVED_ROLE)
if not removed_mark or not child_item.is_new:
# Skip row sequence store child for later processing
# and store current start/end row range
modified_children.append(child_item)
if end_row is not None:
row_ranges.append((start_row, end_row))
start_row = end_row = None
continue
end_row = row
if start_row is None:
start_row = row
if end_row is not None:
row_ranges.append((start_row, end_row))
if not row_ranges:
continue
# Remove items from model
parent_index = self.index_for_item(parent_item)
for start, end in row_ranges:
self.beginRemoveRows(parent_index, start, end)
for idx in range(start, end + 1):
child_item = chilren_by_row[idx]
# Force name validation
if isinstance(child_item, AssetItem):
self._rename_asset(child_item, None)
child_item.set_parent(None)
self._items_by_id.pop(child_item.id)
self.endRemoveRows()
# Trigger data change to repaint items
# - `BackgroundRole` is random role without any specific reason
for item in modified_children:
s_index = self.index_for_item(item)
e_index = self.index_for_item(item, column=self.columns_len - 1)
self.dataChanged.emit(s_index, e_index, [QtCore.Qt.BackgroundRole])
def _rename_asset(self, asset_item, new_name):
if not isinstance(asset_item, AssetItem):
return
prev_name = asset_item.data(QtCore.Qt.EditRole, "name")
if prev_name == new_name:
return
if asset_item.id in self._asset_items_by_name[prev_name]:
self._asset_items_by_name[prev_name].remove(asset_item.id)
self._validate_asset_duplicity(prev_name)
if new_name is None:
return
self._asset_items_by_name[new_name].add(asset_item.id)
self._validate_asset_duplicity(new_name)
def _validate_asset_duplicity(self, name):
if name not in self._asset_items_by_name:
return
item_ids = self._asset_items_by_name[name]
if not item_ids:
self._asset_items_by_name.pop(name)
elif len(item_ids) == 1:
for item_id in item_ids:
item = self._items_by_id[item_id]
index = self.index_for_item(item)
self.setData(index, False, DUPLICATED_ROLE)
else:
for item_id in item_ids:
item = self._items_by_id[item_id]
index = self.index_for_item(item)
self.setData(index, True, DUPLICATED_ROLE)
def _move_horizontal_single(self, index, direction):
if not index.isValid():
return
item_id = index.data(IDENTIFIER_ROLE)
if item_id is None:
return
item = self._items_by_id[item_id]
if isinstance(item, (RootItem, ProjectItem)):
return
if item.data(REMOVED_ROLE):
return
if (
isinstance(item, AssetItem)
and not item.data(HIERARCHY_CHANGE_ABLE_ROLE)
):
return
if abs(direction) != 1:
return
# Move under parent of parent
src_row = item.row()
src_parent = item.parent()
src_parent_index = self.index_from_item(
src_parent.row(), 0, src_parent.parent()
)
dst_row = None
dst_parent = None
if direction == -1:
if isinstance(src_parent, (RootItem, ProjectItem)):
return
dst_parent = src_parent.parent()
dst_row = src_parent.row() + 1
# Move under parent before or after if before is None
elif direction == 1:
src_row_count = src_parent.rowCount()
if src_row_count == 1:
return
item_row = item.row()
dst_parent = None
for row in reversed(range(item_row)):
_item = src_parent.child(row)
if not isinstance(_item, AssetItem):
continue
if _item.data(REMOVED_ROLE):
continue
dst_parent = _item
break
_next_row = item_row + 1
if dst_parent is None and _next_row < src_row_count:
for row in range(_next_row, src_row_count):
_item = src_parent.child(row)
if not isinstance(_item, AssetItem):
continue
if _item.data(REMOVED_ROLE):
continue
dst_parent = _item
break
if dst_parent is None:
return
dst_row = dst_parent.rowCount()
if src_parent is dst_parent:
return
if (
isinstance(item, TaskItem)
and not isinstance(dst_parent, AssetItem)
):
return
dst_parent_index = self.index_from_item(
dst_parent.row(), 0, dst_parent.parent()
)
self.beginMoveRows(
src_parent_index,
src_row,
src_row,
dst_parent_index,
dst_row
)
src_parent.remove_child(item)
dst_parent.add_child(item)
item.set_parent(dst_parent)
dst_parent.move_to(item, dst_row)
self.endMoveRows()
new_index = self.index(dst_row, index.column(), dst_parent_index)
self.index_moved.emit(new_index)
def move_horizontal(self, indexes, direction):
if not indexes:
return
if isinstance(indexes, QtCore.QModelIndex):
indexes = [indexes]
if len(indexes) == 1:
self._move_horizontal_single(indexes[0], direction)
return
items_by_id = {}
for index in indexes:
item_id = index.data(IDENTIFIER_ROLE)
item = self._items_by_id[item_id]
if isinstance(item, (RootItem, ProjectItem)):
continue
if (
direction == -1
and isinstance(item.parent(), (RootItem, ProjectItem))
):
continue
items_by_id[item_id] = item
if not items_by_id:
return
parents_by_id = {}
items_ids_by_parent_id = collections.defaultdict(set)
skip_ids = set(items_by_id.keys())
for item_id, item in tuple(items_by_id.items()):
item_parent = item.parent()
parent_ids = set()
skip_item = False
parent = item_parent
while parent is not None:
if parent.id in skip_ids:
skip_item = True
skip_ids |= parent_ids
break
parent_ids.add(parent.id)
parent = parent.parent()
if skip_item:
items_by_id.pop(item_id)
else:
parents_by_id[item_parent.id] = item_parent
items_ids_by_parent_id[item_parent.id].add(item_id)
if direction == 1:
for parent_id, parent in parents_by_id.items():
items_ids = items_ids_by_parent_id[parent_id]
if len(items_ids) == parent.rowCount():
for item_id in items_ids:
items_by_id.pop(item_id)
items = tuple(items_by_id.values())
if direction == -1:
items = reversed(items)
for item in items:
index = self.index_for_item(item)
self._move_horizontal_single(index, direction)
def _move_vertical_single(self, index, direction):
if not index.isValid():
return
item_id = index.data(IDENTIFIER_ROLE)
item = self._items_by_id[item_id]
if isinstance(item, (RootItem, ProjectItem)):
return
if item.data(REMOVED_ROLE):
return
if (
isinstance(item, AssetItem)
and not item.data(HIERARCHY_CHANGE_ABLE_ROLE)
):
return
if abs(direction) != 1:
return
src_parent = item.parent()
if not isinstance(src_parent, AssetItem):
return
src_parent_index = self.index_from_item(
src_parent.row(), 0, src_parent.parent()
)
source_row = item.row()
parent_items = []
parent = src_parent
while True:
parent = parent.parent()
parent_items.insert(0, parent)
if isinstance(parent, ProjectItem):
break
dst_parent = None
# Down
if direction == 1:
current_idxs = []
current_max_idxs = []
for parent_item in parent_items:
current_max_idxs.append(parent_item.rowCount())
if not isinstance(parent_item, ProjectItem):
current_idxs.append(parent_item.row())
current_idxs.append(src_parent.row())
indexes_len = len(current_idxs)
while True:
def _update_parents(idx, top=True):
if idx < 0:
return False
if current_max_idxs[idx] == current_idxs[idx]:
if not _update_parents(idx - 1, False):
return False
parent = parent_items[idx]
row_count = 0
if parent is not None:
row_count = parent.rowCount()
current_max_idxs[idx] = row_count
current_idxs[idx] = 0
return True
if top:
return True
current_idxs[idx] += 1
parent_item = parent_items[idx]
new_item = parent_item.child(current_idxs[idx])
parent_items[idx + 1] = new_item
return True
updated = _update_parents(indexes_len - 1)
if not updated:
return
start = current_idxs[-1]
end = current_max_idxs[-1]
current_idxs[-1] = current_max_idxs[-1]
parent = parent_items[-1]
for row in range(start, end):
child_item = parent.child(row)
if (
child_item is src_parent
or child_item.data(REMOVED_ROLE)
or not isinstance(child_item, AssetItem)
):
continue
dst_parent = child_item
destination_row = 0
break
if dst_parent is not None:
break
# Up
elif direction == -1:
current_idxs = []
for parent_item in parent_items:
if not isinstance(parent_item, ProjectItem):
current_idxs.append(parent_item.row())
current_idxs.append(src_parent.row())
max_idxs = [0 for _ in current_idxs]
indexes_len = len(current_idxs)
while True:
if current_idxs == max_idxs:
return
def _update_parents(_current_idx, top=True):
if _current_idx < 0:
return False
if current_idxs[_current_idx] == 0:
if not _update_parents(_current_idx - 1, False):
return False
parent = parent_items[_current_idx]
row_count = 0
if parent is not None:
row_count = parent.rowCount()
current_idxs[_current_idx] = row_count
return True
if top:
return True
current_idxs[_current_idx] -= 1
parent_item = parent_items[_current_idx]
new_item = parent_item.child(current_idxs[_current_idx])
parent_items[_current_idx + 1] = new_item
return True
updated = _update_parents(indexes_len - 1)
if not updated:
return
parent_item = parent_items[-1]
row_count = current_idxs[-1]
current_idxs[-1] = 0
for row in reversed(range(row_count)):
child_item = parent_item.child(row)
if (
child_item is src_parent
or child_item.data(REMOVED_ROLE)
or not isinstance(child_item, AssetItem)
):
continue
dst_parent = child_item
destination_row = dst_parent.rowCount()
break
if dst_parent is not None:
break
if dst_parent is None:
return
dst_parent_index = self.index_from_item(
dst_parent.row(), 0, dst_parent.parent()
)
self.beginMoveRows(
src_parent_index,
source_row,
source_row,
dst_parent_index,
destination_row
)
if src_parent is dst_parent:
dst_parent.move_to(item, destination_row)
else:
src_parent.remove_child(item)
dst_parent.add_child(item)
item.set_parent(dst_parent)
dst_parent.move_to(item, destination_row)
self.endMoveRows()
new_index = self.index(
destination_row, index.column(), dst_parent_index
)
self.index_moved.emit(new_index)
def move_vertical(self, indexes, direction):
"""Move item vertically in model to matching parent if possible.
        If the passed indexes contain items that have a parent<->child
        relation at any hierarchy level, only the top parent is actually moved.
Example (items marked with star are passed in `indexes`):
- shots*
- ep01
- ep01_sh0010*
- ep01_sh0020*
In this case only `shots` item will be moved vertically and
both "ep01_sh0010" "ep01_sh0020" will stay as children of "ep01".
Args:
indexes(list[QModelIndex]): Indexes that should be moved
vertically.
            direction(int): Direction of the move, -1 (up) or 1 (down).
"""
if not indexes:
return
# Convert single index to list of indexes
if isinstance(indexes, QtCore.QModelIndex):
indexes = [indexes]
# Just process single index
if len(indexes) == 1:
self._move_vertical_single(indexes[0], direction)
return
items_by_id = {}
for index in indexes:
item_id = index.data(IDENTIFIER_ROLE)
items_by_id[item_id] = self._items_by_id[item_id]
skip_ids = set(items_by_id.keys())
for item_id, item in tuple(items_by_id.items()):
parent = item.parent()
parent_ids = set()
skip_item = False
while parent is not None:
if parent.id in skip_ids:
skip_item = True
skip_ids |= parent_ids
break
parent_ids.add(parent.id)
parent = parent.parent()
if skip_item:
items_by_id.pop(item_id)
items = tuple(items_by_id.values())
if direction == 1:
items = reversed(items)
for item in items:
index = self.index_for_item(item)
self._move_vertical_single(index, direction)
def child_removed(self, child):
"""Callback for removed child."""
self._items_by_id.pop(child.id, None)
def column_name(self, column):
"""Return column key by index"""
if column < len(self.columns):
return self.columns[column]
return None
def clear(self):
"""Reset model."""
self.beginResetModel()
self._reset_root_item()
self.endResetModel()
def save(self):
"""Save all changes from current project manager session.
        Creates new asset documents and updates existing ones. Asset documents
        marked for deletion are removed from mongo if they have no published
        content, otherwise their type is changed to `archived_asset` so their
        data are not lost.
"""
# Check if all items are valid before save
all_valid = True
for item in self._items_by_id.values():
if not item.is_valid:
all_valid = False
break
if not all_valid:
return
# Check project item and do not save without it
project_item = None
for _project_item in self._root_item.children():
project_item = _project_item
if not project_item:
return
project_name = project_item.name
project_col = self.dbcon.database[project_name]
# Process asset items per one hierarchical level.
# - new assets are inserted per one parent
# - update and delete data are stored and processed at once at the end
to_process = collections.deque()
to_process.append(project_item)
bulk_writes = []
while to_process:
parent = to_process.popleft()
insert_list = []
for item in parent.children():
if not isinstance(item, AssetItem):
continue
to_process.append(item)
if item.is_new:
insert_list.append(item)
elif item.data(REMOVED_ROLE):
if item.data(HIERARCHY_CHANGE_ABLE_ROLE):
bulk_writes.append(DeleteOne(
{"_id": item.asset_id}
))
else:
bulk_writes.append(UpdateOne(
{"_id": item.asset_id},
{"$set": {"type": "archived_asset"}}
))
else:
update_data = item.update_data()
if update_data:
bulk_writes.append(UpdateOne(
{"_id": item.asset_id},
update_data
))
if insert_list:
new_docs = []
for item in insert_list:
new_docs.append(item.to_doc())
result = project_col.insert_many(new_docs)
for idx, mongo_id in enumerate(result.inserted_ids):
insert_list[idx].mongo_id = mongo_id
if bulk_writes:
project_col.bulk_write(bulk_writes)
self.refresh_project()
def copy_mime_data(self, indexes):
items = []
processed_ids = set()
for index in indexes:
if not index.isValid():
continue
item_id = index.data(IDENTIFIER_ROLE)
if item_id in processed_ids:
continue
processed_ids.add(item_id)
item = self._items_by_id[item_id]
items.append(item)
parent_item = None
for item in items:
if not isinstance(item, TaskItem):
raise ValueError("Can copy only tasks")
if parent_item is None:
parent_item = item.parent()
elif item.parent() is not parent_item:
raise ValueError("Can copy only tasks from same parent")
data = []
for task_item in items:
data.append(task_item.to_json_data())
encoded_data = QtCore.QByteArray()
stream = QtCore.QDataStream(encoded_data, QtCore.QIODevice.WriteOnly)
stream.writeQString(json.dumps(data))
mimedata = QtCore.QMimeData()
mimedata.setData("application/copy_task", encoded_data)
return mimedata
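    # Round-trip note (illustrative): `copy_mime_data` above serializes the
    # selected TaskItems to JSON under the "application/copy_task" MIME type;
    # `paste_mime_data` below reads that payload back and recreates TaskItem
    # instances under the target asset.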
def paste_mime_data(self, index, mime_data):
if not index.isValid():
return
item_id = index.data(IDENTIFIER_ROLE)
item = self._items_by_id[item_id]
if not isinstance(item, (AssetItem, TaskItem)):
return
raw_data = mime_data.data("application/copy_task")
if isinstance(raw_data, QtCore.QByteArray):
            # Raw data is already a QByteArray and we don't have to convert it
encoded_data = raw_data
else:
encoded_data = QtCore.QByteArray.fromRawData(raw_data)
stream = QtCore.QDataStream(encoded_data, QtCore.QIODevice.ReadOnly)
text = stream.readQString()
try:
data = json.loads(text)
except Exception:
data = []
if not data:
return
if isinstance(item, TaskItem):
parent = item.parent()
else:
parent = item
for task_item_data in data:
task_data = {}
            # Avoid shadowing the outer `data` list with the loop variable
            for name, task_values in task_item_data.items():
                task_data = task_values
                task_data["name"] = name
task_item = TaskItem(task_data, True)
self.add_item(task_item, parent)
class BaseItem:
"""Base item for HierarchyModel.
    It is not meant to be used as a real item but as a superclass for all
    items used in HierarchyModel.
TODO cleanup some attributes and methods related only to AssetItem and
TaskItem.
"""
columns = []
# Use `set` for faster result
editable_columns = set()
_name_icons = None
_is_duplicated = False
item_type = "base"
_None = object()
def __init__(self, data=None):
self._id = uuid4()
self._children = list()
self._parent = None
self._data = {
key: None
for key in self.columns
}
self._global_data = {}
self._source_data = data
if data:
for key, value in data.items():
if key in self.columns:
self._data[key] = value
def name_icon(self):
"""Icon shown next to name.
        An item must implement this method to change it.
"""
return None
@property
def is_valid(self):
return not self._is_duplicated
def model(self):
return self._parent.model()
def move_to(self, item, row):
idx = self._children.index(item)
if idx == row:
return
self._children.pop(idx)
self._children.insert(row, item)
def _get_global_data(self, role):
"""Global data getter without column specification."""
if role == ITEM_TYPE_ROLE:
return self.item_type
if role == IDENTIFIER_ROLE:
return self._id
if role == DUPLICATED_ROLE:
return self._is_duplicated
if role == REMOVED_ROLE:
return False
return self._global_data.get(role, self._None)
def _set_global_data(self, value, role):
self._global_data[role] = value
return True
def data(self, role, key=None):
value = self._get_global_data(role)
if value is not self._None:
return value
if key not in self.columns:
return None
if role == QtCore.Qt.ForegroundRole:
if key == "name" and not self.is_valid:
return ResourceCache.colors["warning"]
return None
if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
value = self._data[key]
if value is None:
value = self.parent().data(role, key)
return value
if role == QtCore.Qt.DecorationRole and key == "name":
return self.name_icon()
return None
def setData(self, value, role, key=None):
if role == DUPLICATED_ROLE:
if value == self._is_duplicated:
return False
self._is_duplicated = value
return True
if role == QtCore.Qt.EditRole:
if key in self.editable_columns:
self._data[key] = value
# must return true if successful
return True
return self._set_global_data(value, role)
@property
def id(self):
return self._id
@property
def is_new(self):
return False
def rowCount(self):
return len(self._children)
def child(self, row):
if -1 < row < self.rowCount():
return self._children[row]
return None
def children(self):
return self._children
def child_row(self, child):
if child not in self._children:
return -1
return self._children.index(child)
def parent(self):
return self._parent
def set_parent(self, parent):
if parent is self._parent:
return
if self._parent:
self._parent.remove_child(self)
self._parent = parent
def row(self):
if self._parent is not None:
return self._parent.child_row(self)
return -1
def add_child(self, item, row=None):
if item in self._children:
return
row_count = self.rowCount()
if row is None or row == row_count:
self._children.append(item)
return
if row > row_count or row < 0:
raise ValueError(
"Invalid row number {} expected range 0 - {}".format(
row, row_count
)
)
self._children.insert(row, item)
def remove_child(self, item):
if item in self._children:
self._children.remove(item)
def flags(self, key):
flags = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
if key in self.editable_columns:
flags |= QtCore.Qt.ItemIsEditable
return flags
class RootItem(BaseItem):
"""Invisible root item used as base item for model."""
item_type = "root"
def __init__(self, model):
super(RootItem, self).__init__()
self._model = model
def model(self):
return self._model
def flags(self, *args, **kwargs):
return QtCore.Qt.NoItemFlags
class ProjectItem(BaseItem):
"""Item representing project document in Mongo.
    Item is used only to read its data. It is not possible to modify it.
"""
item_type = "project"
columns = {
"name",
"type",
"frameStart",
"frameEnd",
"fps",
"resolutionWidth",
"resolutionHeight",
"handleStart",
"handleEnd",
"clipIn",
"clipOut",
"pixelAspect",
"tools_env",
}
query_projection = {
"_id": 1,
"name": 1,
"type": 1,
"data.frameStart": 1,
"data.frameEnd": 1,
"data.fps": 1,
"data.resolutionWidth": 1,
"data.resolutionHeight": 1,
"data.handleStart": 1,
"data.handleEnd": 1,
"data.clipIn": 1,
"data.clipOut": 1,
"data.pixelAspect": 1,
"data.tools_env": 1
}
def __init__(self, project_doc):
self._mongo_id = project_doc["_id"]
data = self.data_from_doc(project_doc)
super(ProjectItem, self).__init__(data)
@property
def project_id(self):
"""Project Mongo ID."""
return self._mongo_id
@property
def asset_id(self):
"""Should not be implemented.
TODO Remove this method from ProjectItem.
"""
return None
@property
def name(self):
"""Project name"""
return self._data["name"]
def child_parents(self):
"""Used by children AssetItems for filling `data.parents` key."""
return []
@classmethod
def data_from_doc(cls, project_doc):
"""Convert document data into item data.
        Project data are used as default values for its children.
"""
data = {
"name": project_doc["name"],
"type": project_doc["type"]
}
doc_data = project_doc.get("data") or {}
for key in cls.columns:
if key in data:
continue
data[key] = doc_data.get(key)
return data
def flags(self, *args, **kwargs):
"""Project is enabled and selectable."""
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
class AssetItem(BaseItem):
"""Item represent asset document.
Item have ability to set all required and optional data for OpenPype
workflow. Some of them are not modifiable in specific cases e.g. when asset
has published content it is not possible to change it's name or parent.
"""
item_type = "asset"
columns = {
"name",
"type",
"fps",
"frameStart",
"frameEnd",
"resolutionWidth",
"resolutionHeight",
"handleStart",
"handleEnd",
"clipIn",
"clipOut",
"pixelAspect",
"tools_env"
}
editable_columns = {
"name",
"frameStart",
"frameEnd",
"fps",
"resolutionWidth",
"resolutionHeight",
"handleStart",
"handleEnd",
"clipIn",
"clipOut",
"pixelAspect",
"tools_env"
}
query_projection = {
"_id": 1,
"data.tasks": 1,
"data.visualParent": 1,
"schema": 1,
"name": 1,
"type": 1,
"data.frameStart": 1,
"data.frameEnd": 1,
"data.fps": 1,
"data.resolutionWidth": 1,
"data.resolutionHeight": 1,
"data.handleStart": 1,
"data.handleEnd": 1,
"data.clipIn": 1,
"data.clipOut": 1,
"data.pixelAspect": 1,
"data.tools_env": 1
}
def __init__(self, asset_doc):
if not asset_doc:
asset_doc = {}
self.mongo_id = asset_doc.get("_id")
self._project_id = None
self._edited_columns = {
column_name: False
for column_name in self.editable_columns
}
# Item data
self._hierarchy_changes_enabled = True
self._removed = False
# Task children duplication variables
self._task_items_by_name = collections.defaultdict(list)
self._task_name_by_item_id = {}
self._duplicated_task_names = set()
# Copy of original document
self._origin_asset_doc = copy.deepcopy(asset_doc)
data = self.data_from_doc(asset_doc)
self._origin_data = copy.deepcopy(data)
super(AssetItem, self).__init__(data)
@property
def project_id(self):
"""Access to project "parent" id which is always set."""
if self._project_id is None:
self._project_id = self.parent().project_id
return self._project_id
@property
def asset_id(self):
"""Property access to mongo id."""
return self.mongo_id
@property
def is_new(self):
"""Item was created during current project manager session."""
return self.asset_id is None
@property
def is_valid(self):
"""Item is invalid for saving."""
if self._is_duplicated or not self._data["name"]:
return False
return True
@property
def name(self):
"""Asset name.
Returns:
str: If name is set.
            None: If name is not yet set; in that case the AssetItem is
                marked as invalid.
"""
return self._data["name"]
def child_parents(self):
"""Children AssetItem can use this method to get it's parent names.
This is used for `data.parents` key on document.
"""
parents = self.parent().child_parents()
parents.append(self.name)
return parents
def to_doc(self):
"""Convert item to Mongo document matching asset schema.
        Method does not validate whether the item or its children are valid.
        Returns:
            dict: Document with all related data about the asset item; also
                contains task children.
"""
tasks = {}
for item in self.children():
if isinstance(item, TaskItem):
tasks.update(item.to_doc_data())
doc_data = {
"parents": self.parent().child_parents(),
"visualParent": self.parent().asset_id,
"tasks": tasks
}
schema_name = (
self._origin_asset_doc.get("schema")
or CURRENT_DOC_SCHEMAS["asset"]
)
doc = {
"name": self.data(QtCore.Qt.EditRole, "name"),
"type": self.data(QtCore.Qt.EditRole, "type"),
"schema": schema_name,
"data": doc_data,
"parent": self.project_id
}
if self.mongo_id:
doc["_id"] = self.mongo_id
for key in self._data.keys():
if key in doc:
continue
# Use `data` method to get inherited values
doc_data[key] = self.data(QtCore.Qt.EditRole, key)
return doc
def update_data(self):
"""Changes dictionary ready for Mongo's update.
        Method should be used on save. There is no other usage of this method.
# Example
```python
{
"$set": {
"name": "new_name"
}
}
```
Returns:
dict: May be empty if item was not changed.
"""
if not self.mongo_id:
return {}
document = self.to_doc()
changes = {}
for key, value in document.items():
if key in ("data", "_id"):
continue
if (
key in self._origin_asset_doc
and self._origin_asset_doc[key] == value
):
continue
changes[key] = value
if "data" not in self._origin_asset_doc:
changes["data"] = document["data"]
else:
origin_data = self._origin_asset_doc["data"]
for key, value in document["data"].items():
if key in origin_data and origin_data[key] == value:
continue
_key = "data.{}".format(key)
changes[_key] = value
if changes:
return {"$set": changes}
return {}
@classmethod
def data_from_doc(cls, asset_doc):
"""Convert asset document from Mongo to item data."""
        # Minimum required data for cases when it is a new AssetItem without a doc
data = {
"name": None,
"type": "asset"
}
if asset_doc:
for key in data.keys():
if key in asset_doc:
data[key] = asset_doc[key]
doc_data = asset_doc.get("data") or {}
for key in cls.columns:
if key in data:
continue
data[key] = doc_data.get(key)
return data
def name_icon(self):
"""Icon shown next to name."""
if self.__class__._name_icons is None:
self.__class__._name_icons = ResourceCache.get_icons()["asset"]
if self._removed:
icon_type = "removed"
elif not self.is_valid:
icon_type = "invalid"
elif self.is_new:
icon_type = "new"
else:
icon_type = "default"
return self.__class__._name_icons[icon_type]
def _get_global_data(self, role):
"""Global data getter without column specification."""
if role == HIERARCHY_CHANGE_ABLE_ROLE:
return self._hierarchy_changes_enabled
if role == REMOVED_ROLE:
return self._removed
if role == QtCore.Qt.ToolTipRole:
name = self.data(QtCore.Qt.EditRole, "name")
if not name:
return "Name is not set"
elif self._is_duplicated:
return "Duplicated asset name \"{}\"".format(name)
return None
return super(AssetItem, self)._get_global_data(role)
def data(self, role, key=None):
if role == EDITOR_OPENED_ROLE:
if key not in self._edited_columns:
return False
return self._edited_columns[key]
if role == QtCore.Qt.DisplayRole and self._edited_columns.get(key):
return ""
return super(AssetItem, self).data(role, key)
def setData(self, value, role, key=None):
# Store information that column has opened editor
# - DisplayRole for the column will return empty string
if role == EDITOR_OPENED_ROLE:
if key not in self._edited_columns:
return False
self._edited_columns[key] = value
return True
if role == REMOVED_ROLE:
self._removed = value
return True
# This can be set only on project load (or save)
if role == HIERARCHY_CHANGE_ABLE_ROLE:
if self._hierarchy_changes_enabled == value:
return False
self._hierarchy_changes_enabled = value
return True
        # Do not allow the name to change if the item is marked as unable to
        # do any hierarchical changes.
if (
role == QtCore.Qt.EditRole
and key == "name"
and not self._hierarchy_changes_enabled
):
return False
return super(AssetItem, self).setData(value, role, key)
def flags(self, key):
if key == "name":
flags = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
if self._hierarchy_changes_enabled:
flags |= QtCore.Qt.ItemIsEditable
return flags
return super(AssetItem, self).flags(key)
def _add_task(self, item):
name = item.data(QtCore.Qt.EditRole, "name").lower()
item_id = item.data(IDENTIFIER_ROLE)
self._task_name_by_item_id[item_id] = name
self._task_items_by_name[name].append(item)
if len(self._task_items_by_name[name]) > 1:
self._duplicated_task_names.add(name)
for _item in self._task_items_by_name[name]:
_item.setData(True, DUPLICATED_ROLE)
elif item.data(DUPLICATED_ROLE):
item.setData(False, DUPLICATED_ROLE)
def _remove_task(self, item):
# This method is probably obsolete with changed logic and added
# `on_task_remove_state_change` method.
item_id = item.data(IDENTIFIER_ROLE)
if item_id not in self._task_name_by_item_id:
return
name = self._task_name_by_item_id.pop(item_id)
self._task_items_by_name[name].remove(item)
if not self._task_items_by_name[name]:
self._task_items_by_name.pop(name)
elif len(self._task_items_by_name[name]) == 1:
self._duplicated_task_names.remove(name)
for _item in self._task_items_by_name[name]:
_item.setData(False, DUPLICATED_ROLE)
def _rename_task(self, item):
# Skip processing if item is marked for removing
# - item is not in any of attributes below
if item.data(REMOVED_ROLE):
return
new_name = item.data(QtCore.Qt.EditRole, "name").lower()
item_id = item.data(IDENTIFIER_ROLE)
prev_name = self._task_name_by_item_id[item_id]
if new_name == prev_name:
return
# Remove from previous name mapping
self._task_items_by_name[prev_name].remove(item)
if not self._task_items_by_name[prev_name]:
self._task_items_by_name.pop(prev_name)
elif len(self._task_items_by_name[prev_name]) == 1:
self._duplicated_task_names.remove(prev_name)
for _item in self._task_items_by_name[prev_name]:
_item.setData(False, DUPLICATED_ROLE)
# Add to new name mapping
self._task_items_by_name[new_name].append(item)
if len(self._task_items_by_name[new_name]) > 1:
self._duplicated_task_names.add(new_name)
for _item in self._task_items_by_name[new_name]:
_item.setData(True, DUPLICATED_ROLE)
else:
item.setData(False, DUPLICATED_ROLE)
self._task_name_by_item_id[item_id] = new_name
def on_task_name_change(self, task_item):
"""Method called from TaskItem children on name change.
Helps to handle duplicated task name validations.
"""
self._rename_task(task_item)
def on_task_remove_state_change(self, task_item):
"""Method called from children TaskItem to handle name duplications.
Method is called when TaskItem children is marked for deletion or
deletion was reversed.
Name is removed/added to task item mapping attribute and removed/added
to `_task_items_by_name` used for determination of duplicated tasks.
"""
is_removed = task_item.data(REMOVED_ROLE)
item_id = task_item.data(IDENTIFIER_ROLE)
if is_removed:
name = self._task_name_by_item_id.pop(item_id)
self._task_items_by_name[name].remove(task_item)
else:
name = task_item.data(QtCore.Qt.EditRole, "name").lower()
self._task_name_by_item_id[item_id] = name
self._task_items_by_name[name].append(task_item)
        # Update duplicated state of the affected task name
if not self._task_items_by_name[name]:
self._task_items_by_name.pop(name)
elif len(self._task_items_by_name[name]) == 1:
if name in self._duplicated_task_names:
self._duplicated_task_names.remove(name)
task_item.setData(False, DUPLICATED_ROLE)
else:
self._duplicated_task_names.add(name)
for _item in self._task_items_by_name[name]:
_item.setData(True, DUPLICATED_ROLE)
def add_child(self, item, row=None):
"""Add new children.
Args:
item(AssetItem, TaskItem): New added item.
row(int): Optionally can be passed on which row (index) should be
children added.
"""
if item in self._children:
return
super(AssetItem, self).add_child(item, row)
# Call inner method for checking task name duplications
if isinstance(item, TaskItem):
self._add_task(item)
def remove_child(self, item):
"""Remove one of children from AssetItem children.
Skipped if item is not children of item.
Args:
item(AssetItem, TaskItem): Child item.
"""
if item not in self._children:
return
# Call inner method to remove task from registered task name
# validations.
if isinstance(item, TaskItem):
self._remove_task(item)
super(AssetItem, self).remove_child(item)
class TaskItem(BaseItem):
"""Item representing Task item on Asset document.
Always should be AssetItem children and never should have any other
children.
It's name value should be validated with it's parent which only knows if
has same name as other sibling under same parent.
"""
# String representation of item
item_type = "task"
columns = {
"name",
"type"
}
editable_columns = {
"name",
"type"
}
def __init__(self, data=None, is_new=None):
self._removed = False
if is_new is None:
is_new = data is None
self._is_new = is_new
if data is None:
data = {}
self._edited_columns = {
column_name: False
for column_name in self.editable_columns
}
self._origin_data = copy.deepcopy(data)
super(TaskItem, self).__init__(data)
@property
def is_new(self):
"""Task was created during current project manager session."""
return self._is_new
@property
def is_valid(self):
"""Task valid for saving."""
if self._is_duplicated or not self._data["type"]:
return False
if not self.data(QtCore.Qt.EditRole, "name"):
return False
return True
def name_icon(self):
"""Icon shown next to name."""
if self.__class__._name_icons is None:
self.__class__._name_icons = ResourceCache.get_icons()["task"]
if self._removed:
icon_type = "removed"
elif not self.is_valid:
icon_type = "invalid"
elif self.is_new:
icon_type = "new"
else:
icon_type = "default"
return self.__class__._name_icons[icon_type]
def add_child(self, item, row=None):
"""Reimplement `add_child` to avoid adding items under task."""
raise AssertionError("BUG: Can't add children to Task")
def _get_global_data(self, role):
"""Global data getter without column specification."""
if role == REMOVED_ROLE:
return self._removed
if role == QtCore.Qt.ToolTipRole:
if not self._data["type"]:
return "Type is not set"
name = self.data(QtCore.Qt.EditRole, "name")
if not name:
return "Name is not set"
elif self._is_duplicated:
return "Duplicated task name \"{}".format(name)
return None
return super(TaskItem, self)._get_global_data(role)
def to_doc_data(self):
"""Data for Asset document.
Returns:
            dict: Empty if the task is marked as removed, otherwise a single
                key dict with the task name as key and task data as value.
"""
if self._removed:
return {}
data = copy.deepcopy(self._data)
data.pop("name")
name = self.data(QtCore.Qt.EditRole, "name")
return {
name: data
}
def data(self, role, key=None):
if role == EDITOR_OPENED_ROLE:
if key not in self._edited_columns:
return False
return self._edited_columns[key]
# Return empty string if column is edited
if role == QtCore.Qt.DisplayRole and self._edited_columns.get(key):
return ""
if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
if key == "type":
return self._data["type"]
# Always require task type filled
if key == "name":
if not self._data["type"]:
if role == QtCore.Qt.DisplayRole:
return "< Select Type >"
if role == QtCore.Qt.EditRole:
return ""
else:
return self._data[key] or self._data["type"]
return super(TaskItem, self).data(role, key)
def setData(self, value, role, key=None):
# Store information that item on a column is edited
# - DisplayRole will return empty string in that case
if role == EDITOR_OPENED_ROLE:
if key not in self._edited_columns:
return False
self._edited_columns[key] = value
return True
if role == REMOVED_ROLE:
# Skip value change if is same as already set value
if value == self._removed:
return False
self._removed = value
self.parent().on_task_remove_state_change(self)
return True
# Convert empty string to None on EditRole
if (
role == QtCore.Qt.EditRole
and key == "name"
and not value
):
value = None
result = super(TaskItem, self).setData(value, role, key)
if role == QtCore.Qt.EditRole:
# Trigger task name change of parent AssetItem
if (
key == "name"
or (key == "type" and not self._data["name"])
):
self.parent().on_task_name_change(self)
return result
def to_json_data(self):
"""Convert json data without parent reference.
Method used for mime data on copy/paste
"""
return self.to_doc_data()
```
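For orientation, a minimal sketch of the shape `TaskItem.to_doc_data` produces and how several such entries merge together; the task names and types below are made up and the exact asset document schema is not shown in this file:
```python
# Hypothetical output of TaskItem.to_doc_data() for a single, not-removed task:
single_task = {"compositing": {"type": "Compositing"}}
# Several such single-key dicts merged together form the task mapping that is
# typically stored on the asset document:
tasks = {}
for task_data in (
    {"compositing": {"type": "Compositing"}},
    {"animation": {"type": "Animation"}},
):
    tasks.update(task_data)
print(tasks)
# {'compositing': {'type': 'Compositing'}, 'animation': {'type': 'Animation'}}
```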
#### File: tools/publisher/app.py
```python
from .window import PublisherWindow
class _WindowCache:
window = None
def show(parent=None):
window = _WindowCache.window
if window is None:
window = PublisherWindow(parent)
_WindowCache.window = window
window.show()
window.activateWindow()
return window
```
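A minimal usage sketch of the cached-window pattern above, assuming a configured OpenPype environment and that the module is importable as `openpype.tools.publisher.app`:
```python
# Hedged sketch: repeated show() calls reuse the same cached window instance.
import sys
from Qt import QtWidgets
from openpype.tools.publisher import app  # assumed import path
qt_app = QtWidgets.QApplication.instance() or QtWidgets.QApplication(sys.argv)
window = app.show()
assert app.show() is window  # second call only re-raises the cached window
qt_app.exec_()
```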
#### File: publisher/publish_report_viewer/window.py
```python
import os
import json
import six
import appdirs
from Qt import QtWidgets, QtCore, QtGui
from openpype import style
from openpype.lib import JSONSettingRegistry
from openpype.resources import get_openpype_icon_filepath
from openpype.tools import resources
from openpype.tools.utils import (
IconButton,
paint_image_with_color
)
from openpype.tools.utils.delegates import PrettyTimeDelegate
if __package__:
from .widgets import PublishReportViewerWidget
from .report_items import PublishReport
else:
from widgets import PublishReportViewerWidget
from report_items import PublishReport
FILEPATH_ROLE = QtCore.Qt.UserRole + 1
MODIFIED_ROLE = QtCore.Qt.UserRole + 2
class PublisherReportRegistry(JSONSettingRegistry):
"""Class handling storing publish report tool.
Attributes:
vendor (str): Name used for path construction.
product (str): Additional name used for path construction.
"""
def __init__(self):
self.vendor = "pypeclub"
self.product = "openpype"
name = "publish_report_viewer"
path = appdirs.user_data_dir(self.product, self.vendor)
super(PublisherReportRegistry, self).__init__(name, path)
class LoadedFilesModel(QtGui.QStandardItemModel):
def __init__(self, *args, **kwargs):
        super(LoadedFilesModel, self).__init__(*args, **kwargs)
self.setColumnCount(2)
self._items_by_filepath = {}
self._reports_by_filepath = {}
self._registry = PublisherReportRegistry()
self._loading_registry = False
self._load_registry()
def headerData(self, section, orientation, role):
if role in (QtCore.Qt.DisplayRole, QtCore.Qt.EditRole):
if section == 0:
return "Exports"
if section == 1:
return "Modified"
return ""
        return super(LoadedFilesModel, self).headerData(
            section, orientation, role
        )
def _load_registry(self):
self._loading_registry = True
try:
filepaths = self._registry.get_item("filepaths")
self.add_filepaths(filepaths)
except ValueError:
pass
self._loading_registry = False
def _store_registry(self):
if self._loading_registry:
return
filepaths = list(self._items_by_filepath.keys())
self._registry.set_item("filepaths", filepaths)
def data(self, index, role=None):
if role is None:
role = QtCore.Qt.DisplayRole
col = index.column()
if col != 0:
index = self.index(index.row(), 0, index.parent())
if role == QtCore.Qt.ToolTipRole:
if col == 0:
role = FILEPATH_ROLE
elif col == 1:
return "File modified"
return None
elif role == QtCore.Qt.DisplayRole:
if col == 1:
role = MODIFIED_ROLE
        return super(LoadedFilesModel, self).data(index, role)
def add_filepaths(self, filepaths):
if not filepaths:
return
if isinstance(filepaths, six.string_types):
filepaths = [filepaths]
filtered_paths = []
for filepath in filepaths:
normalized_path = os.path.normpath(filepath)
if normalized_path in self._items_by_filepath:
continue
if (
os.path.exists(normalized_path)
and normalized_path not in filtered_paths
):
filtered_paths.append(normalized_path)
if not filtered_paths:
return
new_items = []
for normalized_path in filtered_paths:
try:
with open(normalized_path, "r") as stream:
data = json.load(stream)
report = PublishReport(data)
except Exception:
# TODO handle errors
continue
modified = os.path.getmtime(normalized_path)
item = QtGui.QStandardItem(os.path.basename(normalized_path))
item.setColumnCount(self.columnCount())
item.setData(normalized_path, FILEPATH_ROLE)
item.setData(modified, MODIFIED_ROLE)
new_items.append(item)
self._items_by_filepath[normalized_path] = item
self._reports_by_filepath[normalized_path] = report
if not new_items:
return
parent = self.invisibleRootItem()
parent.appendRows(new_items)
self._store_registry()
def remove_filepaths(self, filepaths):
if not filepaths:
return
if isinstance(filepaths, six.string_types):
filepaths = [filepaths]
filtered_paths = []
for filepath in filepaths:
normalized_path = os.path.normpath(filepath)
if normalized_path in self._items_by_filepath:
filtered_paths.append(normalized_path)
if not filtered_paths:
return
parent = self.invisibleRootItem()
for filepath in filtered_paths:
            self._reports_by_filepath.pop(filepath)
item = self._items_by_filepath.pop(filepath)
parent.removeRow(item.row())
self._store_registry()
def get_report_by_filepath(self, filepath):
return self._reports_by_filepath.get(filepath)
class LoadedFilesView(QtWidgets.QTreeView):
selection_changed = QtCore.Signal()
def __init__(self, *args, **kwargs):
super(LoadedFilesView, self).__init__(*args, **kwargs)
self.setEditTriggers(self.NoEditTriggers)
self.setIndentation(0)
self.setAlternatingRowColors(True)
        model = LoadedFilesModel()
self.setModel(model)
time_delegate = PrettyTimeDelegate()
self.setItemDelegateForColumn(1, time_delegate)
remove_btn = IconButton(self)
remove_icon_path = resources.get_icon_path("delete")
loaded_remove_image = QtGui.QImage(remove_icon_path)
pix = paint_image_with_color(loaded_remove_image, QtCore.Qt.white)
icon = QtGui.QIcon(pix)
remove_btn.setIcon(icon)
model.rowsInserted.connect(self._on_rows_inserted)
remove_btn.clicked.connect(self._on_remove_clicked)
self.selectionModel().selectionChanged.connect(
self._on_selection_change
)
self._model = model
self._time_delegate = time_delegate
self._remove_btn = remove_btn
def _update_remove_btn(self):
viewport = self.viewport()
height = viewport.height() + self.header().height()
pos_x = viewport.width() - self._remove_btn.width() - 5
pos_y = height - self._remove_btn.height() - 5
self._remove_btn.move(max(0, pos_x), max(0, pos_y))
def _on_rows_inserted(self):
header = self.header()
header.resizeSections(header.ResizeToContents)
def resizeEvent(self, event):
super(LoadedFilesView, self).resizeEvent(event)
self._update_remove_btn()
def showEvent(self, event):
super(LoadedFilesView, self).showEvent(event)
self._update_remove_btn()
header = self.header()
header.resizeSections(header.ResizeToContents)
def _on_selection_change(self):
self.selection_changed.emit()
def add_filepaths(self, filepaths):
self._model.add_filepaths(filepaths)
self._fill_selection()
def remove_filepaths(self, filepaths):
self._model.remove_filepaths(filepaths)
self._fill_selection()
def _on_remove_clicked(self):
index = self.currentIndex()
filepath = index.data(FILEPATH_ROLE)
self.remove_filepaths(filepath)
def _fill_selection(self):
index = self.currentIndex()
if index.isValid():
return
index = self._model.index(0, 0)
if index.isValid():
self.setCurrentIndex(index)
def get_current_report(self):
index = self.currentIndex()
filepath = index.data(FILEPATH_ROLE)
return self._model.get_report_by_filepath(filepath)
class LoadedFilesWidget(QtWidgets.QWidget):
report_changed = QtCore.Signal()
def __init__(self, parent):
super(LoadedFilesWidget, self).__init__(parent)
self.setAcceptDrops(True)
view = LoadedFilesView(self)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(view, 1)
view.selection_changed.connect(self._on_report_change)
self._view = view
def dragEnterEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
def dragLeaveEvent(self, event):
event.accept()
def dropEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
filepaths = []
for url in mime_data.urls():
filepath = url.toLocalFile()
ext = os.path.splitext(filepath)[-1]
if os.path.exists(filepath) and ext == ".json":
filepaths.append(filepath)
self._add_filepaths(filepaths)
event.accept()
def _on_report_change(self):
self.report_changed.emit()
def _add_filepaths(self, filepaths):
self._view.add_filepaths(filepaths)
def get_current_report(self):
return self._view.get_current_report()
class PublishReportViewerWindow(QtWidgets.QWidget):
default_width = 1200
default_height = 600
def __init__(self, parent=None):
super(PublishReportViewerWindow, self).__init__(parent)
self.setWindowTitle("Publish report viewer")
icon = QtGui.QIcon(get_openpype_icon_filepath())
self.setWindowIcon(icon)
body = QtWidgets.QSplitter(self)
body.setContentsMargins(0, 0, 0, 0)
body.setSizePolicy(
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding
)
body.setOrientation(QtCore.Qt.Horizontal)
loaded_files_widget = LoadedFilesWidget(body)
main_widget = PublishReportViewerWidget(body)
body.addWidget(loaded_files_widget)
body.addWidget(main_widget)
body.setStretchFactor(0, 70)
body.setStretchFactor(1, 65)
layout = QtWidgets.QHBoxLayout(self)
layout.addWidget(body, 1)
loaded_files_widget.report_changed.connect(self._on_report_change)
self._loaded_files_widget = loaded_files_widget
self._main_widget = main_widget
self.resize(self.default_width, self.default_height)
self.setStyleSheet(style.load_stylesheet())
def _on_report_change(self):
report = self._loaded_files_widget.get_current_report()
self.set_report(report)
def set_report(self, report_data):
self._main_widget.set_report(report_data)
```
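A minimal standalone-launch sketch for the window above, assuming OpenPype and its Qt binding are importable and that the module path mirrors the file location shown in the header:
```python
# Hedged sketch: open the report viewer as a standalone tool.
import sys
from Qt import QtWidgets
# assumed import path, mirroring the file path above
from openpype.tools.publisher.publish_report_viewer.window import (
    PublishReportViewerWindow
)
qt_app = QtWidgets.QApplication.instance() or QtWidgets.QApplication(sys.argv)
window = PublishReportViewerWindow()
window.show()  # exported .json publish reports can then be drag&dropped in
sys.exit(qt_app.exec_())
```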
#### File: publisher/widgets/precreate_widget.py
```python
from Qt import QtWidgets, QtCore
from openpype.widgets.attribute_defs import create_widget_for_attr_def
class PreCreateWidget(QtWidgets.QWidget):
def __init__(self, parent):
super(PreCreateWidget, self).__init__(parent)
        # Precreate attribute definitions of Creator
scroll_area = QtWidgets.QScrollArea(self)
        content_widget = QtWidgets.QWidget(scroll_area)
        scroll_area.setWidget(content_widget)
        scroll_area.setWidgetResizable(True)
        attributes_widget = AttributesWidget(content_widget)
        content_layout = QtWidgets.QVBoxLayout(content_widget)
        content_layout.setContentsMargins(0, 0, 0, 0)
        content_layout.addWidget(attributes_widget, 0)
        content_layout.addStretch(1)
# Widget showed when there are no attribute definitions from creator
empty_widget = QtWidgets.QWidget(self)
empty_widget.setVisible(False)
# Label showed when creator is not selected
no_creator_label = QtWidgets.QLabel(
"Creator is not selected",
empty_widget
)
no_creator_label.setWordWrap(True)
# Creator does not have precreate attributes
empty_label = QtWidgets.QLabel(
"This creator has no configurable options",
empty_widget
)
empty_label.setWordWrap(True)
empty_label.setVisible(False)
empty_layout = QtWidgets.QVBoxLayout(empty_widget)
empty_layout.setContentsMargins(0, 0, 0, 0)
empty_layout.addWidget(empty_label, 0, QtCore.Qt.AlignCenter)
empty_layout.addWidget(no_creator_label, 0, QtCore.Qt.AlignCenter)
main_layout = QtWidgets.QHBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.addWidget(scroll_area, 1)
main_layout.addWidget(empty_widget, 1)
self._scroll_area = scroll_area
self._empty_widget = empty_widget
self._empty_label = empty_label
self._no_creator_label = no_creator_label
self._attributes_widget = attributes_widget
def current_value(self):
return self._attributes_widget.current_value()
def set_plugin(self, creator):
attr_defs = []
creator_selected = False
if creator is not None:
creator_selected = True
attr_defs = creator.get_pre_create_attr_defs()
self._attributes_widget.set_attr_defs(attr_defs)
attr_defs_available = len(attr_defs) > 0
self._scroll_area.setVisible(attr_defs_available)
self._empty_widget.setVisible(not attr_defs_available)
self._empty_label.setVisible(creator_selected)
self._no_creator_label.setVisible(not creator_selected)
class AttributesWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(AttributesWidget, self).__init__(parent)
layout = QtWidgets.QGridLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
self._layout = layout
self._widgets = []
def current_value(self):
output = {}
for widget in self._widgets:
attr_def = widget.attr_def
if attr_def.is_value_def:
output[attr_def.key] = widget.current_value()
return output
def clear_attr_defs(self):
while self._layout.count():
item = self._layout.takeAt(0)
widget = item.widget()
if widget:
widget.setVisible(False)
widget.deleteLater()
self._widgets = []
def set_attr_defs(self, attr_defs):
self.clear_attr_defs()
row = 0
for attr_def in attr_defs:
widget = create_widget_for_attr_def(attr_def, self)
expand_cols = 2
if attr_def.is_value_def and attr_def.is_label_horizontal:
expand_cols = 1
col_num = 2 - expand_cols
if attr_def.label:
label_widget = QtWidgets.QLabel(attr_def.label, self)
self._layout.addWidget(
label_widget, row, 0, 1, expand_cols
)
if not attr_def.is_label_horizontal:
row += 1
self._layout.addWidget(
widget, row, col_num, 1, expand_cols
)
self._widgets.append(widget)
row += 1
```
#### File: settings/local_settings/general_widget.py
```python
import getpass
from Qt import QtWidgets, QtCore
from openpype.lib import is_admin_password_required
from openpype.widgets import PasswordDialog
from openpype.tools.utils import PlaceholderLineEdit
class LocalGeneralWidgets(QtWidgets.QWidget):
def __init__(self, parent):
super(LocalGeneralWidgets, self).__init__(parent)
self._loading_local_settings = False
username_input = PlaceholderLineEdit(self)
username_input.setPlaceholderText(getpass.getuser())
is_admin_input = QtWidgets.QCheckBox(self)
layout = QtWidgets.QFormLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addRow("OpenPype Username", username_input)
layout.addRow("Admin permissions", is_admin_input)
is_admin_input.stateChanged.connect(self._on_admin_check_change)
self.username_input = username_input
self.is_admin_input = is_admin_input
def update_local_settings(self, value):
self._loading_local_settings = True
username = ""
is_admin = False
if value:
username = value.get("username", username)
is_admin = value.get("is_admin", is_admin)
self.username_input.setText(username)
if self.is_admin_input.isChecked() != is_admin:
# Use state as `stateChanged` is connected to callbacks
if is_admin:
state = QtCore.Qt.Checked
else:
state = QtCore.Qt.Unchecked
self.is_admin_input.setCheckState(state)
self._loading_local_settings = False
def _on_admin_check_change(self):
if self._loading_local_settings:
return
if not self.is_admin_input.isChecked():
return
if not is_admin_password_required():
return
dialog = PasswordDialog(self, False)
dialog.setModal(True)
dialog.exec_()
result = dialog.result()
if self.is_admin_input.isChecked() != result:
# Use state as `stateChanged` is connected to callbacks
if result:
state = QtCore.Qt.Checked
else:
state = QtCore.Qt.Unchecked
self.is_admin_input.setCheckState(state)
def settings_value(self):
        # Return only the values that were changed by the user
output = {}
username = self.username_input.text()
if username:
output["username"] = username
is_admin = self.is_admin_input.isChecked()
if is_admin:
output["is_admin"] = is_admin
return output
```
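A small sketch of the value round-trip above; only fields the user actually changed end up in the returned dict:
```python
# Hedged sketch of LocalGeneralWidgets value handling (a Qt binding is
# assumed to be available through Qt.py).
from Qt import QtWidgets
app = QtWidgets.QApplication.instance() or QtWidgets.QApplication([])
widget = LocalGeneralWidgets(parent=None)
widget.update_local_settings({"username": "artist01", "is_admin": False})
print(widget.settings_value())
# Expected under these assumptions: {'username': 'artist01'}
```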
#### File: settings/settings/list_strict_widget.py
```python
from Qt import QtWidgets, QtCore
from .widgets import (
GridLabelWidget,
SpacerWidget
)
from .base import BaseWidget
class ListStrictWidget(BaseWidget):
def create_ui(self):
self.setObjectName("ListStrictWidget")
self._child_style_state = ""
self.input_fields = []
content_layout = QtWidgets.QGridLayout(self)
content_layout.setContentsMargins(0, 0, 0, 0)
content_layout.setSpacing(3)
self.content_layout = content_layout
self.content_widget = self
any_children_has_label = False
for child_obj in self.entity.children:
if child_obj.label:
any_children_has_label = True
break
self._any_children_has_label = any_children_has_label
# Change column stretch factor for vertical alignment
if not self.entity.is_horizontal:
col_index = 2 if any_children_has_label else 1
content_layout.setColumnStretch(col_index, 1)
for child_obj in self.entity.children:
self.input_fields.append(
self.create_ui_for_entity(
self.category_widget, child_obj, self
)
)
if self.entity.is_horizontal:
col = self.content_layout.columnCount()
spacer = SpacerWidget(self)
self.content_layout.addWidget(spacer, 0, col, 2, 1)
self.content_layout.setColumnStretch(col, 1)
self.entity_widget.add_widget_to_layout(self, self.entity.label)
@property
def is_invalid(self):
return self._is_invalid or self._child_invalid
@property
def _child_invalid(self):
for input_field in self.input_fields:
if input_field.is_invalid:
return True
return False
def get_invalid(self):
invalid = []
for input_field in self.input_fields:
invalid.extend(input_field.get_invalid())
return invalid
def make_sure_is_visible(self, path, scroll_to):
if not path:
return False
entity_path = self.entity.path
if entity_path == path:
self.set_focus(scroll_to)
return True
if path.startswith(entity_path):
for input_field in self.input_fields:
if input_field.make_sure_is_visible(path, scroll_to):
return True
return False
def add_widget_to_layout(self, widget, label=None):
# Horizontally added children
if self.entity.is_horizontal:
self._add_child_horizontally(widget, label)
else:
self._add_child_vertically(widget, label)
self.updateGeometry()
def _add_child_horizontally(self, widget, label):
col = self.content_layout.columnCount()
# Expand to whole grid if all children are without label
if not self._any_children_has_label:
self.content_layout.addWidget(widget, 0, col, 1, 2)
else:
if label:
label_widget = GridLabelWidget(label, widget)
label_widget.input_field = widget
widget.label_widget = label_widget
self.content_layout.addWidget(label_widget, 0, col, 1, 1)
col += 1
self.content_layout.addWidget(widget, 0, col, 1, 1)
def _add_child_vertically(self, widget, label):
row = self.content_layout.rowCount()
if not self._any_children_has_label:
self.content_layout.addWidget(widget, row, 0, 1, 1)
spacer_widget = SpacerWidget(self)
self.content_layout.addWidget(spacer_widget, row, 1, 1, 1)
else:
if label:
label_widget = GridLabelWidget(label, widget)
label_widget.input_field = widget
widget.label_widget = label_widget
self.content_layout.addWidget(
label_widget, row, 0, 1, 1,
alignment=QtCore.Qt.AlignRight | QtCore.Qt.AlignTop
)
self.content_layout.addWidget(widget, row, 1, 1, 1)
spacer_widget = SpacerWidget(self)
self.content_layout.addWidget(spacer_widget, row, 2, 1, 1)
def hierarchical_style_update(self):
self.update_style()
for input_field in self.input_fields:
input_field.hierarchical_style_update()
def set_entity_value(self):
for input_field in self.input_fields:
input_field.set_entity_value()
def _on_entity_change(self):
pass
def update_style(self):
if not self.label_widget:
return
style_state = self.get_style_state(
self.is_invalid,
self.entity.has_unsaved_changes,
self.entity.has_project_override,
self.entity.has_studio_override
)
if self._style_state == style_state:
return
self.label_widget.setProperty("state", style_state)
self.label_widget.style().polish(self.label_widget)
self._style_state = style_state
```
#### File: settings/settings/wrapper_widgets.py
```python
from uuid import uuid4
from Qt import QtWidgets
from .widgets import (
ExpandingWidget,
GridLabelWidget
)
from openpype.tools.settings import CHILD_OFFSET
class WrapperWidget(QtWidgets.QWidget):
def __init__(self, schema_data, parent=None):
super(WrapperWidget, self).__init__(parent)
self.entity = None
self.id = uuid4()
self.schema_data = schema_data
self.input_fields = []
self.create_ui()
def make_sure_is_visible(self, *args, **kwargs):
changed = False
for input_field in self.input_fields:
if input_field.make_sure_is_visible(*args, **kwargs):
changed = True
break
return changed
def create_ui(self):
raise NotImplementedError(
"{} does not have implemented `create_ui`.".format(
self.__class__.__name__
)
)
def add_widget_to_layout(self, widget, label=None):
raise NotImplementedError(
"{} does not have implemented `add_widget_to_layout`.".format(
self.__class__.__name__
)
)
class FormWrapper(WrapperWidget):
def create_ui(self):
self.content_layout = QtWidgets.QFormLayout(self)
self.content_layout.setContentsMargins(0, 0, 0, 0)
def add_widget_to_layout(self, widget, label=None):
if isinstance(widget, WrapperWidget):
raise TypeError(
"FormWrapper can't have other wrappers as children."
)
self.input_fields.append(widget)
label_widget = GridLabelWidget(label, widget)
label_widget.input_field = widget
widget.label_widget = label_widget
self.content_layout.addRow(label_widget, widget)
class CollapsibleWrapper(WrapperWidget):
def create_ui(self):
self.collapsible = self.schema_data.get("collapsible", True)
self.collapsed = self.schema_data.get("collapsed", True)
content_widget = QtWidgets.QWidget(self)
content_widget.setObjectName("ContentWidget")
content_widget.setProperty("content_state", "")
content_layout = QtWidgets.QGridLayout(content_widget)
content_layout.setContentsMargins(CHILD_OFFSET, 5, 0, 0)
body_widget = ExpandingWidget(self.schema_data["label"], self)
body_widget.set_content_widget(content_widget)
label_widget = body_widget.label_widget
main_layout = QtWidgets.QHBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.setSpacing(0)
if not body_widget:
main_layout.addWidget(content_widget)
else:
main_layout.addWidget(body_widget)
self.label_widget = label_widget
self.body_widget = body_widget
self.content_layout = content_layout
if self.collapsible:
body_widget.toggle_content(self.collapsed)
else:
body_widget.hide_toolbox(hide_content=False)
def make_sure_is_visible(self, *args, **kwargs):
result = super(CollapsibleWrapper, self).make_sure_is_visible(
*args, **kwargs
)
if result:
self.body_widget.toggle_content(True)
return result
def add_widget_to_layout(self, widget, label=None):
self.input_fields.append(widget)
row = self.content_layout.rowCount()
if not label or isinstance(widget, WrapperWidget):
self.content_layout.addWidget(widget, row, 0, 1, 2)
else:
label_widget = GridLabelWidget(label, widget)
label_widget.input_field = widget
widget.label_widget = label_widget
self.content_layout.addWidget(label_widget, row, 0, 1, 1)
self.content_layout.addWidget(widget, row, 1, 1, 1)
```
#### File: standalonepublish/widgets/widget_asset.py
```python
import contextlib
from Qt import QtWidgets, QtCore
import qtawesome
from openpype.tools.utils import PlaceholderLineEdit
from openpype.style import get_default_tools_icon_color
from . import RecursiveSortFilterProxyModel, AssetModel
from . import TasksTemplateModel, DeselectableTreeView
from . import _iter_model_rows
@contextlib.contextmanager
def preserve_expanded_rows(tree_view,
column=0,
role=QtCore.Qt.DisplayRole):
"""Preserves expanded row in QTreeView by column's data role.
This function is created to maintain the expand vs collapse status of
the model items. When refresh is triggered the items which are expanded
will stay expanded and vice versa.
Arguments:
tree_view (QWidgets.QTreeView): the tree view which is
nested in the application
column (int): the column to retrieve the data from
role (int): the role which dictates what will be returned
Returns:
None
"""
model = tree_view.model()
expanded = set()
for index in _iter_model_rows(model,
column=column,
include_root=False):
if tree_view.isExpanded(index):
value = index.data(role)
expanded.add(value)
try:
yield
finally:
if not expanded:
return
for index in _iter_model_rows(model,
column=column,
include_root=False):
value = index.data(role)
state = value in expanded
if state:
tree_view.expand(index)
else:
tree_view.collapse(index)
@contextlib.contextmanager
def preserve_selection(tree_view,
column=0,
role=QtCore.Qt.DisplayRole,
current_index=True):
"""Preserves row selection in QTreeView by column's data role.
    This function is created to maintain the selection status of
    the model items. When refresh is triggered the items which are selected
    will stay selected and vice versa.
    Arguments:
        tree_view (QWidgets.QTreeView): the tree view nested in the application
        column (int): the column to retrieve the data from
        role (int): the role which dictates what will be returned
        current_index (bool): whether the current index should be restored
Returns:
None
"""
model = tree_view.model()
selection_model = tree_view.selectionModel()
flags = selection_model.Select | selection_model.Rows
if current_index:
current_index_value = tree_view.currentIndex().data(role)
else:
current_index_value = None
selected_rows = selection_model.selectedRows()
if not selected_rows:
yield
return
selected = set(row.data(role) for row in selected_rows)
try:
yield
finally:
if not selected:
return
# Go through all indices, select the ones with similar data
for index in _iter_model_rows(model,
column=column,
include_root=False):
value = index.data(role)
state = value in selected
if state:
tree_view.scrollTo(index) # Ensure item is visible
selection_model.select(index, flags)
if current_index_value and value == current_index_value:
tree_view.setCurrentIndex(index)
class AssetWidget(QtWidgets.QWidget):
"""A Widget to display a tree of assets with filter
To list the assets of the active project:
>>> # widget = AssetWidget()
>>> # widget.refresh()
>>> # widget.show()
"""
project_changed = QtCore.Signal(str)
assets_refreshed = QtCore.Signal() # on model refresh
selection_changed = QtCore.Signal() # on view selection change
current_changed = QtCore.Signal() # on view current index change
task_changed = QtCore.Signal()
def __init__(self, dbcon, settings, parent=None):
super(AssetWidget, self).__init__(parent=parent)
self.setContentsMargins(0, 0, 0, 0)
self.dbcon = dbcon
self._settings = settings
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(4)
# Project
self.combo_projects = QtWidgets.QComboBox()
        # Change delegate so stylesheets are applied
project_delegate = QtWidgets.QStyledItemDelegate(self.combo_projects)
self.combo_projects.setItemDelegate(project_delegate)
self._set_projects()
self.combo_projects.currentTextChanged.connect(self.on_project_change)
# Tree View
model = AssetModel(dbcon=self.dbcon, parent=self)
proxy = RecursiveSortFilterProxyModel()
proxy.setSourceModel(model)
proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
view = DeselectableTreeView()
view.setIndentation(15)
view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
view.setHeaderHidden(True)
view.setModel(proxy)
# Header
header = QtWidgets.QHBoxLayout()
icon = qtawesome.icon(
"fa.refresh", color=get_default_tools_icon_color()
)
refresh = QtWidgets.QPushButton(icon, "")
refresh.setToolTip("Refresh items")
filter = PlaceholderLineEdit()
filter.textChanged.connect(proxy.setFilterFixedString)
filter.setPlaceholderText("Filter assets..")
header.addWidget(filter)
header.addWidget(refresh)
# Layout
layout.addWidget(self.combo_projects)
layout.addLayout(header)
layout.addWidget(view)
# tasks
task_view = DeselectableTreeView()
task_view.setIndentation(0)
task_view.setHeaderHidden(True)
task_view.setVisible(False)
task_model = TasksTemplateModel()
task_view.setModel(task_model)
main_layout = QtWidgets.QVBoxLayout(self)
main_layout.setContentsMargins(0, 0, 0, 0)
main_layout.setSpacing(4)
main_layout.addLayout(layout, 80)
main_layout.addWidget(task_view, 20)
# Signals/Slots
selection = view.selectionModel()
selection.selectionChanged.connect(self.selection_changed)
selection.currentChanged.connect(self.current_changed)
task_view.selectionModel().selectionChanged.connect(
self._on_task_change
)
refresh.clicked.connect(self.refresh)
self.selection_changed.connect(self._refresh_tasks)
self.project_delegate = project_delegate
self.task_view = task_view
self.task_model = task_model
self.refreshButton = refresh
self.model = model
self.proxy = proxy
self.view = view
def collect_data(self):
project = self.dbcon.find_one({'type': 'project'})
asset = self.get_active_asset()
try:
index = self.task_view.selectedIndexes()[0]
task = self.task_model.itemData(index)[0]
except Exception:
task = None
data = {
'project': project['name'],
'asset': asset['name'],
'silo': asset.get("silo"),
'parents': self.get_parents(asset),
'task': task
}
return data
def get_parents(self, entity):
ent_parents = entity.get("data", {}).get("parents")
if ent_parents is not None and isinstance(ent_parents, list):
return ent_parents
output = []
if entity.get('data', {}).get('visualParent', None) is None:
return output
parent = self.dbcon.find_one({'_id': entity['data']['visualParent']})
output.append(parent['name'])
output.extend(self.get_parents(parent))
return output
def _get_last_projects(self):
if not self._settings:
return []
project_names = []
for project_name in self._settings.value("projects", "").split("|"):
if project_name:
project_names.append(project_name)
return project_names
def _add_last_project(self, project_name):
if not self._settings:
return
last_projects = []
for _project_name in self._settings.value("projects", "").split("|"):
if _project_name:
last_projects.append(_project_name)
if project_name in last_projects:
last_projects.remove(project_name)
last_projects.insert(0, project_name)
while len(last_projects) > 5:
last_projects.pop(-1)
self._settings.setValue("projects", "|".join(last_projects))
def _set_projects(self):
project_names = list()
for doc in self.dbcon.projects(projection={"name": 1},
only_active=True):
project_name = doc.get("name")
if project_name:
project_names.append(project_name)
self.combo_projects.clear()
if not project_names:
return
sorted_project_names = list(sorted(project_names))
        self.combo_projects.addItems(sorted_project_names)
last_project = sorted_project_names[0]
for project_name in self._get_last_projects():
if project_name in sorted_project_names:
last_project = project_name
break
index = sorted_project_names.index(last_project)
self.combo_projects.setCurrentIndex(index)
self.dbcon.Session["AVALON_PROJECT"] = last_project
def on_project_change(self):
projects = list()
for project in self.dbcon.projects(projection={"name": 1},
only_active=True):
projects.append(project['name'])
project_name = self.combo_projects.currentText()
if project_name in projects:
self.dbcon.Session["AVALON_PROJECT"] = project_name
self._add_last_project(project_name)
self.project_changed.emit(project_name)
self.refresh()
def _refresh_model(self):
with preserve_expanded_rows(
self.view, column=0, role=self.model.ObjectIdRole
):
with preserve_selection(
self.view, column=0, role=self.model.ObjectIdRole
):
self.model.refresh()
self.assets_refreshed.emit()
def refresh(self):
self._refresh_model()
def _on_task_change(self):
try:
index = self.task_view.selectedIndexes()[0]
task_name = self.task_model.itemData(index)[0]
except Exception:
task_name = None
self.dbcon.Session["AVALON_TASK"] = task_name
self.task_changed.emit()
def _refresh_tasks(self):
self.dbcon.Session["AVALON_TASK"] = None
tasks = []
selected = self.get_selected_assets()
if len(selected) == 1:
asset = self.dbcon.find_one({
"_id": selected[0], "type": "asset"
})
if asset:
tasks = asset.get('data', {}).get('tasks', [])
self.task_model.set_tasks(tasks)
self.task_view.setVisible(len(tasks) > 0)
self.task_changed.emit()
def get_active_asset(self):
"""Return the asset id the current asset."""
current = self.view.currentIndex()
return current.data(self.model.ItemRole)
def get_active_index(self):
return self.view.currentIndex()
def get_selected_assets(self):
"""Return the assets' ids that are selected."""
selection = self.view.selectionModel()
rows = selection.selectedRows()
return [row.data(self.model.ObjectIdRole) for row in rows]
def select_assets(self, assets, expand=True, key="name"):
"""Select assets by name.
Args:
assets (list): List of asset names
expand (bool): Whether to also expand to the asset in the view
Returns:
None
"""
# TODO: Instead of individual selection optimize for many assets
if not isinstance(assets, (tuple, list)):
assets = [assets]
assert isinstance(
assets, (tuple, list)
), "Assets must be list or tuple"
# convert to list - tuple can't be modified
assets = list(assets)
# Clear selection
selection_model = self.view.selectionModel()
selection_model.clearSelection()
# Select
mode = selection_model.Select | selection_model.Rows
        for index in _iter_model_rows(
self.proxy, column=0, include_root=False
):
# stop iteration if there are no assets to process
if not assets:
break
value = index.data(self.model.ItemRole).get(key)
if value not in assets:
continue
# Remove processed asset
assets.pop(assets.index(value))
selection_model.select(index, mode)
if expand:
# Expand parent index
self.view.expand(self.proxy.parent(index))
# Set the currently active index
self.view.setCurrentIndex(index)
```
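The two context managers defined in this module can be used with any tree view; a minimal runnable sketch (assuming a Qt binding is available through Qt.py) showing that expansion and selection state keyed by DisplayRole survive a model rebuild:
```python
# Hedged sketch of preserve_expanded_rows / preserve_selection.
from Qt import QtWidgets, QtGui
app = QtWidgets.QApplication.instance() or QtWidgets.QApplication([])
def _build_rows(model):
    parent_item = QtGui.QStandardItem("shots")
    parent_item.appendRow(QtGui.QStandardItem("sh010"))
    model.appendRow(parent_item)
model = QtGui.QStandardItemModel()
_build_rows(model)
view = QtWidgets.QTreeView()
view.setModel(model)
view.expandAll()
# Items rebuilt with the same names are re-expanded (and re-selected)
# after the block exits.
with preserve_expanded_rows(view), preserve_selection(view):
    model.clear()
    _build_rows(model)
```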
#### File: standalonepublish/widgets/widget_components_list.py
```python
from Qt import QtWidgets
class ComponentsList(QtWidgets.QTableWidget):
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setObjectName("ComponentList")
self._main_column = 0
self.setColumnCount(1)
self.setSelectionBehavior(
QtWidgets.QAbstractItemView.SelectRows
)
self.setSelectionMode(
QtWidgets.QAbstractItemView.ExtendedSelection
)
self.setVerticalScrollMode(
QtWidgets.QAbstractItemView.ScrollPerPixel
)
self.verticalHeader().hide()
try:
self.verticalHeader().setResizeMode(
QtWidgets.QHeaderView.ResizeToContents
)
except Exception:
self.verticalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.ResizeToContents
)
self.horizontalHeader().setStretchLastSection(True)
self.horizontalHeader().hide()
def count(self):
return self.rowCount()
def add_widget(self, widget, row=None):
if row is None:
row = self.count()
self.insertRow(row)
self.setCellWidget(row, self._main_column, widget)
self.resizeRowToContents(row)
return row
def remove_widget(self, row):
self.removeRow(row)
def move_widget(self, widget, newRow):
        oldRow = self.widget_index(widget)
        if oldRow is not None:
            self.insertRow(newRow)
            # Collect the oldRow after insert to make sure we move the correct
            # widget.
            oldRow = self.widget_index(widget)
self.setCellWidget(newRow, self._main_column, widget)
self.resizeRowToContents(oldRow)
# Remove the old row
self.removeRow(oldRow)
def clear_widgets(self):
'''Remove all widgets.'''
self.clear()
self.setRowCount(0)
def widget_index(self, widget):
index = None
for row in range(self.count()):
candidateWidget = self.widget_at(row)
if candidateWidget == widget:
index = row
break
return index
def widgets(self):
widgets = []
for row in range(self.count()):
widget = self.widget_at(row)
widgets.append(widget)
return widgets
def widget_at(self, row):
return self.cellWidget(row, self._main_column)
```
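A short usage sketch of the list above; plain labels stand in for real component widgets and a Qt binding is assumed to be available through Qt.py:
```python
# Hedged sketch: adding widgets and querying rows on ComponentsList.
from Qt import QtWidgets
app = QtWidgets.QApplication.instance() or QtWidgets.QApplication([])
components = ComponentsList()
for text in ("thumbnail", "review", "beauty"):
    components.add_widget(QtWidgets.QLabel(text))
print(components.count())                                # 3
print(components.widget_index(components.widget_at(1)))  # 1
```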
#### File: common/scriptsmenu/launchformari.py
```python
from vendor.Qt import QtWidgets
# Import local modules
import scriptsmenu
def _mari_main_window():
"""Get Mari main window.
Returns:
MriMainWindow: Mari's main window.
"""
for obj in QtWidgets.QApplication.topLevelWidgets():
if obj.metaObject().className() == 'MriMainWindow':
return obj
raise RuntimeError('Could not find Mari MainWindow instance')
def _mari_main_menubar():
"""Get Mari main menu bar.
Returns:
        QtWidgets.QMenuBar: The main menubar of the Mari window.
"""
mari_window = _mari_main_window()
menubar = [
i for i in mari_window.children() if isinstance(i, QtWidgets.QMenuBar)
]
assert len(menubar) == 1, "Error, could not find menu bar!"
return menubar[0]
def main(title="Scripts"):
"""Build the main scripts menu in Mari.
Args:
title (str): Name of the menu in the application.
Returns:
scriptsmenu.ScriptsMenu: Instance object.
"""
mari_main_bar = _mari_main_menubar()
for mari_bar in mari_main_bar.children():
if isinstance(mari_bar, scriptsmenu.ScriptsMenu):
if mari_bar.title() == title:
menu = mari_bar
return menu
menu = scriptsmenu.ScriptsMenu(title=title, parent=mari_main_bar)
return menu
```
#### File: common/scriptsmenu/launchfornuke.py
```python
import scriptsmenu
from .vendor.Qt import QtWidgets
def _nuke_main_window():
"""Return Nuke's main window"""
for obj in QtWidgets.QApplication.topLevelWidgets():
if (obj.inherits('QMainWindow') and
obj.metaObject().className() == 'Foundry::UI::DockMainWindow'):
return obj
raise RuntimeError('Could not find Nuke MainWindow instance')
def _nuke_main_menubar():
"""Retrieve the main menubar of the Nuke window"""
nuke_window = _nuke_main_window()
menubar = [i for i in nuke_window.children()
if isinstance(i, QtWidgets.QMenuBar)]
assert len(menubar) == 1, "Error, could not find menu bar!"
return menubar[0]
def main(title="Scripts"):
# Register control + shift callback to add to shelf (Nuke behavior)
# modifiers = QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier
# menu.register_callback(modifiers, to_shelf)
nuke_main_bar = _nuke_main_menubar()
for nuke_bar in nuke_main_bar.children():
if isinstance(nuke_bar, scriptsmenu.ScriptsMenu):
if nuke_bar.title() == title:
menu = nuke_bar
return menu
menu = scriptsmenu.ScriptsMenu(title=title, parent=nuke_main_bar)
return menu
```
#### File: opentimelineio/plugins/python_plugin.py
```python
import os
import imp
from .. import (
core,
exceptions,
)
class PythonPlugin(core.SerializableObject):
"""A class of plugin that is encoded in a python module, exposed via a
manifest.
"""
_serializable_label = "PythonPlugin.1"
def __init__(
self,
name=None,
execution_scope=None,
filepath=None,
):
super(PythonPlugin, self).__init__()
self.name = name
self.execution_scope = execution_scope
self.filepath = filepath
self._json_path = None
self._module = None
name = core.serializable_field("name", doc="Adapter name.")
execution_scope = core.serializable_field(
"execution_scope",
str,
doc=(
"Describes whether this adapter is executed in the current python"
" process or in a subshell. Options are: "
"['in process', 'out of process']."
)
)
filepath = core.serializable_field(
"filepath",
str,
doc=(
"Absolute path or relative path to adapter module from location of"
" json."
)
)
def module_abs_path(self):
"""Return an absolute path to the module implementing this adapter."""
filepath = self.filepath
if not os.path.isabs(filepath):
if not self._json_path:
raise exceptions.MisconfiguredPluginError(
"{} plugin is misconfigured, missing json path. "
"plugin: {}".format(
self.name,
repr(self)
)
)
filepath = os.path.join(os.path.dirname(self._json_path), filepath)
return filepath
def _imported_module(self, namespace):
"""Load the module this plugin points at."""
pyname = os.path.splitext(os.path.basename(self.module_abs_path()))[0]
pydir = os.path.dirname(self.module_abs_path())
(file_obj, pathname, description) = imp.find_module(pyname, [pydir])
with file_obj:
# this will reload the module if it has already been loaded.
mod = imp.load_module(
"opentimelineio.{}.{}".format(namespace, self.name),
file_obj,
pathname,
description
)
return mod
def module(self):
"""Return the module object for this adapter. """
if not self._module:
self._module = self._imported_module("adapters")
return self._module
def _execute_function(self, func_name, **kwargs):
"""Execute func_name on this adapter with error checking."""
# collects the error handling into a common place.
if not hasattr(self.module(), func_name):
raise exceptions.AdapterDoesntSupportFunctionError(
"Sorry, {} doesn't support {}.".format(self.name, func_name)
)
return (getattr(self.module(), func_name)(**kwargs))
```
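A hedged sketch of how a manifest entry maps onto a `PythonPlugin` instance; the names and paths below are made up for illustration and `_json_path` is normally filled in by the manifest loader:
```python
# Illustrative only: resolve where the plugin's python module would live.
plugin = PythonPlugin(
    name="example_adapter",
    execution_scope="in process",
    filepath="adapters/example_adapter.py",  # relative to the manifest json
)
plugin._json_path = "/path/to/plugin_manifest.json"  # assumption, set by loader
print(plugin.module_abs_path())
# /path/to/adapters/example_adapter.py
# module() / _execute_function() would then lazily import that file and
# dispatch to functions defined inside it.
```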
#### File: hosts/aftereffects/test_publish_in_aftereffects.py
```python
import logging
from tests.lib.assert_classes import DBAssert
from tests.integration.hosts.aftereffects.lib import AfterEffectsTestClass
log = logging.getLogger("test_publish_in_aftereffects")
class TestPublishInAfterEffects(AfterEffectsTestClass):
"""Basic test case for publishing in AfterEffects
Uses generic TestCase to prepare fixtures for test data, testing DBs,
env vars.
    Opens AfterEffects, runs publish on prepared workfile.
    Test zip file sets 3 required env vars:
    - HEADLESS_PUBLISH - this triggers publish immediately after the app is open
    - IS_TEST - this differentiates a test run from a regular webpublish
    - PYBLISH_TARGETS
    Then checks content of DB (if subset, version, representations were
    created).
Checks tmp folder if all expected files were published.
"""
PERSIST = False
TEST_FILES = [
("1c8261CmHwyMgS-g7S4xL5epAp0jCBmhf",
"test_aftereffects_publish.zip",
"")
]
APP = "aftereffects"
APP_VARIANT = ""
APP_NAME = "{}/{}".format(APP, APP_VARIANT)
TIMEOUT = 120 # publish timeout
def test_db_asserts(self, dbcon, publish_finished):
"""Host and input data dependent expected results in DB."""
print("test_db_asserts")
failures = []
failures.append(DBAssert.count_of_types(dbcon, "version", 2))
failures.append(
DBAssert.count_of_types(dbcon, "version", 0, name={"$ne": 1}))
failures.append(
DBAssert.count_of_types(dbcon, "subset", 1,
name="imageMainBackgroundcopy"))
failures.append(
DBAssert.count_of_types(dbcon, "subset", 1,
name="workfileTest_task"))
failures.append(
DBAssert.count_of_types(dbcon, "subset", 1,
name="reviewTesttask"))
failures.append(
DBAssert.count_of_types(dbcon, "representation", 4))
additional_args = {"context.subset": "renderTestTaskDefault",
"context.ext": "png"}
failures.append(
DBAssert.count_of_types(dbcon, "representation", 1,
additional_args=additional_args))
assert not any(failures)
if __name__ == "__main__":
test_case = TestPublishInAfterEffects()
```
#### File: modules/sync_server/test_site_operations.py
```python
import pytest
from tests.lib.testing_classes import ModuleUnitTest
from bson.objectid import ObjectId
class TestSiteOperation(ModuleUnitTest):
REPRESENTATION_ID = "60e578d0c987036c6a7b741d"
TEST_FILES = [("1eCwPljuJeOI8A3aisfOIBKKjcmIycTEt",
"test_site_operations.zip", '')]
@pytest.fixture(scope="module")
def setup_sync_server_module(self, dbcon):
"""Get sync_server_module from ModulesManager"""
from openpype.modules import ModulesManager
manager = ModulesManager()
sync_server = manager.modules_by_name["sync_server"]
yield sync_server
@pytest.mark.usefixtures("dbcon")
def test_project_created(self, dbcon):
assert ['test_project'] == dbcon.database.collection_names(False)
@pytest.mark.usefixtures("dbcon")
def test_objects_imported(self, dbcon):
count_obj = len(list(dbcon.database[self.TEST_PROJECT_NAME].find({})))
assert 15 == count_obj
@pytest.mark.usefixtures("setup_sync_server_module")
def test_add_site(self, dbcon, setup_sync_server_module):
"""Adds 'test_site', checks that added,
checks that doesn't duplicate."""
query = {
"_id": ObjectId(self.REPRESENTATION_ID)
}
ret = dbcon.database[self.TEST_PROJECT_NAME].find(query)
assert 1 == len(list(ret)), \
"Single {} must be in DB".format(self.REPRESENTATION_ID)
setup_sync_server_module.add_site(self.TEST_PROJECT_NAME,
self.REPRESENTATION_ID,
site_name='test_site')
ret = list(dbcon.database[self.TEST_PROJECT_NAME].find(query))
assert 1 == len(ret), \
"Single {} must be in DB".format(self.REPRESENTATION_ID)
ret = ret.pop()
site_names = [site["name"] for site in ret["files"][0]["sites"]]
assert 'test_site' in site_names, "Site name wasn't added"
@pytest.mark.usefixtures("setup_sync_server_module")
def test_add_site_again(self, dbcon, setup_sync_server_module):
"""Depends on test_add_site, must throw exception."""
with pytest.raises(ValueError):
setup_sync_server_module.add_site(self.TEST_PROJECT_NAME,
self.REPRESENTATION_ID,
site_name='test_site')
@pytest.mark.usefixtures("setup_sync_server_module")
def test_add_site_again_force(self, dbcon, setup_sync_server_module):
"""Depends on test_add_site, must not throw exception."""
setup_sync_server_module.add_site(self.TEST_PROJECT_NAME,
self.REPRESENTATION_ID,
site_name='test_site', force=True)
query = {
"_id": ObjectId(self.REPRESENTATION_ID)
}
ret = list(dbcon.database[self.TEST_PROJECT_NAME].find(query))
assert 1 == len(ret), \
"Single {} must be in DB".format(self.REPRESENTATION_ID)
@pytest.mark.usefixtures("setup_sync_server_module")
def test_remove_site(self, dbcon, setup_sync_server_module):
"""Depends on test_add_site, must remove 'test_site'."""
setup_sync_server_module.remove_site(self.TEST_PROJECT_NAME,
self.REPRESENTATION_ID,
site_name='test_site')
query = {
"_id": ObjectId(self.REPRESENTATION_ID)
}
ret = list(dbcon.database[self.TEST_PROJECT_NAME].find(query))
assert 1 == len(ret), \
"Single {} must be in DB".format(self.REPRESENTATION_ID)
ret = ret.pop()
site_names = [site["name"] for site in ret["files"][0]["sites"]]
assert 'test_site' not in site_names, "Site name wasn't removed"
@pytest.mark.usefixtures("setup_sync_server_module")
def test_remove_site_again(self, dbcon, setup_sync_server_module):
"""Depends on test_add_site, must trow exception"""
with pytest.raises(ValueError):
setup_sync_server_module.remove_site(self.TEST_PROJECT_NAME,
self.REPRESENTATION_ID,
site_name='test_site')
query = {
"_id": ObjectId(self.REPRESENTATION_ID)
}
ret = list(dbcon.database[self.TEST_PROJECT_NAME].find(query))
assert 1 == len(ret), \
"Single {} must be in DB".format(self.REPRESENTATION_ID)
test_case = TestSiteOperation()
```
#### File: OpenPype/tools/build_dependencies.py
```python
import os
import sys
import site
from sysconfig import get_platform
import platform
import subprocess
from pathlib import Path
import shutil
import blessed
import enlighten
import time
term = blessed.Terminal()
manager = enlighten.get_manager()
def _print(msg: str, type: int = 0) -> None:
"""Print message to console.
Args:
msg (str): message to print
type (int): type of message (0 info, 1 error, 2 note)
"""
if type == 0:
header = term.aquamarine3(">>> ")
elif type == 1:
header = term.orangered2("!!! ")
elif type == 2:
header = term.tan1("... ")
else:
header = term.darkolivegreen3("--- ")
print("{}{}".format(header, msg))
def count_folders(path: Path) -> int:
"""Recursively count items inside given Path.
Args:
path (Path): Path to count.
Returns:
int: number of items.
"""
cnt = 0
for child in path.iterdir():
if child.is_dir():
cnt += 1
cnt += count_folders(child)
return cnt
_print("Starting dependency cleanup ...")
start_time = time.time_ns()
# path to venv site packages
sites = site.getsitepackages()
# WARNING: this assumes that all we've got is the path to the venv itself and
# another path ending with 'site-packages', as is the default. But because
# this must run under different platforms, we cannot easily check whether this
# path is the right one, because under Linux and macOS site-packages are in a
# different location.
site_pkg = None
for s in sites:
site_pkg = Path(s)
if site_pkg.name == "site-packages":
break
_print("Getting venv site-packages ...")
assert site_pkg, "No venv site-packages are found."
_print(f"Working with: {site_pkg}", 2)
openpype_root = Path(os.path.dirname(__file__)).parent
# create full path
if platform.system().lower() == "darwin":
build_dir = openpype_root.joinpath(
"build",
"OpenPype.app",
"Contents",
"MacOS")
else:
build_subdir = "exe.{}-{}".format(get_platform(), sys.version[0:3])
build_dir = openpype_root / "build" / build_subdir
_print(f"Using build at {build_dir}", 2)
if not build_dir.exists():
_print("Build directory doesn't exist", 1)
_print("Probably freezing of code failed. Check ./build/build.log", 3)
sys.exit(1)
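# Note: shutil.copytree() calls the `ignore` callable once per visited
# directory; _progress below only advances the progress bar and ignores
# nothing (it always returns an empty list).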
def _progress(_base, _names):
progress_bar.update()
return []
deps_dir = build_dir / "dependencies"
vendor_dir = build_dir / "vendor"
vendor_src = openpype_root / "vendor"
# copy vendor files
_print("Copying vendor files ...")
total_files = count_folders(vendor_src)
progress_bar = enlighten.Counter(
total=total_files, desc="Copying vendor files ...",
units="%", color=(64, 128, 222))
shutil.copytree(vendor_src.as_posix(),
vendor_dir.as_posix(),
ignore=_progress)
progress_bar.close()
# copy all files
_print("Copying dependencies ...")
total_files = count_folders(site_pkg)
progress_bar = enlighten.Counter(
total=total_files, desc="Processing Dependencies",
units="%", color=(53, 178, 202))
shutil.copytree(site_pkg.as_posix(),
deps_dir.as_posix(),
ignore=_progress)
progress_bar.close()
# iterate over frozen libs and create list to delete
libs_dir = build_dir / "lib"
# On Linux use rpath from source libraries in destination libraries
if platform.system().lower() == "linux":
src_pyside_dir = openpype_root / "vendor" / "python" / "PySide2"
dst_pyside_dir = build_dir / "vendor" / "python" / "PySide2"
src_rpath_per_so_file = {}
for filepath in src_pyside_dir.glob("*.so"):
filename = filepath.name
rpath = (
subprocess.check_output(["patchelf", "--print-rpath", filepath])
.decode("utf-8")
.strip()
)
src_rpath_per_so_file[filename] = rpath
for filepath in dst_pyside_dir.glob("*.so"):
filename = filepath.name
if filename not in src_rpath_per_so_file:
continue
src_rpath = src_rpath_per_so_file[filename]
subprocess.check_call(
["patchelf", "--set-rpath", src_rpath, filepath]
)
to_delete = []
# _print("Finding duplicates ...")
deps_items = list(deps_dir.iterdir())
item_count = len(list(libs_dir.iterdir()))
find_progress_bar = enlighten.Counter(
total=item_count, desc="Finding duplicates", units="%",
color=(56, 211, 159))
for d in libs_dir.iterdir():
if (deps_dir / d.name) in deps_items:
to_delete.append(d)
# _print(f"found {d}", 3)
find_progress_bar.update()
find_progress_bar.close()
# add openpype and igniter in libs too
to_delete.append(libs_dir / "openpype")
to_delete.append(libs_dir / "igniter")
to_delete.append(libs_dir / "openpype.pth")
to_delete.append(deps_dir / "openpype.pth")
# delete duplicates
# _print(f"Deleting {len(to_delete)} duplicates ...")
delete_progress_bar = enlighten.Counter(
total=len(to_delete), desc="Deleting duplicates", units="%",
color=(251, 192, 32))
for d in to_delete:
if d.is_dir():
shutil.rmtree(d)
else:
try:
d.unlink()
except FileNotFoundError:
# skip non-existent silently
pass
delete_progress_bar.update()
delete_progress_bar.close()
end_time = time.time_ns()
total_time = (end_time - start_time) / 1000000000
_print(f"Dependency cleanup done in {total_time} secs.")
```
#### File: OpenPype/tools/parse_pyproject.py
```python
import sys
import os
import toml
from pathlib import Path
import click
@click.command()
@click.argument("keys", nargs=-1, type=click.STRING)
def main(keys):
"""Get values from `pyproject.toml`.
You can specify dot separated keys from `pyproject.toml`
as arguments and this script will return them on separate
lines. If key doesn't exists, None is returned.
"""
openpype_root = Path(os.path.dirname(__file__)).parent
py_project = toml.load(openpype_root / "pyproject.toml")
for q in keys:
query = q.split(".")
data = py_project
for k in query:
if isinstance(data, list):
try:
data = data[int(k)]
except IndexError:
print("None")
sys.exit()
continue
if isinstance(data, dict):
data = data.get(k)
print(data)
if __name__ == "__main__":
main()
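# Hedged usage sketch (not part of the original script). The keys below are
# illustrative and assume a Poetry-style pyproject.toml; use whatever keys
# actually exist in your file:
#
#   python tools/parse_pyproject.py tool.poetry.version tool.poetry.name
#
# Each requested value is printed on its own line; a missing dictionary key
# prints "None", and an out-of-range list index prints "None" and exits.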
``` |
{
"source": "joncnet/empower-runtime",
"score": 2
} |
#### File: apps/mcast/mcast.py
```python
import sys
import empower.managers.apimanager.apimanager as apimanager
from empower.core.app import EApp
from empower.core.app import EVERY
from empower.core.etheraddress import EtherAddress
from empower.core.txpolicy import TxPolicy
from empower.core.txpolicy import TX_MCAST
from empower.core.txpolicy import TX_MCAST_DMS
from empower.core.txpolicy import TX_MCAST_LEGACY
from empower.core.resourcepool import BT_HT20
TX_MCAST_SDNPLAY = 0x3
TX_MCAST_SDNPLAY_H = "sdn@play"
# pylint: disable=W0223
class McastServicesHandler(apimanager.EmpowerAPIHandler):
"""Access applications' attributes."""
URLS = [r"/api/v1/projects/([a-zA-Z0-9-]*)/apps/([a-zA-Z0-9-]*)/"
"mcast_services/([a-zA-Z0-9:]*)/?",
r"/api/v1/projects/([a-zA-Z0-9-]*)/apps/([a-zA-Z0-9-]*)/"
"mcast_services/?"]
@apimanager.validate(min_args=2, max_args=3)
def get(self, *args, **kwargs):
"""Access the mcast_services .
Args:
[0]: the project id (mandatory)
[1]: the app id (mandatory)
[2]: the mcast service MAC address (optional)
Example URLs:
GET /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps/
mcast_services/01:00:5E:00:01:C8
{
addr: "01:00:5E:00:01:C8",
ipaddress: "192.168.3.11",
mcs: 0,
schedule: [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
receivers: [
"FF:FF:FF:FF:FF:FF"
],
status: "true",
service_type: "emergency"
}
"""
if len(args) == 2:
return self.service.mcast_services
return self.service.mcast_services[EtherAddress(args[2])]
@apimanager.validate(returncode=201, min_args=2, max_args=2)
def post(self, *args, **kwargs):
"""Add/update a new mcast service
Args:
[0]: the project id (mandatory)
[1]: the app id (mandatory)
Request:
version: protocol version (1.0)
ipaddress: the mcast IP address
receivers: the list of mcast receptors
status: the service status
service_type: a label describing the service
Example URLs:
POST /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps/
7069c865-8849-4840-9d96-e028663a5dcf/mcast_service
{
"ipaddress": "192.168.3.11",
"receivers": ["ff:ff:ff:ff:ff:ff"],
"status": "true",
"service_type": "emergency"
}
"""
addr = self.service.upsert_mcast_service(kwargs['ipaddress'],
kwargs['receivers'],
kwargs['status'],
kwargs['service_type'])
self.service.save_service_state()
url = "/api/v1/projects/%s/apps/%s/mcast_service/%s" % \
(self.service.context.project_id, self.service.service_id, addr)
self.set_header("Location", url)
@apimanager.validate(min_args=3, max_args=3)
def delete(self, *args, **kwargs):
"""Delete the mcast_services .
Args:
[0]: the project id (mandatory)
[1]: the app id (mandatory)
[2]: the mcast service MAC address (mandatory)
Example URLs:
DELETE /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps/
mcast_services/01:00:5E:00:01:C8
"""
self.service.delete_mcast_service(EtherAddress(args[2]))
self.service.save_service_state()
class Mcast(EApp):
"""SDN@Play Multicast Manager.
This app implements the SDN@Play [1] algorithm.
[1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "SDN@Play: Software-Defined
Multicasting in Enterprise WLANs", in IEEE Communications Magazine,
vol. 57, no. 7, pp. 85-91, July 2019.
Parameters:
service_id: the application id as an UUID (mandatory)
project_id: the project id as an UUID (mandatory)
every: the loop period in ms (optional, default 2000ms)
Example:
POST /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps
{
"name": "empower.apps.mcast.mcast",
"params": {
"every": 2000
}
}
"""
HANDLERS = [McastServicesHandler]
def __init__(self, context, service_id, every=EVERY):
super().__init__(context=context, service_id=service_id, every=every)
self.receptors = {}
self.receptors_mcses = {}
self.receptors_quality = {}
self.prob_threshold = 90.0
self.current = 0
self.dms = 1
self.legacy = 9
self.schedule = \
[TX_MCAST_DMS] * self.dms + \
[TX_MCAST_LEGACY] * self.legacy # --> [DMS, LEGACY, LEGACY...]
self._demo_mode = TX_MCAST_SDNPLAY_H
self._services_registered = 0
self.status = {}
self.storage['mcast_services'] = {}
def upsert_mcast_service(self, ipaddress, receivers, status, service_type):
"""Update/insert new mcast services.
Expected input:
{
"ip": "192.168.3.11",
"receivers": ["ff:ff:ff:ff:ff:ff"],
"status": True,
"type": "emergency"
}
"""
addr = self.mcast_ip_to_ether(ipaddress)
if addr not in self.mcast_services:
schedule = self.schedule[-self._services_registered:] + \
self.schedule[:-self._services_registered]
self.mcast_services[addr] = {
"addr": addr,
"ipaddress": ipaddress,
"mcs": 6,
"schedule": schedule,
"receivers": [EtherAddress(x) for x in receivers],
"status": status,
"service_type": service_type
}
self._services_registered += 1
else:
self.mcast_services[addr]["receivers"] = \
[EtherAddress(x) for x in receivers]
self.mcast_services[addr]["status"] = status
self.mcast_services[addr]["service_type"] = service_type
return addr
def delete_mcast_service(self, addr):
"""Delete an mcast service."""
if addr in self.mcast_services:
del self.mcast_services[addr]
@property
def mcast_services(self):
"""Get the list of active mcast services."""
return self.storage['mcast_services']
@mcast_services.setter
def mcast_services(self, services):
"""Set the list of mcast services.
Notice that this setter expects to receive the full list of services
which is then parsed and saved locally.
The following format is expected
{
"ff:ff:ff:ff:ff:ff": {
"ip": "192.168.3.11",
"receivers": ["ff:ff:ff:ff:ff:ff"],
"status": True,
"type": "emergency"
}
}
"""
self.storage['mcast_services'] = {}
for service in services.values():
self.upsert_mcast_service(service['ipaddress'],
service['receivers'],
service['status'],
service['service_type'])
@property
def demo_mode(self):
"""Get demo mode."""
return self._demo_mode
@demo_mode.setter
def demo_mode(self, mode):
"""Set the demo mode."""
self._demo_mode = mode
for addr, entry in self.mcast_services.items():
phase = self.get_next_group_phase(addr)
self.log.info("Mcast phase %s for group %s", TX_MCAST[phase], addr)
for block in self.blocks():
# fetch txp
txp = block.tx_policies[addr]
if mode == TX_MCAST[TX_MCAST_DMS]:
txp.mcast = TX_MCAST_DMS
elif mode == TX_MCAST[TX_MCAST_LEGACY]:
txp.mcast = TX_MCAST_LEGACY
if block.band == BT_HT20:
txp.ht_mcs = [min(block.ht_supports)]
else:
txp.mcs = [min(block.supports)]
if mode != TX_MCAST_SDNPLAY_H:
entry['mcs'] = "None"
def lvap_join(self, lvap):
"""Called when an LVAP joins a tenant."""
service = "empower.primitives.wifircstats.wifircstats"
self.receptors[lvap.addr] = \
self.register_service(service, sta=lvap.addr)
def lvap_leave(self, lvap):
"""Called when an LVAP leaves the network."""
if lvap.addr in self.receptors:
del self.receptors[lvap.addr]
if lvap.addr in self.receptors_mcses:
del self.receptors_mcses[lvap.addr]
if lvap.addr in self.receptors_quality:
del self.receptors_quality[lvap.addr]
def compute_receptors_mcs(self):
"""New stats available."""
for rcstats in self.receptors.values():
highest_prob = 0
sta = rcstats.sta
keys = [float(i) for i in rcstats.rates.keys()]
best_mcs = min(list(map(int, keys)))
self.receptors_mcses[sta] = []
for mcs, stats in rcstats.rates.items():
if stats["prob"] >= self.prob_threshold:
self.receptors_mcses[sta].append(int(float(mcs)))
elif stats["prob"] > highest_prob:
best_mcs = int(float(mcs))
highest_prob = stats["prob"]
if not self.receptors_mcses[sta]:
self.receptors_quality[sta] = False
self.receptors_mcses[sta].append(best_mcs)
else:
self.receptors_quality[sta] = True
def calculate_group_mcs(self, group_receivers):
"""Compute group MCS magic."""
self.compute_receptors_mcs()
if not self.receptors_mcses:
return 0
if False not in self.receptors_quality.values():
mcses = []
for lvap, rates in self.receptors_mcses.items():
if lvap in group_receivers:
mcses.append(rates)
if mcses:
mcs_intersection = list(set.intersection(*map(set, mcses)))
if mcs_intersection:
mcs = max(mcs_intersection)
return mcs
mcs = sys.maxsize
for lvap, rates in self.receptors_mcses.items():
if lvap in group_receivers:
mcs = min(max(rates), mcs)
if mcs == sys.maxsize:
mcs = 0
return mcs
def get_next_group_phase(self, mcast_addr):
"""Get next mcast phase to be scheduled."""
self.mcast_services[mcast_addr]["schedule"] = \
self.mcast_services[mcast_addr]["schedule"][1:] + \
[self.mcast_services[mcast_addr]["schedule"][0]]
phase = self.mcast_services[mcast_addr]["schedule"][0]
return phase
@classmethod
def mcast_ip_to_ether(cls, ip_mcast_addr):
"""Transform an IP multicast address into an Ethernet one."""
if ip_mcast_addr is None:
return '\x00' * 6
# The first 24 bits are fixed according to class D IP
# and IP multicast address conventions
mcast_base = '01:00:5e'
# The 23 low order bits are mapped.
ip_addr_bytes = str(ip_mcast_addr).split('.')
# The first IP byte is not used,
# and only the last 7 bits of the second byte are used.
second_byte = int(ip_addr_bytes[1]) & 127
third_byte = int(ip_addr_bytes[2])
fourth_byte = int(ip_addr_bytes[3])
mcast_upper = format(second_byte, '02x') + ':' + \
format(third_byte, '02x') + ':' + \
format(fourth_byte, '02x')
return EtherAddress(mcast_base + ':' + mcast_upper)
def loop(self):
""" Periodic job. """
# if the demo is now in DMS it should not calculate anything
if self.demo_mode == TX_MCAST[TX_MCAST_DMS] or \
self.demo_mode == TX_MCAST[TX_MCAST_LEGACY]:
return
for block in self.blocks():
for addr, entry in self.mcast_services.items():
phase = self.get_next_group_phase(addr)
self.log.info("Mcast phase %s for group %s",
TX_MCAST[phase], addr)
# fetch txp
if addr not in block.tx_policies:
block.tx_policies[addr] = TxPolicy(addr, block)
txp = block.tx_policies[addr]
# If the service is disabled, DMS must be the multicast mode
if entry["status"] is False:
txp.mcast = TX_MCAST_DMS
continue
if phase == TX_MCAST_DMS:
txp.mcast = TX_MCAST_DMS
else:
# compute MCS
temp_mcs = self.calculate_group_mcs(entry["receivers"])
if block.band == BT_HT20:
mcs = max(temp_mcs, min(block.ht_supports))
else:
mcs = max(temp_mcs, min(block.supports))
entry['mcs'] = mcs
txp.mcast = TX_MCAST_LEGACY
if block.band == BT_HT20:
txp.ht_mcs = [mcs]
else:
txp.mcs = [mcs]
# assign MCS
self.log.info("Block %s setting mcast %s to %s MCS %d",
block, addr, TX_MCAST[TX_MCAST_LEGACY], mcs)
def to_dict(self):
""" Return a JSON-serializable."""
out = super().to_dict()
out['demo_mode'] = self.demo_mode
out['status'] = self.status
out['schedule'] = [TX_MCAST[x] for x in self.schedule]
out['mcast_services'] = self.mcast_services
return out
def launch(context, service_id, every=EVERY):
""" Initialize the module. """
return Mcast(context=context, service_id=service_id, every=every)
```
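The `mcast_ip_to_ether` helper above keeps the fixed `01:00:5e` multicast OUI and maps only the low 23 bits of the IPv4 address into the MAC. A minimal standalone sketch of the same arithmetic (plain strings instead of `EtherAddress`; the addresses are chosen only for illustration):
```python
def mcast_ip_to_mac(ip):
    # Fixed 01:00:5e prefix; copy the low 23 bits of the IPv4 address.
    octets = [int(x) for x in ip.split('.')]
    return '01:00:5e:%02x:%02x:%02x' % (octets[1] & 0x7f, octets[2], octets[3])

# 224.0.1.200 maps to the well-known 01:00:5e:00:01:c8
assert mcast_ip_to_mac('224.0.1.200') == '01:00:5e:00:01:c8'
# The high bit of the second octet is dropped (192 & 0x7f == 64 == 0x40)
assert mcast_ip_to_mac('239.192.3.11') == '01:00:5e:40:03:0b'
```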
#### File: empower/core/cellpool.py
```python
class CellPool(list):
"""Cell pool.
Extends the list in order to add a few filtering and sorting methods
"""
def sort_by_rsrp(self, ue_id):
"""Return list sorted by rsrp for the specified address."""
filtered = [x for x in self if ue_id in x.ue_measurements]
cells = sorted(filtered,
key=lambda x: x.ue_measurements[ue_id]['rsrp'],
reverse=True)
return CellPool(cells)
def sort_by_rsrq(self, ue_id):
"""Return list sorted by rsrq for the specified address."""
filtered = [x for x in self if ue_id in x.ue_measurements]
cells = sorted(filtered,
key=lambda x: x.ue_measurements[ue_id]['rsrq'],
reverse=True)
return CellPool(cells)
def first(self):
"""Return first entry in the list."""
if self:
cell = list.__getitem__(self, 0)
return cell
return None
def last(self):
"""Return last entry in the list."""
if self:
cell = list.__getitem__(self, -1)
return cell
return None
class Cell:
"""An LTE eNB cell.
Attributes:
vbs: The VBS at which this cell is available
pci: the physical cell id
dl_earfcn: downlink center frequency
dl_bandwidth: downlink bandwidth
ul_earfcn: uplink center frequency
ul_bandwidth: uplink bandwidth
ue_measurements: UE measurements (RSRP/RSRQ)
cell_measurements: cell measurements
"""
def __init__(self, vbs, pci):
self.vbs = vbs
self.pci = pci
self._dl_earfcn = None
self._dl_bandwidth = None
self._ul_earfcn = None
self._ul_bandwidth = None
self.ue_measurements = {}
self.cell_measurements = {}
@property
def dl_earfcn(self):
"""Get the dl_earfcn."""
return self._dl_earfcn
@dl_earfcn.setter
def dl_earfcn(self, dl_earfcn):
""" Set the dl_earfcn. """
self._dl_earfcn = dl_earfcn
@property
def dl_bandwidth(self):
"""Get the dl_bandwidth."""
return self._dl_bandwidth
@dl_bandwidth.setter
def dl_bandwidth(self, dl_bandwidth):
""" Set the dl_bandwidth. """
self._dl_bandwidth = dl_bandwidth
@property
def ul_earfcn(self):
"""Get the ul_earfcn."""
return self._ul_earfcn
@ul_earfcn.setter
def ul_earfcn(self, ul_earfcn):
""" Set the ul_earfcn. """
self._ul_earfcn = ul_earfcn
@property
def ul_bandwidth(self):
"""Get the ul_bandwidth."""
return self._ul_bandwidth
@ul_bandwidth.setter
def ul_bandwidth(self, ul_bandwidth):
""" Set the ul_bandwidth. """
self._ul_bandwidth = ul_bandwidth
def to_str(self):
"""Return an ASCII representation of the object."""
return "vbs %s pci %u dl_earfcn %u dl_earfcn %u" % \
(self.vbs.addr, self.pci, self.dl_earfcn, self.ul_earfcn)
def __str__(self):
return self.to_str()
def __repr__(self):
return self.__class__.__name__ + "('" + self.to_str() + "')"
def __hash__(self):
return hash(self.vbs) + hash(self.pci)
def __eq__(self, other):
if isinstance(other, Cell):
return self.vbs == other.vbs and self.pci == other.pci
return False
def __ne__(self, other):
return not self.__eq__(other)
def to_dict(self):
"""Return JSON-serializable representation of the object."""
out = {}
out['addr'] = self.vbs.addr
out['pci'] = self.pci
out['dl_earfcn'] = self.dl_earfcn
out['dl_bandwidth'] = self.dl_bandwidth
out['ul_earfcn'] = self.ul_earfcn
out['ul_bandwidth'] = self.ul_bandwidth
out['cell_measurements'] = self.cell_measurements
out['ue_measurements'] = self.ue_measurements
return out
```
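`CellPool.sort_by_rsrp`/`sort_by_rsrq` first drop cells that have no measurement for the given UE and then sort the rest by the chosen metric, strongest first. A small sketch with stub cells and made-up values (it assumes the `CellPool` class defined above):
```python
class StubCell:
    """Stand-in for Cell: only ue_measurements is needed for sorting."""
    def __init__(self, name, ue_measurements):
        self.name = name
        self.ue_measurements = ue_measurements

ue_id = "ue-1"
pool = CellPool([
    StubCell("cell-a", {ue_id: {"rsrp": -95, "rsrq": -12}}),
    StubCell("cell-b", {ue_id: {"rsrp": -80, "rsrq": -10}}),
    StubCell("cell-c", {}),  # no measurement for this UE -> filtered out
])

assert pool.sort_by_rsrp(ue_id).first().name == "cell-b"
assert pool.sort_by_rsrp(ue_id).last().name == "cell-a"
assert pool.sort_by_rsrq(ue_id).first().name == "cell-b"
```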
#### File: empower/core/device.py
```python
import logging
from datetime import datetime
from pymodm import MongoModel, fields
from empower.core.etheraddress import EtherAddressField
P_STATE_DISCONNECTED = "disconnected"
P_STATE_CONNECTED = "connected"
P_STATE_ONLINE = "online"
class Device(MongoModel):
"""Base device class.
The Device State machine is the following:
disconnected <-> connected -> online
online -> disconnected
Attributes:
addr: This Device MAC address (EtherAddress)
desc: A human-readable description of this Device (str)
connection: Signalling channel connection
last_seen: Sequence number of the last hello message received (int)
last_seen_ts: Timestamp of the last hello message received (int)
period: update period (in ms)
state: this device status
log: logging facility
"""
addr = EtherAddressField(primary_key=True)
desc = fields.CharField(required=True)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.__connection = None
self.last_seen = 0
self.last_seen_ts = 0
self.period = 0
self.__state = P_STATE_DISCONNECTED
self.log = logging.getLogger("%s" % self.__class__.__module__)
@property
def state(self):
"""Return the state."""
return self.__state
@state.setter
def state(self, state):
"""Set the Device state."""
self.log.info("Device %s mode %s->%s", self.addr, self.state, state)
method = "_%s_%s" % (self.state, state)
if hasattr(self, method):
callback = getattr(self, method)
callback()
return
raise IOError("Invalid transistion %s -> %s" % (self.state, state))
def set_connected(self):
"""Move to connected state."""
self.state = P_STATE_CONNECTED
def is_connected(self):
"""Return true if the device is connected"""
return self.state == P_STATE_CONNECTED or self.is_online()
def set_disconnected(self):
"""Move to connected state."""
self.state = P_STATE_DISCONNECTED
def set_online(self):
"""Move to connected state."""
self.state = P_STATE_ONLINE
def is_online(self):
"""Return true if the device is online"""
return self.state == P_STATE_ONLINE
def _online_online(self):
# null transition
pass
def _disconnected_connected(self):
# set new state
self.__state = P_STATE_CONNECTED
def _connected_disconnected(self):
# set new state
self.__state = P_STATE_DISCONNECTED
def _online_disconnected(self):
# generate bye message
self.__connection.send_device_down_message_to_self()
# set new state
self.__state = P_STATE_DISCONNECTED
def _connected_online(self):
# set new state
self.__state = P_STATE_ONLINE
# generate register message
self.__connection.send_device_up_message_to_self()
@property
def connection(self):
"""Get the connection assigned to this Device."""
return self.__connection
@connection.setter
def connection(self, connection):
"""Set the connection assigned to this Device."""
self.__connection = connection
def to_dict(self):
"""Return JSON-serializable representation of the object."""
date = datetime.fromtimestamp(self.last_seen_ts) \
.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
out = {
'addr': self.addr,
'desc': self.desc,
'last_seen': self.last_seen,
'last_seen_ts': date,
'period': self.period,
'state': self.state
}
out['connection'] = \
self.connection.to_dict() if self.connection else None
return out
def to_str(self):
"""Return an ASCII representation of the object."""
if self.connection:
return "%s at %s last_seen %d" % (self.addr,
self.connection.to_str(),
self.last_seen)
return "%s" % self.addr
def __str__(self):
return self.to_str()
def __hash__(self):
return hash(self.addr)
def __eq__(self, other):
if isinstance(other, Device):
return self.addr == other.addr
return False
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return self.__class__.__name__ + "('" + self.to_str() + "')"
```
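`Device` changes state by looking up a method named `_<old>_<new>` for the requested transition and rejecting anything else. The same dispatch idiom in isolation, as a runnable sketch with illustrative state names:
```python
class TransitionSketch:
    """Minimal sketch of the _<old>_<new> dispatch used by Device.state."""

    def __init__(self):
        self._state = "disconnected"

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, new_state):
        handler = getattr(self, "_%s_%s" % (self._state, new_state), None)
        if handler is None:
            raise IOError("Invalid transition %s -> %s" % (self._state, new_state))
        handler()

    def _disconnected_connected(self):
        self._state = "connected"

    def _connected_online(self):
        self._state = "online"

    def _online_disconnected(self):
        self._state = "disconnected"

sketch = TransitionSketch()
sketch.state = "connected"
sketch.state = "online"
sketch.state = "disconnected"
assert sketch.state == "disconnected"
```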
#### File: empower/unittest/lteslices.py
```python
import json
import unittest
import requests
from .common import BaseTest
from .common import URL
class TestLTESlices(BaseTest):
"""LTE slices unit tests."""
def test_create_new_lte_slice(self):
"""test_create_new_lte_slice."""
data = {
"owner": "foo",
"desc": "Test project",
"lte_props": {
"plmnid": {
"mcc": "001",
"mnc": "01"
}
},
"lte_slices": [
{
"slice_id": 82,
"properties": {
"rbgs": 5,
"ue_scheduler": 0
},
"devices": {
"11:22:33:44:55:66": {
"rbgs": 3,
"ue_scheduler": 0
}
}
}
]
}
params = \
("root", "root", "/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26")
self.post(params, data, 201)
self.get(("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26"), 200)
self.get(("foo", "foo",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26"), 200)
self.get(("bar", "bar",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26"), 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/wifi_slices")
self.get(params, 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices")
self.get(params, 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/wifi_slices/0")
self.get(params, 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/0")
self.get(params, 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/wifi_slices/82")
self.get(params, 404)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
self.get(params, 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
self.delete(params, 204)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
self.get(params, 404)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26")
self.delete(params, 204)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26")
self.get(params, 404)
def test_create_new_lte_slice_after_prj(self):
"""test_create_new_lte_slice_after_prj."""
data = {
"owner": "foo",
"desc": "Test project",
"lte_props": {
"plmnid": {
"mcc": "001",
"mnc": "01"
}
}
}
params = \
("root", "root", "/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26")
self.post(params, data, 201)
self.get(("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26"), 200)
self.get(("foo", "foo",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26"), 200)
self.get(("bar", "bar",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26"), 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/wifi_slices")
self.get(params, 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices")
self.get(params, 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/wifi_slices/0")
self.get(params, 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/0")
self.get(params, 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/wifi_slices/82")
self.get(params, 404)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
self.get(params, 404)
data = {
"slice_id": 82,
"properties": {
"rbgs": 5,
"ue_scheduler": 0
},
"devices": {
"11:22:33:44:55:66": {
"rbgs": 3,
"ue_scheduler": 0
}
}
}
params = ("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices")
self.post(params, data, 201)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/wifi_slices/82")
self.get(params, 404)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
self.get(params, 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
self.delete(params, 204)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
self.get(params, 404)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26")
self.delete(params, 204)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26")
self.get(params, 404)
def test_update_lte_slice(self):
"""test_update_lte_slice."""
data = {
"owner": "foo",
"desc": "Test project",
"lte_props": {
"plmnid": {
"mcc": "001",
"mnc": "01"
}
},
"lte_slices": [
{
"slice_id": 82,
"properties": {
"rbgs": 5,
"ue_scheduler": 0
},
"devices": {
"11:22:33:44:55:66": {
"rbgs": 3,
"ue_scheduler": 0
}
}
}
]
}
params = \
("root", "root", "/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26")
self.post(params, data, 201)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
self.get(params, 200)
data = {
"properties": {
"rbgs": 4,
"ue_scheduler": 0
}
}
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
self.put(params, data, 204)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
req = requests.get(url=URL % params)
slc = json.loads(req.text)
self.assertEqual(slc['properties']['rbgs'], 4)
data = {
"properties": {
"rbgs": 4,
"ue_scheduler": 0
},
"devices": {
"aa:bb:cc:dd:ee:ff": {
"rbgs": 2,
"ue_scheduler": 0
}
}
}
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
self.put(params, data, 204)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
req = requests.get(url=URL % params)
slc = json.loads(req.text)
self.assertEqual(slc['devices']['AA:BB:CC:DD:EE:FF']['rbgs'], 2)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
self.delete(params, 204)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/82")
self.get(params, 404)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26")
self.delete(params, 204)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26")
self.get(params, 404)
def test_delete_default_lte_slice(self):
"""test_delete_default_lte_slice."""
data = {
"owner": "foo",
"desc": "Test project",
"lte_props": {
"plmnid": {
"mcc": "001",
"mnc": "01"
}
}
}
params = \
("root", "root", "/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26")
self.post(params, data, 201)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/0")
self.get(params, 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/0")
self.delete(params, 400)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/lte_slices/0")
self.get(params, 200)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26")
self.delete(params, 204)
params = \
("root", "root",
"/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26")
self.get(params, 404)
if __name__ == '__main__':
unittest.main()
```
#### File: empower/unittest/wtps.py
```python
import json
import unittest
import requests
from .common import BaseTest
from .common import URL
class TestWTPs(BaseTest):
"""WTP unit tests."""
def test_create_new_device_empty_body(self):
"""test_create_new_device_empty_body."""
data = {}
params = ("root", "root", "/wtps/00:15:6d:84:13:0f")
self.post(params, data, 400)
def test_create_new_device_wrong_address(self):
"""test_create_new_device_wrong_address."""
data = {
"addr": "AA:15:6d:84:13"
}
params = ("root", "root", "/wtps/00:15:6d:84:13:0f")
self.post(params, data, 400)
def test_create_new_device(self):
"""test_create_new_device."""
data = {
"addr": "AA:15:6d:84:13:0f"
}
params = ("root", "root", "/wtps")
self.post(params, data, 201)
self.get(("root", "root", "/wtps"), 200)
self.get(("root", "root", "/wtps/AA:15:6d:84:13:0f"), 200)
self.get(("root", "root", "/wtps/11:22:33:44:55:66"), 404)
self.get(("root", "root", "/wtps/AA:15:6d:84:13"), 400)
params = ("root", "root", "/wtps/AA:15:6d:84:13:0f")
req = requests.get(url=URL % params)
device = json.loads(req.text)
self.assertEqual(device['desc'], "Generic device")
self.delete(("root", "root", "/wtps/AA:15:6d:84:13:0f"), 204)
self.get(("root", "root", "/wtps/AA:15:6d:84:13:0f"), 404)
def test_create_new_device_custom_desc(self):
"""test_create_new_device_custom_desc."""
data = {
"addr": "AA:15:6d:84:13:0f",
"desc": "Custom description"
}
params = ("root", "root", "/wtps")
self.post(params, data, 201)
self.get(("root", "root", "/wtps"), 200)
self.get(("root", "root", "/wtps/AA:15:6d:84:13:0f"), 200)
self.get(("root", "root", "/wtps/11:22:33:44:55:66"), 404)
self.get(("root", "root", "/wtps/AA:15:6d:84:13"), 400)
params = ("root", "root", "/wtps/AA:15:6d:84:13:0f")
req = requests.get(url=URL % params)
device = json.loads(req.text)
self.assertEqual(device['desc'], "Custom description")
self.delete(("root", "root", "/wtps/AA:15:6d:84:13:0f"), 204)
self.get(("root", "root", "/wtps/AA:15:6d:84:13:0f"), 404)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jonco3/dynamic",
"score": 3
} |
#### File: test/tests/augassign.py
```python
input = [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
expected = [11, 8, 30, 2.5, 2, 4, 10000000, 20, 2, 2, 14, 15]
def test(v):
v[0] += 1
v[1] -= 2
v[2] *= 3
v[3] /= 4
v[4] //= 5
v[5] %= 6
v[6] **= 7
v[7] <<= 1
v[8] >>= 2
v[9] &= 3
v[10] ^= 4
v[11] |= 5
return v
assert test(list(input)) == expected
class Wrapped:
def __init__(self, initial):
self.value = initial
def __eq__(self, other):
return self.value == other
def __iadd__(self, other):
self.value += other
return self
def __isub__(self, other):
self.value -= other
return self
def __imul__(self, other):
self.value *= other
return self
def __itruediv__(self, other):
self.value /= other
return self
def __ifloordiv__(self, other):
self.value //= other
return self
def __imod__(self, other):
self.value %= other
return self
def __ipow__(self, other):
self.value **= other
return self
def __ilshift__(self, other):
self.value <<= other
return self
def __irshift__(self, other):
self.value >>= other
return self
def __ior__(self, other):
self.value |= other
return self
def __iand__(self, other):
self.value &= other
return self
def __ixor__(self, other):
self.value ^= other
return self
assert test(list(map(Wrapped, input))) == expected
class Wrapped2:
def __init__(self, initial):
self.value = initial
def __add__(self, other):
return Wrapped(self.value + other)
def __sub__(self, other):
return Wrapped(self.value - other)
def __mul__(self, other):
return Wrapped(self.value * other)
def __truediv__(self, other):
return Wrapped(self.value / other)
def __floordiv__(self, other):
return Wrapped(self.value // other)
def __mod__(self, other):
return Wrapped(self.value % other)
def __pow__(self, other):
return Wrapped(self.value ** other)
def __lshift__(self, other):
return Wrapped(self.value << other)
def __rshift__(self, other):
return Wrapped(self.value >> other)
def __or__(self, other):
return Wrapped(self.value | other)
def __and__(self, other):
return Wrapped(self.value & other)
def __xor__(self, other):
return Wrapped(self.value ^ other)
assert test(list(map(Wrapped2, input))) == expected
class C:
def __init__(self, value):
self.value = value
o = C(1)
def incValue(self, other):
self.value += other
return self
o.__iadd__ = incValue
threw = False
try:
o += 1
except TypeError as e:
if "unsupported operand type" in str(e):
threw = True
assert threw
C.__iadd__ = incValue
o += 1
assert o.value == 2
class NonDataDescriptor:
def __get__(self, instance, owner):
def f(other):
o.value -= other
return o
return f
C.__iadd__ = NonDataDescriptor()
o += 1
assert o.value == 1
class D:
def __init__(self, initial):
self.value = initial
def __iadd__(self, other):
self.value += other
return self
def __add__(self, other):
return F(self.value - other)
class E:
def __init__(self, initial):
self.value = initial
def __iadd__(self, other):
self.value += other
return self
def __add__(self, other):
return NotImplemented
class F:
def __init__(self, initial):
self.value = initial
def __iadd__(self, other):
return NotImplemented
def __add__(self, other):
return F(self.value - other)
class G:
def __init__(self, initial):
self.value = initial
def __iadd__(self, other):
return NotImplemented
def __add__(self, other):
return NotImplemented
d = D(0); d += 1; assert d.value == 1
e = E(0); e += 1; assert e.value == 1
f = F(0); f += 1; assert f.value == -1
g = G(0);
threw = False
try:
g += 1
except TypeError:
threw = True
assert threw
assert g.value == 0
class H:
def __init__(self, initial):
self.value = initial
def __radd__(self, other):
return H(self.value + other)
h = 0; h += H(1); assert h.value == 1
# Test builtin stub reverses its arguments when required
def opt(a, b):
a += b
return a
assert opt(1, 1.5) == 2.5
assert opt(1, 1.5) == 2.5
print('ok')
```
#### File: test/tests/binaryop.py
```python
a, b, c, d, e, f = 1000, 1000, 1000, 1000, 1000, 1000
g, h, i, j, k, l = 2, 1000, 1000, 1000, 1000, 1000
a = a + 1
b = b - 2
c = c * 3
d = d / 4
e = e // 5
f = f % 6
g = g ** 7
h = h << 8
i = i >> 9
j = j & 10
k = k ^ 11
l = l | 12
assert a == 1001
assert b == 998
assert c == 3000
assert d == 250
assert e == 200
assert f == 4
assert g == 128
assert h == 256000
assert i == 1
assert j == 8
assert k == 995
assert l == 1004
class Wrapped:
def __init__(self, initial):
self.value = initial
def __add__(self, other):
return Wrapped(self.value + other)
def __sub__(self, other):
return Wrapped(self.value - other)
def __mul__(self, other):
return Wrapped(self.value * other)
def __truediv__(self, other):
return Wrapped(self.value / other)
def __floordiv__(self, other):
return Wrapped(self.value // other)
def __mod__(self, other):
return Wrapped(self.value % other)
def __pow__(self, other):
return Wrapped(self.value ** other)
def __lshift__(self, other):
return Wrapped(self.value << other)
def __rshift__(self, other):
return Wrapped(self.value >> other)
def __or__(self, other):
return Wrapped(self.value | other)
def __and__(self, other):
return Wrapped(self.value & other)
def __xor__(self, other):
return Wrapped(self.value ^ other)
def __radd__(self, other):
return other + self.value
def __rsub__(self, other):
return other - self.value
def __rmul__(self, other):
return other * self.value
def __rtruediv__(self, other):
return other / self.value
def __rfloordiv__(self, other):
return other // self.value
def __rmod__(self, other):
return other % self.value
def __rpow__(self, other):
return other ** self.value
def __rlshift__(self, other):
return other << self.value
def __rrshift__(self, other):
return other >> self.value
def __ror__(self, other):
return other | self.value
def __rand__(self, other):
return other & self.value
def __rxor__(self, other):
return other ^ self.value
a, b, c, d, e, f = Wrapped(1000), Wrapped(1000), Wrapped(1000), Wrapped(1000), Wrapped(1000), Wrapped(1000)
g, h, i, j, k, l = Wrapped(2), Wrapped(1000), Wrapped(1000), Wrapped(1000), Wrapped(1000), Wrapped(1000)
a = a + 1
b = b - 2
c = c * 3
d = d / 4
e = e // 5
f = f % 6
g = g ** 7
h = h << 8
i = i >> 9
j = j & 10
k = k ^ 11
l = l | 12
assert a.value == 1001
assert b.value == 998
assert c.value == 3000
assert d.value == 250
assert e.value == 200
assert f.value == 4
assert g.value == 128
assert h.value == 256000
assert i.value == 1
assert j.value == 8
assert k.value == 995
assert l.value == 1004
a, b, c, d, e, f = 1000, 1000, 1000, 1000, 1000, 1000
g, h, i, j, k, l = 2, 1000, 1000, 1000, 1000, 1000
a = a + Wrapped(1)
b = b - Wrapped(2)
c = c * Wrapped(3)
d = d / Wrapped(4)
e = e // Wrapped(5)
f = f % Wrapped(6)
g = g ** Wrapped(7)
h = h << Wrapped(8)
i = i >> Wrapped(9)
j = j & Wrapped(10)
k = k ^ Wrapped(11)
l = l | Wrapped(12)
assert a == 1001
assert b == 998
assert c == 3000
assert d == 250
assert e == 200
assert f == 4
assert g == 128
assert h == 256000
assert i == 1
assert j == 8
assert k == 995
assert l == 1004
print('ok')
```
#### File: test/tests/builtins.py
```python
assert(max(1, 2, 3) == 3)
assert(max(1, 3, 2) == 3)
assert(max(3, 2, 1) == 3)
assert(min([1]) == 1)
assert(min([1, 2, 3]) == 1)
assert(min([1, 3, 2]) == 1)
assert(min([3, 2, 1]) == 1)
exception = False
try:
min()
except TypeError:
exception = True
assert(exception)
exception = False
try:
max()
except TypeError:
exception = True
assert(exception)
def testIter(iterable):
i = iter(iterable)
assert(next(i) == 1)
assert(next(i) == 2)
assert(next(i) == 3)
caught = False
try:
next(i)
except StopIteration:
caught = True
assert(caught)
class OwnSequence:
def __init__(self, wrapped):
self.wrapped = wrapped
def __getitem__(self, index):
return self.wrapped[index]
testIter([1, 2, 3])
testIter(OwnSequence([1, 2, 3]))
def inc(x):
return x + 1
assert(list(map(inc, [])) == [])
assert(list(map(inc, [1, 2, 3])) == [2, 3, 4])
assert(list(zip()) == [])
assert(list(zip([1], [2])) == [(1, 2)])
assert(list(zip([1, 2], [2, 3])) == [(1, 2), (2, 3)])
assert(sum([]) == 0)
assert(sum([1, 2, 3]) == 6)
assert(sum((1, 2, 3), 4) == 10)
# todo: these tests are probably not sufficient
assert(divmod(5, 2) == (2, 1))
assert(divmod(4.5, 1.5) == (3.0, 0.0))
assert(divmod(5, 1.5) == (3.0, 0.5))
assert(divmod(1, -1) == (-1, 0))
assert(divmod(-1, 1) == (-1, 0))
# Builtin classes are not modifiable
builtinClasses = [ bool, dict, tuple, list, int, float, object ]
def checkException(thunk, exceptionType, message):
try:
thunk()
except Exception as e:
if isinstance(e, exceptionType) and message in str(e):
return True
return False
def addAttr(c):
c.foo = 1
for c in builtinClasses:
assert checkException(lambda: addAttr(c), TypeError, "can't set attributes")
# Can't add attributes to instances of builtin classes
for c in builtinClasses:
assert checkException(lambda: addAttr(c()), AttributeError,
"object has no attribute")
# locals
def locals1():
return locals()
#assert locals1() == {}
assert len(locals1().keys()) == 0
def locals2():
a = 1
b = 2
return locals()
x = locals2()
assert x['a'] == 1
assert x['b'] == 2
def locals3(z):
a = 1
b = 2
return locals()
x = locals3(3)
assert x['a'] == 1
assert x['b'] == 2
assert x['z'] == 3
def locals4():
a = 1
b = 2
def x():
return a
return locals()
x = locals4()
assert x['a'] == 1
assert x['b'] == 2
assert 'x' in x
# globals
foo = 1
g = globals()
assert g['foo'] == 1
g['foo'] = 2
assert g['foo'] == 2
assert foo == 2
#assert 'foo' in g.keys()
assert 2 in g.values()
assert len(g.values()) == len(g.keys())
print('ok')
```
#### File: test/tests/exec.py
```python
assert exec("1") == None
result = None
def setResult(x):
global result
result = x
# Read global
g = 1
exec("setResult(g)")
assert result == 1
def readGlobal():
exec("setResult(g)")
result = None
readGlobal()
assert result == 1
# Write global
exec("g = 2")
assert g == 2
def writeGlobal():
return exec("global g; g = 3")
writeGlobal()
assert g == 3
# Can't access nonlocals
def readNonLocal():
x = 4
def f():
return exec("setResult(x)")
return f
failed = False
try:
readNonLocal()()
except NameError:
failed = True
assert failed
# Read local
def readLocal():
x = 5
return exec("setResult(x)")
result = None
readLocal()
assert result == 5
# Can't write locals
def writeLocal():
x = 6
exec("x = 7")
return x
assert writeLocal() == 6
assert 'x' not in globals().keys()
globalDict = {'x': 6, 'setResult': setResult}
localDict = {'x': 7}
result = None
exec("setResult(x)", globalDict)
assert result == 6
result = None
exec("setResult(x)", None, localDict)
assert result == 7
result = None
exec("setResult(x)", globalDict, localDict)
assert result == 7
exec("global x; x = 8", globalDict)
assert globalDict['x'] == 8
exec("global x; del x", globalDict)
assert 'x' not in globalDict
exec("x = 9", None, localDict)
assert localDict['x'] == 9
globalDict['x'] = 8
exec("x = 10", globalDict, localDict)
assert globalDict['x'] == 8
assert localDict['x'] == 10
exec("global x; x = 11", globalDict, localDict)
assert globalDict['x'] == 11
assert localDict['x'] == 10
exec("y = 12", globalDict, localDict)
assert 'y' not in globalDict
assert localDict['y'] == 12
exec("global z; z = 13", globalDict, localDict)
assert globalDict['z'] == 13
aGlobal = None
dict = {}
exec("", dict)
assert "aGlobal" not in dict
assert "__builtins__" in dict
print("ok")
```
#### File: test/tests/fibonacci3.py
```python
def fib(n):
a, b = 0, 1
for i in range(0, n):
a, b = b, a + b
return a
print(fib(10))
```
#### File: test/tests/fibonacci4.py
```python
def fib():
a, b = 0, 1
while 1:
yield a
a, b = b, a + b
f = fib()
for i in range(10):
next(f)
print(next(f))
```
#### File: test/tests/generators.py
```python
def foo(x):
yield 1
yield x
iter = foo(2)
assert next(iter) == 1
assert next(iter) == 2
def bar(array):
for x in array:
yield 2 * x
iter = bar([1, 2, 3])
assert next(iter) == 2
assert next(iter) == 4
assert next(iter) == 6
def collect(iter):
result = []
for x in iter:
result.append(x)
return result
r = collect(foo(0))
assert len(r) == 2
assert r[0] == 1
assert r[1] == 0
def noExcept(f):
try:
yield f()
except:
yield None
def a():
return 1
def b():
raise Exception()
assert(collect(noExcept(a)) == [1])
assert(collect(noExcept(b)) == [None])
print('ok')
```
#### File: test/tests/listComp.py
```python
assert([x for x in ()] == [])
assert([x for x in range(0, 3)] == [0, 1, 2])
assert([(x, y) for x in range(0, 2) for y in range(2, 4)] ==
[(0, 2), (0, 3), (1, 2), (1, 3)])
assert([x for x in range(0, 3) if x >= 1] == [1, 2])
def inc(x):
return x + 1
assert([inc(y) for y in (1, 2, 3)] == [2, 3, 4])
a = 1
assert([a for y in (1, 2, 3)] == [1, 1, 1])
assert([(lambda x: x * 2)(y) for y in (1, 2, 3)] == [2, 4, 6])
assert([(lambda x: y * 2)(y) for y in (1, 2, 3)] == [2, 4, 6])
print('ok')
```
#### File: test/tests/loop.py
```python
count = 0
total = 0
last = 0
for i in (1, 2, 3):
count += 1
total += i
last = i
assert count == 3
assert total == 6
assert last == 3
count = 0
total = 0
last = 0
i = 1
while i <= 3:
count += 1
total += i
last = i
i += 1
assert count == 3
assert total == 6
assert last == 3
count = 0
total = 0
last = 0
for i in (1, 2, 3):
count += 1
total += i
last = i
if i == 2:
break
assert count == 2
assert total == 3
assert last == 2
count = 0
total = 0
last = 0
i = 1
while i <= 3:
count += 1
total += i
last = i
if i == 2:
break
i += 1
assert count == 2
assert total == 3
assert last == 2
count = 0
total = 0
last = 0
for i in (1, 2, 3):
if i == 2:
continue
count += 1
total += i
last = i
assert count == 2
assert total == 4
assert last == 3
count = 0
total = 0
last = 0
i = 1
while i <= 3:
if i == 2:
i += 1
continue
count += 1
total += i
last = i
i += 1
assert count == 2
assert total == 4
assert last == 3
f = 0
for i in (1, 2, 3):
try:
if i == 2:
break
finally:
f = i
assert f == 2
f = 0
for i in (1, 2, 3):
try:
try:
try:
if i == 1:
break
finally:
f += 1
except Exception:
pass
finally:
f += 1
assert f == 2
f = 0
for i in (1, 2, 3):
try:
if i == 2:
continue
finally:
f += 1
assert f == 3
# else clause
didElse = False
for i in []:
pass
else:
didElse = True
assert(didElse)
didElse = False
for i in (1, 2, 3):
pass
else:
didElse = True
assert(didElse)
didElse = False
for i in (1, 2, 3):
if i == 3:
continue
else:
didElse = True
assert(didElse)
didElse = False
for i in (1, 2, 3):
if i == 3:
break
else:
didElse = True
assert(not didElse)
class OwnSequence:
def __init__(self, wrapped):
self.wrapped = wrapped
def __getitem__(self, index):
return self.wrapped[index]
count = 0
total = 0
last = 0
for i in OwnSequence([1, 2, 3]):
count += 1
total += i
last = i
assert count == 3
assert total == 6
assert last == 3
print('ok')
```
#### File: test/tests/stackspace.py
```python
def rr(a = 1, b = 2, c = 3):
return 5
def rrr(a = 1, b = 2, c = 3):
return rr()
assert rrr() == 5
print("ok")
``` |
{
"source": "jonco3/varas",
"score": 3
} |
#### File: varas/test/expr_example.py
```python
from varas import *
import sys
import re
LITERAL_TOKEN = 1
tokenizer = Tokenizer(("\d+", LITERAL_TOKEN),
(".", None))
def handle_lparen(parser, actions, token):
expr = parser.expression(actions)
parser.match(")")
return expr
def handle_lsquare(parser, actions, token):
result = []
while not parser.opt("]"):
if result:
parser.match(",")
result.append(parser.expression(actions))
return ("list", result)
def parse_expr(input):
return list(Parser(expr_spec, tokenizer.tokenize(input)).parse_all())
expr_spec = ExprSpec()
expr_spec.add_word(LITERAL_TOKEN, lambda t: int(t.content))
expr_spec.add_unary_op("+", lambda t, r: r)
expr_spec.add_unary_op("-", lambda t, r: ("neg", r))
expr_spec.add_binary_op("+", 10, Assoc.LEFT, lambda t, l, r: ("add", l, r))
expr_spec.add_binary_op("-", 10, Assoc.LEFT, lambda t, l, r: ("sub", l, r))
expr_spec.add_binary_op("*", 20, Assoc.LEFT, lambda t, l, r: ("mul", l, r))
expr_spec.add_binary_op("/", 20, Assoc.LEFT, lambda t, l, r: ("div", l, r))
expr_spec.add_binary_op("^", 30, Assoc.RIGHT, lambda t, l, r: ("pow", l, r))
expr_spec.add_prefix_handler("(", handle_lparen)
expr_spec.add_prefix_handler("[", handle_lsquare)
import unittest
class TestExpr(unittest.TestCase):
def check(self, expected, input):
self.assertEqual([expected], parse_expr(input))
def checkError(self, input):
self.assertRaises(ParseError, parse_expr, input)
def test_(self):
self.check(1, "1")
self.check(1, "(1)")
self.check(("add", 1, 2), "1+2")
self.check(("add", 1, ("mul", 2, 3)), "1+2*3")
self.check(("mul", 1, ("add", 2, 3)), "1*(2+3)")
self.check(("list", [1, 2, 3]), "[1, 2, 3]")
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == "-t":
sys.argv.pop(1)
unittest.main()
else:
while True:
try:
program = raw_input("> ")
for result in parse_expr(program):
print(repr(result))
except EOFError:
print("")
exit(0)
``` |
{
"source": "joncombe/django-file-serve-view",
"score": 2
} |
#### File: django-file-serve-view/fileserveview/views.py
```python
from django.http import Http404, HttpResponse, HttpResponseForbidden
from django.views.generic import View
class FileServeView(View):
authenticated_user_only = True
content_type = None
download_filename = None
file = None
has_permission = True
is_download = True
def error401(self, request, *args, **kwargs):
return HttpResponse('Unauthorized', status=401)
def error403(self, request, *args, **kwargs):
return HttpResponseForbidden()
def error404(self, request, *args, **kwargs):
raise Http404()
def get(self, request, *args, **kwargs):
# 401 for non-authenticated users who should be authenticated
if self.authenticated_user_only and not request.user.is_authenticated:
return self.error401(request, *args, **kwargs)
# get file
self.get_file(request, *args, **kwargs)
# 403 for authenticated users who do not have permission
if not self.has_permission:
return self.error403(request, *args, **kwargs)
return self.serve(request, *args, **kwargs)
def get_content_type(self, request, *args, **kwargs):
if self.content_type is None:
try:
suffix = self.file.lower().split('.')[-1:][0]
# thank you:
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Complete_list_of_MIME_types
self.content_type = {
'aac': 'audio/aac',
'abw': 'application/x-abiword',
'arc': 'application/x-freearc',
'avi': 'video/x-msvideo',
'azw': 'application/vnd.amazon.ebook',
'bin': 'application/octet-stream',
'bmp': 'image/bmp',
'bz': 'application/x-bzip',
'bz2': 'application/x-bzip2',
'csh': 'application/x-csh',
'css': 'text/css',
'csv': 'text/csv',
'doc': 'application/msword',
'docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'eot': 'application/vnd.ms-fontobject',
'epub': 'application/epub+zip',
'gif': 'image/gif',
'htm': 'text/html',
'html': 'text/html',
'ico': 'image/vnd.microsoft.icon',
'ics': 'text/calendar',
'jar': 'application/java-archive',
'jpeg': 'image/jpeg',
'jpg': 'image/jpeg',
'js': 'text/javascript',
'json': 'application/json',
'jsonld': 'application/ld+json',
'mid': 'audio/midi audio/x-midi',
'midi': 'audio/midi audio/x-midi',
'mjs': 'text/javascript',
'mp3': 'audio/mpeg',
'mpeg': 'video/mpeg',
'mpkg': 'application/vnd.apple.installer+xml',
'odp': 'application/vnd.oasis.opendocument.presentation',
'ods': 'application/vnd.oasis.opendocument.spreadsheet',
'odt': 'application/vnd.oasis.opendocument.text',
'oga': 'audio/ogg',
'ogv': 'video/ogg',
'ogx': 'application/ogg',
'otf': 'font/otf',
'png': 'image/png',
'pdf': 'application/pdf',
'ppt': 'application/vnd.ms-powerpoint',
'pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'rar': 'application/x-rar-compressed',
'rtf': 'application/rtf',
'sh': 'application/x-sh',
'svg': 'image/svg+xml',
'swf': 'application/x-shockwave-flash',
'tar': 'application/x-tar',
'tif': 'image/tiff',
'tiff': 'image/tiff',
'ttf': 'font/ttf',
'txt': 'text/plain',
'vsd': 'application/vnd.visio',
'wav': 'audio/wav',
'weba': 'audio/webm',
'webm': 'video/webm',
'webp': 'image/webp',
'woff': 'font/woff',
'woff2': 'font/woff2',
'xhtml': 'application/xhtml+xml',
'xls': 'application/vnd.ms-excel',
'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'xml': 'application/xml',
'xul': 'application/vnd.mozilla.xul+xml',
'zip': 'application/zip',
'3gp': 'video/3gpp',
'3g2': 'video/3gpp2',
'7z': 'application/x-7z-compressed'
}[suffix]
except:
self.content_type = 'application/octet-stream'
def get_file(self, request, *args, **kwargs):
#
# Add your own logic here to set the file, e.g.
# self.file = '....'
#
# The logic may decide that this user does not have
# permission to see this file, therefore:
# self.has_permission = False
#
pass
def serve(self, request, *args, **kwargs):
# 404 for no file
if self.file is None:
return self.error404(request, *args, **kwargs)
# set the content_type (e.g. file mime type)
if self.content_type is None:
self.get_content_type(request, *args, **kwargs)
# load file
fp = open(self.file, 'rb')
response = HttpResponse(fp.read(), content_type=self.content_type)
response['Content-Length'] = len(response.content)
fp.close()
# download?
if self.is_download:
if self.download_filename is None:
self.download_filename = \
self.file.replace('\\', '/').split('/')[-1:][0]
response['Content-Disposition'] = 'attachment; filename="%s"' % \
self.download_filename
# serve
return response
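# --- Usage sketch (not part of the original module) ---
# FileServeView is meant to be subclassed: get_file() resolves self.file and
# may clear self.has_permission. The subclass below is a hypothetical example;
# the "protected" directory and the staff-only rule are assumptions.
import os
from django.conf import settings

class ProtectedMediaView(FileServeView):
    is_download = False

    def get_file(self, request, *args, **kwargs):
        if not request.user.is_staff:
            self.has_permission = False
            return
        filename = os.path.basename(kwargs.get('filename', ''))
        path = os.path.join(settings.MEDIA_ROOT, 'protected', filename)
        if os.path.isfile(path):
            self.file = path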
``` |
{
"source": "joncombe/django-raw-query",
"score": 3
} |
#### File: django-raw-query/rawquery/rawquery.py
```python
from django.db import connection
class RawQuery:
# return a list of dicts
# e.g. SELECT * FROM my_table
# [
# {'a': 1, 'b': 2, 'c': 3},
# {'a': 1, 'b': 2, 'c': 3},
# ]
def multiple_rows(self, sql, params=[]):
cursor = self._do_query(sql, params)
columns = [col[0] for col in cursor.description]
return [
dict(zip(columns, row))
for row in cursor.fetchall()
]
# return a single dict
# e.g. SELECT COUNT(*) AS count, AVG(price) AS avg_price FROM my_table
# { 'count': 12, 'avg_price': 95.2 }
def single_row(self, sql, params=[]):
return self.multiple_rows(sql, params)[0]
# return a single value
# e.g. SELECT COUNT(*) FROM my_table
# 134
def single_value(self, sql, params=[]):
cursor = self._do_query(sql, params)
return cursor.fetchone()[0]
# return a list of single values
# e.g. SELECT id FROM my_table
# [1, 2, 3, 4, 5]
def multiple_values(self, sql, params=[]):
cursor = self._do_query(sql, params)
return [row[0] for row in cursor.fetchall()]
# UPDATE, INSERT, etc.
def run(self, sql, params=[]):
cursor = self._do_query(sql, params)
return cursor.rowcount
def _do_query(self, sql, params):
cursor = connection.cursor()
cursor.execute(sql, params)
return cursor
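# --- Usage sketch (not part of the original module) ---
# Table and column names are made up for illustration; queries go through
# Django's default connection, so a configured database is required.
#
# rq = RawQuery()
# rows = rq.multiple_rows("SELECT id, name FROM shop_item WHERE price > %s", [100])
# stats = rq.single_row("SELECT COUNT(*) AS count, AVG(price) AS avg_price FROM shop_item")
# total = rq.single_value("SELECT COUNT(*) FROM shop_item")
# ids = rq.multiple_values("SELECT id FROM shop_item ORDER BY id")
# updated = rq.run("UPDATE shop_item SET active = %s WHERE id = %s", [True, 42])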
``` |
{
"source": "joncombe/python-moneyspace",
"score": 3
} |
#### File: python-moneyspace/pythonmoneyspace/moneyspace.py
```python
from datetime import datetime
import hashlib
import hmac
import pytz
import requests
class MoneySpace:
base_url = 'https://a.moneyspace.net'
allowed_kwargs = [
'address',
'bankType',
'description',
'endTerm',
'firstname',
'lastname',
'message',
'phone',
'startTerm',
]
def __init__(self, secret_id, secret_key):
self.secret_id = secret_id
self.secret_key = secret_key
def call_api(self, path, params, optional_params={}):
# update params
params['secret_id'] = self.secret_id
params['secret_key'] = self.secret_key
# add optional parameters
for key in optional_params:
if key in self.allowed_kwargs:
params[key] = optional_params[key]
# send request to moneyspace
r = requests.post(
'%s%s' % (self.base_url, path),
data=params
)
try:
return r.json()[0]
except:
return { 'status': 'error' }
# check the status of a transaction using the merchant's
# order_id as the identifier
def check_order_id(self, order_id):
return self.call_api('/CheckOrderID', { 'order_id': order_id })
# check the status of a transaction using the moneyspace's
# transaction_id as the identifier
def check_payment(self, transaction_id):
return self.call_api('/CheckPayment', { 'transaction_ID': transaction_id })
# create a transaction
def create_transaction(self, payment_type, email, amount, order_id, success_url, fail_url, cancel_url, agreement, **kwargs):
# create params dict
params = {
'address': '',
'agreement': agreement,
'amount': '{:0.2f}'.format(amount),
'cancel_Url': cancel_url,
'description': '',
'email': email,
'fail_Url': fail_url,
'feeType': 'include',
'firstname': '',
'lastname': '',
'message': '',
'order_id': order_id,
'payment_type': payment_type,
'phone': '',
'success_Url': success_url,
}
# note: description is a required field for QR code payments
# (and possibly other payment types too, let me know).
# here i re-use the order_id as the description if no
# description is passed in
if payment_type == 'qrnone':
params['description'] = kwargs.get('description', order_id)
# call the api
return self.call_api('/CreateTransactionID', params, kwargs)
# get a QR code image url from the transaction ID
def get_qr_image_url(self, transaction_id):
dt = datetime.now(pytz.timezone('Asia/Bangkok'))
timestamp = dt.strftime('%Y%m%d%H%M%S')
pre_hash = '%s%s' % (transaction_id, timestamp)
local_hash = hmac.new(self.secret_key.encode(), pre_hash.encode(), hashlib.sha256).hexdigest()
return 'https://www.moneyspace.net/merchantapi/makepayment/linkpaymentcard?transactionID=%s&timehash=%s&secreteID=%s&hash=%s' % (
transaction_id,
timestamp,
self.secret_id,
local_hash
)
# validate webhooks sent from moneyspace
def webhook_is_valid(self, amount, status, order_id, transaction_id, hash):
if status == 'OK':
pre_hash = '%s%s' % (transaction_id, amount)
else:
pre_hash = '%s%s%s%s' % (transaction_id, amount, status, order_id)
# compare results
local_hash = hmac.new(self.secret_key.encode(), pre_hash.encode(), hashlib.sha256).hexdigest()
return local_hash == hash
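# --- Usage sketch (not part of the original module) ---
# Credentials, URLs and amounts are placeholders; the response keys used below
# ("status", "transaction_ID") are assumptions inferred from the methods above.
#
# ms = MoneySpace('my-secret-id', 'my-secret-key')
# result = ms.create_transaction(
#     payment_type='qrnone', email='buyer@example.com', amount=150.00,
#     order_id='ORDER-0001', success_url='https://example.com/pay/ok',
#     fail_url='https://example.com/pay/fail',
#     cancel_url='https://example.com/pay/cancel',
#     agreement=1, description='Order ORDER-0001')
# if result.get('status') != 'error':
#     qr_url = ms.get_qr_image_url(result.get('transaction_ID'))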
``` |
{
"source": "joncooknmsu/GHClassroomTools",
"score": 3
} |
#### File: joncooknmsu/GHClassroomTools/userstats.py
```python
import os
import sys
import re
import subprocess
import datetime
debug = 0
# Overall data structure that holds accumulated stats
# each element index is a username, each element is a
# dictionary itself, with data as below in code (sorry!)
userData = {}
#---------------------------------------------------------
# Run the git log command and parse commit lines
#---------------------------------------------------------
def processGitLogCmd():
global userData, debug
user = 'none'
sout = subprocess.check_output(['git', 'log', '--all',\
'--pretty=format:COMMIT:@%h@%cn@%an@%ct@%s', '--numstat']).split('\n')
for line in sout:
v = re.match("COMMIT:@([^@]+)@([^@]+)@([^@]+)@([^@]+)@(.*)",line)
if debug > 1: print line
if debug > 0: print "Commit {0} user={1} time={2} msg=[{3}]"\
.format(chash,user,timestamp,message)
if v is None:
# line is not a commit, assume it is file stats
processFileLine(line,user)
continue
chash = v.group(1)
user = v.group(3)
timestamp = v.group(4)
message = v.group(5)
if not user in userData:
userData[user] = {}
userData[user]['commits'] = 0
userData[user]['merges'] = 0
userData[user]['pullrequests'] = 0
userData[user]['filedata'] = {}
userData[user]['binfiledata'] = {}
userData[user]['commits'] += 1
if message.find('Merge pull request') >= 0:
userData[user]['pullrequests'] += 1
elif message.find('Merge') >= 0:
userData[user]['merges'] += 1
#---------------------------------------------------------
# Match a file statistics line of input
# - will be after a commit line, so user is valid
#---------------------------------------------------------
def processFileLine(line,user):
global userData
#v = re.match(" *(.*[^ ]) *\| *(.*)",line) // for --stat line
v = re.match("([0-9-]+)\t([0-9-]+)\t(.*)",line)
if v is None:
return
filetype = os.path.splitext(v.group(3))[1]
if filetype =='':
filetype = 'no-ext'
added = v.group(1)
deleted = v.group(2)
if debug > 0: print " File ({0}) info ({1} {2})".\
format(filetype,added,deleted)
if added.isdigit() and deleted.isdigit():
if not filetype in userData[user]['filedata']:
userData[user]['filedata'][filetype] = (int(added),int(deleted))
else:
cadd = userData[user]['filedata'][filetype][0]
cdel = userData[user]['filedata'][filetype][1]
userData[user]['filedata'][filetype] = \
(int(added)+cadd,int(deleted)+cdel)
else:
if not filetype in userData[user]['binfiledata']:
userData[user]['binfiledata'][filetype] = 1
else:
userData[user]['binfiledata'][filetype] += 1
#---------------------------------------------------------
# Walk the data and output the results
#---------------------------------------------------------
def outputUserData():
print "User statistics, generated on {0}".format(str(datetime.datetime.now()))
print "------"
for user in userData:
print "User: {0:16} commits:{1:<8} pullrequests:{3:<8} othermerges:{2:<8}".\
format(user,userData[user]['commits'],userData[user]['merges'],\
userData[user]['pullrequests'])
for tfile in userData[user]['filedata']:
print " filetype:{0:10} added:{1:<9} removed:{2}".format(tfile,\
userData[user]['filedata'][tfile][0],\
userData[user]['filedata'][tfile][1])
for bfile in userData[user]['binfiledata']:
print " binfile:{0:11} number:{1}".format(bfile,\
userData[user]['binfiledata'][bfile])
print "------"
#---------------------------------------------------------
# MAIN
#---------------------------------------------------------
# No arguments for now
processGitLogCmd()
outputUserData()
quit()
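# Illustration (hedged, made-up data): processGitLogCmd() consumes lines in the
# format produced by the --pretty/--numstat options above, e.g.
#   COMMIT:@a1b2c3d@Jane Doe@Jane Doe@1617181920@Fix parser bug
#   12<TAB>3<TAB>src/parser.py
# The COMMIT regex yields (hash, committer, author, timestamp, subject); the
# tab-separated numstat lines are summed per file extension by processFileLine().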
``` |
{
"source": "joncotton/armstrong.hatband",
"score": 3
} |
#### File: armstrong/hatband/http.py
```python
from django.http import HttpResponse
import json
class JsonResponse(HttpResponse):
"""
Simple HttpResponse object that takes a JSON value as it's parameter
TODO: Find a proper home for this.
"""
def __init__(self, data, *args, **kwargs):
super(JsonResponse, self).__init__(json.dumps(data), *args, **kwargs)
```
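A minimal usage sketch for `JsonResponse` (the view name and payload are illustrative; the import path assumes the package layout implied by the file path above):
```python
from armstrong.hatband.http import JsonResponse

def api_status(request):
    # Any JSON-serializable structure works; it is dumped into the body.
    return JsonResponse({"status": "ok", "items": [1, 2, 3]})
```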
#### File: hatband/tests/http.py
```python
import json
import random
from ._utils import HatbandTestCase as TestCase
from ..http import JsonResponse
class JsonResponseTestCase(TestCase):
def test_turns_body_into_json(self):
data = {
"foo": "bar",
"random": random.randint(1000, 2000),
}
response = JsonResponse(data)
self.assertIsA(response.content, str, msg="sanity check")
self.assertEqual(json.loads(response.content), data)
```
#### File: hatband/tests/__init__.py
```python
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
from contextlib import contextmanager
import fudge
from .http import *
from .sites import *
from .widgets import *
from ... import hatband
def generate_random_registry():
from ._utils import random_range
return dict([("key%d" % i, i) for i in random_range()])
@contextmanager
def fake_autodiscover():
from django.contrib import admin
autodiscover = fudge.Fake().is_callable()
with fudge.patched_context(admin, "autodiscover", autodiscover):
yield
@contextmanager
def fake_django_site_registry(test):
with fake_autodiscover():
random_registry = generate_random_registry()
from django.contrib import admin
site = fudge.Fake()
site.has_attr(_registry=random_registry)
with fudge.patched_context(admin, "site", site):
test.assertEqual( len(random_registry.items()),
len(site._registry.items()), msg="sanity check")
yield random_registry
class AutodiscoverTestCase(HatbandTestCase):
def setUp(self):
from copy import copy
self.original_site = copy(hatband.site)
hatband.site._registry = {}
def tearDown(self):
hatband.site = self.original_site
@fudge.test
def test_dispatches_to_djangos_autodiscover(self):
from django.contrib import admin
autodiscover = fudge.Fake().is_callable().expects_call()
with fudge.patched_context(admin, "autodiscover", autodiscover):
hatband.autodiscover()
@fudge.test
def test_has_a_copy_of_main_django_registry(self):
random_registry = generate_random_registry()
from django.contrib import admin
site = fudge.Fake()
site.has_attr(_registry=random_registry)
with fake_autodiscover():
with fudge.patched_context(admin, "site", site):
hatband.autodiscover()
for key in random_registry.keys():
self.assertTrue(key in hatband.site._registry)
@fudge.test
def test_has_hatband_registered_plus_(self):
with fake_django_site_registry(self) as random_registry:
from .hatband_support.models import TestCategory
self.assertFalse(TestCategory in hatband.site._registry.keys(),
msg="Sanity check")
hatband.site.register(TestCategory)
self.assertTrue(TestCategory in hatband.site._registry.keys(),
msg="Sanity check")
hatband.autodiscover()
registry = hatband.site._registry.items()
self.assertTrue(TestCategory in hatband.site._registry.keys(),
msg="TestCategory should still be in the registry")
@fudge.test
def test_original_django_sites_registry_remains_untouched(self):
with fake_django_site_registry(self) as random_registry:
from .hatband_support.models import TestCategory
self.assertFalse(TestCategory in random_registry.keys())
hatband.site.register(TestCategory)
hatband.autodiscover()
self.assertFalse(TestCategory in random_registry.keys())
```
#### File: hatband/tests/_utils.py
```python
from armstrong.dev.tests.utils import ArmstrongTestCase
import random
def random_range():
# TODO: make sure this can only be generated once
return range(random.randint(1000, 2000))
class HatbandTestCase(ArmstrongTestCase):
pass
class HatbandTestMixin(object):
script_code = """
<script type="text/javascript" src="/static/ckeditor/ckeditor.js"></script>
""".strip()
textarea_code = 'class="ckeditor"></textarea>'
def assertCkEditorPresent(self, response):
self.assertContains(response, self.script_code)
self.assertContains(response, self.textarea_code)
def assertCkEditorNotPresent(self, response):
self.assertNotContains(response, self.script_code)
self.assertNotContains(response, self.textarea_code)
```
#### File: hatband/widgets/ckeditor.py
```python
from django.forms import widgets
from django.conf import settings
class CKEditorWidget(widgets.Textarea):
class Media:
js = (''.join((settings.STATIC_URL, "ckeditor/ckeditor.js")),)
def __init__(self, attrs=None):
final_attrs = {'class': 'ckeditor'}
if attrs is not None:
final_attrs.update(attrs)
if 'class' in attrs:
final_attrs['class'] = ' '.join((attrs['class'], 'ckeditor'))
super(CKEditorWidget, self).__init__(attrs=final_attrs)
```
#### File: hatband/widgets/visualsearch.py
```python
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms import Widget
from django.template.loader import render_to_string
from ..utils import static_url
class GenericKeyWidget(Widget):
template = "admin/hatband/widgets/generickey.html"
if getattr(settings, "ARMSTRONG_ADMIN_PROVIDE_STATIC", True):
class Media:
js = (static_url("visualsearch/dependencies.js"),
static_url("visualsearch/visualsearch.js"),
static_url("generickey.js"),
)
css = {
"all": (static_url("visualsearch/visualsearch.css"),
static_url("hatband/css/generickey.css"),
)
}
def __init__(self, object_id_name="object_id",
content_type_name="content_type",
facet_url=None,
query_lookup_url=None,
base_lookup_url=None,
*args, **kwargs):
super(GenericKeyWidget, self).__init__(*args, **kwargs)
self.object_id_name = object_id_name
self.content_type_name = content_type_name
self.facet_url = facet_url
self.query_lookup_url = query_lookup_url
self.base_lookup_url = base_lookup_url
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
final_attrs.update({
"value": value,
"is_templated": final_attrs["id"].find("__prefix__") > -1,
"object_id_name": self.object_id_name,
"content_type_name": self.content_type_name,
"facet_url": self.facet_url or
reverse("admin:generic_key_facets"),
"query_lookup_url": (self.query_lookup_url or
reverse("admin:type_and_model_to_query")),
"base_lookup_url": (self.base_lookup_url or
reverse("admin:index"))
})
return render_to_string(self.template, final_attrs)
```
#### File: joncotton/armstrong.hatband/fabfile.py
```python
from armstrong.dev.tasks import *
from fabric.api import local, task
settings = {
'DEBUG': True,
'INSTALLED_APPS': (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'armstrong.hatband',
'armstrong.hatband.tests.hatband_support',
'lettuce.django',
),
'STATIC_URL': '/TESTING/',
'ROOT_URLCONF': 'armstrong.hatband.tests.hatband_support.urls',
}
main_app = "hatband"
tested_apps = ("hatband_support", "hatband", )
full_name = "armstrong.hatband"
pip_install_first = True
@task
def update_visualsearch():
local("cp -R ./vendor/visualsearch/build-min/* ./armstrong/hatband/static/visualsearch/")
local("cp ./vendor/visualsearch/lib/images/embed/icons/* ./armstrong/hatband/static/images/embed/icons/")
``` |
{
"source": "joncovington/FileHandling",
"score": 3
} |
#### File: src/EasyFileHandling/main.py
```python
import os
from typing import Union
import json
from EasyFileHandling.errors.error import ZeroLineFile
class FileHandler:
def __init__(self, _filename: str) -> None:
self._filename = _filename
self._extension = _filename[_filename.rfind("."):]
def does_file_exist(self) -> bool:
if os.path.exists(self._filename):
return True
else:
return False
def get_file_content(self) -> str:
try:
with open(self._filename, "r") as f:
_content = f.read()
return _content
except FileNotFoundError:
return f"{self._filename} is not found, please enter the correct file!"
def get_line_count(self) -> Union[int, str]:
"""Returns number of lines in file"""
try:
with open(self._filename, "r") as f:
num_lines = sum(1 for _ in f)
return num_lines
except FileNotFoundError:
return f"{self._filename} is not found, please enter the correct file!"
def get_longest_line(self) -> None:
"""Prints longest line length and line indices of lines with that length"""
with open(self._filename, "r") as f:
try:
lines = f.readlines()
if len(lines) == 0:
raise ZeroLineFile
lines = [line.rstrip('\n') for line in lines]
except ZeroLineFile:
print('Cannot get longest line. This file is empty and has 0 lines.')
return
longest_line = 0
for line in lines:
if len(line) > longest_line:
longest_line = len(line)
longest_index_list = []
for index, line in enumerate(lines):
if len(line) == longest_line:
longest_index_list.append(index)
print(f'Longest line is {longest_line} at line index of {longest_index_list}')
def change_file_content(self, new_content: Union[str, bytes, bytearray]) -> None:
"""Changes whole file content"""
if isinstance(new_content, (bytes, bytearray)):
with open(self._filename, "wb") as f:
f.write(new_content)
elif isinstance(new_content, str):
with open(self._filename, "w") as f:
f.write(new_content)
else:
raise TypeError("Content must be string or bytes type object")
def add_something_to_file_content(self, content: Union[str, bytes]) -> None:
"""Appends content to end of the file!"""
if isinstance(content, (bytes, bytearray)):
with open(self._filename, "ab") as f:
f.write(content)
elif isinstance(content, str):
with open(self._filename, "a") as f:
f.write(content)
else:
raise TypeError("Content must be string or bytes type object")
def what_type_of_file(self) -> str:
"""Tells you what type of file is it."""
fileTypes = {
".py": "Python",
".js": "Javascript",
".cpp": "C++",
".c": "C",
".sql": "SQL",
".ts": "TypeScript",
".json": "JSON",
".csv": "CSV",
".html": "HTML",
".css": "CSS",
".rb": "Ruby",
".swift": "Swift",
".txt": "Text",
}
if self._extension not in fileTypes:
return "Unknown file/extension. Please Report The Developers [ https://github.com/ProjectsWithPython/FileHandling/issues ]"
else:
return f"This is a {fileTypes[self._extension]} file."
def change_file_extension(self, current_extension: str, new_extension: str) -> str:
try:
os.rename(
self._filename,
self._filename.replace(current_extension, new_extension),
)
return "Done!"
except:
return f"Please Report The Developers [ https://github.com/ProjectsWithPython/FileHandling/issues ]"
def is_file_writeable(self) -> bool:
"""Tells you if the file is writeable (opens in append mode so the contents are not truncated)"""
with open(self._filename, "a") as f:
return f.writable()
class JsonHandler:
"""JsonHandler it reads, converts and writes but remeber file should be json only!"""
def __init__(self, _filename):
self._filename = _filename
def write_to_json(self, obj: Union[dict, list]) -> None:
"""Writes to json file"""
with open(self._filename, "w") as f:
json.dump(obj, f)
def append_to_json(self, obj: Union[dict, list]) -> None:
"""Writes to json file"""
with open(self._filename, "a") as f:
json.dump(obj, f)
def read_json_file(self) -> Union[dict, list]:
with open(self._filename, "r") as f:
x = json.load(f)
return x
def change_file_name(path: str, new_name: str) -> None:
"""Changes file name"""
os.rename(path, new_name)
def delete(file: str) -> Union[str, None]:
"""Deletes file"""
if os.path.exists(file):
os.remove(file)
else:
return f"{file} does not exists. Make sure the name is correct or you need to put the file path."
def create_file(file: str) -> Union[bool, str]:
"""Creates a file"""
if os.path.exists(file):
return False
else:
with open(file, "w") as f:
f.write("File Created With FileHandler ©")
return "Created File"
def list_item_in_dir(path) -> list:
"""Gives a list of all items in the folder"""
if os.path.exists(path):
return os.listdir(path)
else:
raise FileNotFoundError("Folder or path not found")
def delete_all_item_in_dict(path: str) -> list:
"""Deletes all items in the folder"""
em = []
if os.path.exists(path):
for i in os.listdir(path):
os.remove(os.path.join(path, i))
em.append(i)
return em
else:
raise FileNotFoundError("Folder or path not found")
def is_pythonfile(file: str) -> bool:
"""Returns True if file extension is py and if not then false."""
if file.endswith("py"):
return True
else:
return False
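# Minimal usage sketch (not part of the original module); "notes.txt" is an
# illustrative file name, not something the package ships with.
if __name__ == "__main__":
    handler = FileHandler("notes.txt")
    print(handler.does_file_exist())
    print(handler.what_type_of_file())
    print(handler.get_line_count())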
``` |
{
"source": "joncrain/dynamic_dock",
"score": 3
} |
#### File: joncrain/dynamic_dock/dynamic-dock.py
```python
import stat
import os
import json
import sys
from docklib import Dock
try:
from urllib.request import urlopen # Python 3
from urllib.error import HTTPError, URLError
except ImportError:
from urllib2 import urlopen, HTTPError, URLError # Python 2
dock_url = "https://domain.org/dock/"
dock_backup = "/Users/Shared/dock/"
if not os.path.exists(dock_backup):
os.makedirs(dock_backup)
os.chmod(dock_backup, 0o777)
def get_applications(dock_name):
"""
Returns a dictionary of applications from a file called "dock_name.json"
"""
try:
response = urlopen("%s%s.json" % (dock_url, dock_name))
backup_file = "%s%s.json" % (dock_backup, dock_name)
if not os.path.exists(backup_file):
f = open(backup_file, "w")
f.close()
os.chmod(backup_file, 0o777)
f = open(backup_file, "w")
app_json = response.read()
f.write(app_json)
f.close()
dock_dict = json.loads(app_json)
except HTTPError:
"""
404 connection error -
The json for this manifest doesn't exist
"""
dock_dict = {}
pass
except URLError:
"""
Most likely we have lost connection
so we will fall back to the standard dock
"""
f = open("%s%s.json" % (dock_backup, dock_name), "r")
app_json = f.read()
dock_dict = json.loads(app_json)
except Exception as e:
print(e)
dock_dict = {}
return dock_dict
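# Illustrative shape of a dock JSON file (keys are inferred from the lookups
# in main() below; the example values are hypothetical, not from this repo):
# {
#   "use-global": false,
#   "persistent-apps": ["/Applications/Safari.app"],
#   "persistent-others": ["~/Downloads"]
# }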
def backup_dock(dock_name):
"""
Create a backup of the dock files in case the machine or server goes offline
"""
response = urlopen("%s%s.json" % (dock_url, dock_name))
return response
def get_munki_manifests():
"""
Returns a list of munki_manifests
"""
manifests = "/Library/Managed Installs/manifests"
munki_manifests = []
for manifest in os.listdir(manifests):
munki_manifests.append(manifest)
return munki_manifests
def get_app_list(target, key):
"""
Returns a list of applications from target
"""
target_dock_dict = get_applications(target)
target_applications = target_dock_dict[key]
return target_applications
def main():
"""
Run the program
"""
dock = Dock()
if dock.mod_count > 3:
sys.exit()
applications_pa = []
applications_po = []
# Get standard applications
try:
applications_pa = get_app_list("global_staff", "persistent-apps")
applications_po = get_app_list("global_staff", "persistent-others")
except:
pass
# Check for names of munki manifests
munki_manifests = get_munki_manifests()
# Check for existence of dock for manifests and clear if one doesn't want global
for munki_manifest in munki_manifests:
try:
if get_app_list(munki_manifest, "use-global") is False:
applications_pa = []
applications_po = []
except:
pass
# Add the applications
for munki_manifest in munki_manifests:
try:
applications_pa = applications_pa + get_app_list(
munki_manifest, "persistent-apps"
)
applications_po = applications_po + get_app_list(
munki_manifest, "persistent-others"
)
except:
pass
# Iterate over applications
dock.items["persistent-apps"] = []
for item in applications_pa:
if os.path.exists(item):
item = dock.makeDockAppEntry(item)
dock.items["persistent-apps"].append(item)
# iterate over others
dock.items["persistent-others"] = []
for item in applications_po:
if "~" in item:
item = dock.makeDockOtherEntry(
os.path.expanduser(item), arrangement=1, displayas=1, showas=3
)
else:
item = dock.makeDockOtherEntry(item, arrangement=1, displayas=1, showas=3)
dock.items["persistent-others"] = [item] + dock.items["persistent-others"]
dock.save()
if __name__ == "__main__":
main()
``` |
{
"source": "joncrawf/mime",
"score": 3
} |
#### File: mime/envs/dog_navigation.py
```python
import gym
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from gym import spaces
from gym.utils import seeding
class DogNavigation2D(gym.Env):
"""2D navigation problems, as described in [1]. The code is adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/maml_examples/point_env_randgoal.py
At each time step, the 2D agent takes an action (its velocity, clipped in
[-0.1, 0.1]), and receives a reward which is the inverse of
its L2 distance to the goal when it is close to the goal position
(ie. the reward is `1/distance`). The 2D navigation tasks are
generated by sampling goal positions from the uniform distribution
on [-0.5, 0.5]^2.
[1] <NAME>, <NAME>, <NAME>, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
"""
def __init__(self, task={}, low=-0.5, high=0.5, sparse=True):
super(DogNavigation2D, self).__init__()
self.low = low
self.high = high
self.observation_space = spaces.Box(low=0, high=10,
shape=(3,), dtype=np.float32)
self.action_space = spaces.Box(low=-0.1, high=0.1,
shape=(2,), dtype=np.float32)
self._task = task
self.sparse = sparse
self._goal = task.get('goal', np.zeros(2, dtype=np.float32))
self._state = np.zeros(2, dtype=np.float32)
self.seed()
# Create figure and axes
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])
img = mpimg.imread('dog_house.png')
gray = np.flipud(rgb2gray(img))
self.map = gray
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def sample_tasks(self, num_tasks):
goals = []
# goals.append(np.array([25, 45])/10)
goals.append(np.array([20, 90])/10)
goals.append(np.array([53, 40])/10)
# goals.append(np.array([85, 85])/10)
goals.append(np.array([85, 13])/10)
tasks = [{'goal': goal} for goal in goals]
return tasks
def sample_test_tasks(self, num_tasks):
goals = []
# goals.append(np.array([25, 45])/10)
goals.append(np.array([20, 90])/10) # seed 25
# goals.append(np.array([53, 40])/10)
# goals.append(np.array([85, 85])/10)
goals.append(np.array([85, 13])/10) # seeds 10 - 30
tasks = [{'goal': goal} for goal in goals]
return tasks
def reset_task(self, task):
self._task = task
self._goal = task['goal']
def reset(self, env=True):
self._state = np.array([13., 20., 0.])/10
return self._state
def step(self, action):
reward = + 0.
action = np.clip(action, -0.1, 0.1)
assert self.action_space.contains(action)
discrete_state = list(np.rint((self._state[:2] + action)*10))
pos_y = int(discrete_state[0])
pos_x = int(discrete_state[1])
neighbourhood = self.map[pos_x-1:pos_x+2,pos_y-1:pos_y+2]
if not (np.array(neighbourhood.shape) == 0).any():
if (neighbourhood > 0.9).all():
self._state[:2] = self._state[:2] + action
self._state[2] = 0.
else:
self._state[2] = 1.
else:
self._state[2] = 1.
# self._state = self._state + action
x = self._state[0] - self._goal[0]
y = self._state[1] - self._goal[1]
distance = np.sqrt(x ** 2 + y ** 2)
if self.sparse:
if distance < 0.5: #(np.abs(x) < 1.) and (np.abs(y) < 1.):
reward = + 1. # / (distance + 1e-8)
success = True
else:
success = False
info = {'task': self._task, 'success': float(success)}
else:
reward = -distance
if distance < 0.5: #(np.abs(x) < 1.) and (np.abs(y) < 1.):
success = True
else:
success = False
info = {'task': self._task, 'success': float(success)}
done = False # ((np.abs(x) < 0.05) and (np.abs(y) < 0.05))
has_end = False
if has_end and success:
done = True
return self._state, reward, done, info
```
#### File: mime/envs/navigation.py
```python
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class Navigation2DEnv(gym.Env):
"""2D navigation problems, as described in [1]. The code is adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/maml_examples/point_env_randgoal.py
At each time step, the 2D agent takes an action (its velocity, clipped in
[-0.1, 0.1]), and receives a penalty equal to its L2 distance to the goal
position (ie. the reward is `-distance`). The 2D navigation tasks are
generated by sampling goal positions from the uniform distribution
on [-0.5, 0.5]^2.
[1] <NAME>, <NAME>, <NAME>, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
"""
def __init__(self, task={}, low=-0.5, high=0.5):
super(Navigation2DEnv, self).__init__()
self.low = low
self.high = high
self.observation_space = spaces.Box(low=-np.inf, high=np.inf,
shape=(2,), dtype=np.float32)
self.action_space = spaces.Box(low=-0.1, high=0.1,
shape=(2,), dtype=np.float32)
self._task = task
self._goal = task.get('goal', np.zeros(2, dtype=np.float32))
self._state = np.zeros(2, dtype=np.float32)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def sample_tasks(self, num_tasks):
goals = self.np_random.uniform(self.low, self.high, size=(num_tasks, 2))
tasks = [{'goal': goal} for goal in goals]
return tasks
def reset_task(self, task):
self._task = task
self._goal = task['goal']
def reset(self, env=True):
self._state = np.zeros(2, dtype=np.float32)
return self._state
def step(self, action):
action = np.clip(action, -0.1, 0.1)
assert self.action_space.contains(action)
self._state = self._state + action
x = self._state[0] - self._goal[0]
y = self._state[1] - self._goal[1]
reward = -np.sqrt(x ** 2 + y ** 2)
done = ((np.abs(x) < 0.01) and (np.abs(y) < 0.01))
return self._state, reward, done, {'task': self._task}
class Navigation2DEnvSparse(gym.Env):
"""2D navigation problems, as described in [1]. The code is adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/maml_examples/point_env_randgoal.py
At each time step, the 2D agent takes an action (its velocity, clipped in
[-0.1, 0.1]), and receives a reward which is the inverse of
its L2 distance to the goal when it is close to the goal position
(ie. the reward is `1/distance`). The 2D navigation tasks are
generated by sampling goal positions from the uniform distribution
on [-0.5, 0.5]^2.
[1] <NAME>, <NAME>, <NAME>, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
"""
def __init__(self, task={}, low=-0.5, high=0.5, sparse=True):
super(Navigation2DEnvSparse, self).__init__()
self.low = low
self.high = high
self.observation_space = spaces.Box(low=-np.inf, high=np.inf,
shape=(2,), dtype=np.float32)
self.action_space = spaces.Box(low=-0.1, high=0.1,
shape=(2,), dtype=np.float32)
self._task = task
self.sparse = sparse
self._goal = task.get('goal', np.zeros(2, dtype=np.float32))
self._state = np.zeros(2, dtype=np.float32)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def sample_tasks(self, num_tasks):
goals = np.empty((num_tasks,2))
i = 0
while i < num_tasks:
goal = self.np_random.randn(1, 2) * self.high
# if (self.low < np.abs(goal[0, 0]) < self.high or self.low < np.abs(goal[0, 1]) < self.high) \
# and -self.high < goal[0, 0] < self.high and -self.high < goal[0, 1] < self.high:
distance = np.sqrt( goal[0,0] ** 2 + goal[0,1] ** 2)
if self.low < distance < self.high:
goals[i] = goal
i += 1
tasks = [{'goal': goal} for goal in goals]
return tasks
def sample_test_tasks(self, num_tasks):
goals = []
dog = False
if dog:
diag = 8.0 * np.sqrt(2) / 2
goals.append(np.array([0,8.0]))
goals.append(np.array([8.0,0]))
goals.append(np.array([diag,diag]))
goals.append(np.array([0,8.0]))
goals.append(np.array([8.0,0]))
goals.append(np.array([diag,diag]))
goals.append(np.array([0,8.0]))
goals.append(np.array([8.0,0]))
goals.append(np.array([diag,diag]))
elif self.low == 5.0 and self.high == 5.5:
medium = (self.high + self.low)/2
diag_medium = medium * np.sqrt(2) / 2
goals.append(np.array([0,medium]))
goals.append(np.array([medium,0]))
goals.append(np.array([0,-medium]))
goals.append(np.array([-medium,0]))
goals.append(np.array([diag_medium,diag_medium]))
goals.append(np.array([diag_medium,-diag_medium]))
goals.append(np.array([-diag_medium,-diag_medium]))
goals.append(np.array([-diag_medium,diag_medium]))
elif self.low == 8.0 and self.high == 9.0:
medium = (self.high + self.low) / 2
diag_medium = medium * np.sqrt(2) / 2
goals.append(np.array([0,medium]))
goals.append(np.array([medium,0]))
goals.append(np.array([0,-medium]))
goals.append(np.array([-medium,0]))
goals.append(np.array([diag_medium,diag_medium]))
goals.append(np.array([diag_medium,-diag_medium]))
goals.append(np.array([-diag_medium,-diag_medium]))
goals.append(np.array([-diag_medium,diag_medium]))
elif self.low == 5.0 and self.high == 9.0:
medium = 5.5
diag_medium = medium * np.sqrt(2) / 2
goals.append(np.array([0,medium]))
goals.append(np.array([medium,0]))
goals.append(np.array([0,-medium]))
goals.append(np.array([-medium,0]))
goals.append(np.array([diag_medium,diag_medium]))
goals.append(np.array([diag_medium,-diag_medium]))
goals.append(np.array([-diag_medium,-diag_medium]))
goals.append(np.array([-diag_medium,diag_medium]))
medium = 8.5
diag_medium = medium * np.sqrt(2) / 2
goals.append(np.array([0,medium]))
goals.append(np.array([medium,0]))
goals.append(np.array([0,-medium]))
goals.append(np.array([-medium,0]))
goals.append(np.array([diag_medium,diag_medium]))
goals.append(np.array([diag_medium,-diag_medium]))
goals.append(np.array([-diag_medium,-diag_medium]))
goals.append(np.array([-diag_medium,diag_medium]))
else:
goals = np.empty((num_tasks, 2))
i = 0
while i < num_tasks:
goal = self.np_random.randn(1, 2) * self.high
# if (self.low < np.abs(goal[0, 0]) < self.high or self.low < np.abs(goal[0, 1]) < self.high) \
# and -self.high < goal[0, 0] < self.high and -self.high < goal[0, 1] < self.high:
distance = np.sqrt(goal[0, 0] ** 2 + goal[0, 1] ** 2)
if self.low < distance < self.high:
goals[i] = goal
i += 1
tasks = [{'goal': goal} for goal in goals]
return tasks
def reset_task(self, task):
self._task = task
self._goal = task['goal']
def reset(self, env=True):
self._state = np.zeros(2, dtype=np.float32)
return self._state
def step(self, action):
action = np.clip(action, -0.1, 0.1)
assert self.action_space.contains(action)
self._state = self._state + action
x = self._state[0] - self._goal[0]
y = self._state[1] - self._goal[1]
distance = np.sqrt(x ** 2 + y ** 2)
if self.sparse:
if distance < 1: #(np.abs(x) < 1.) and (np.abs(y) < 1.):
reward = +1. # / (distance + 1e-8)
success = True
else:
success = False
reward = + 0.
info = {'task': self._task, 'success': float(success)}
else:
reward = -distance
if distance < 1: #(np.abs(x) < 1.) and (np.abs(y) < 1.):
success = True
else:
success = False
info = {'task': self._task, 'success': float(success)}
done = False # ((np.abs(x) < 0.05) and (np.abs(y) < 0.05))
has_end = False
if has_end and success:
done = True
return self._state, reward, done, info
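# Minimal usage sketch (not part of the original module); the goal-range
# arguments are illustrative values accepted by the constructor above.
if __name__ == "__main__":
    env = Navigation2DEnvSparse(low=5.0, high=9.0)
    task = env.sample_tasks(num_tasks=1)[0]
    env.reset_task(task)
    obs = env.reset()
    for _ in range(10):
        obs, reward, done, info = env.step(env.action_space.sample())
        print(obs, reward, info["success"])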
```
#### File: mime/metalearners/maml_vime.py
```python
import torch
from torch.nn.utils.convert_parameters import parameters_to_vector
from torch.distributions.kl import kl_divergence
from mime.samplers import MultiTaskSampler
from mime.metalearners.base import GradientBasedMetaLearner
from mime.utils.torch_utils import (weighted_mean, detach_distribution,
to_numpy, vector_to_parameters)
from mime.utils.optimization import conjugate_gradient
from mime.utils.helpers import get_inputs_targets_dynamics
from mime.utils.reinforcement_learning import reinforce_loss
from dowel import tabular
import numpy as np
class MAMLVIME(GradientBasedMetaLearner):
"""Model-Agnostic Meta-Learning (MAML, [1]) for Reinforcement Learning
application, with an outer-loop optimization based on TRPO [2].
Parameters
----------
policy : `mime.policies.Policy` instance
The policy network to be optimized. Note that the policy network is an
instance of `torch.nn.Module` that takes observations as input and
returns a distribution (typically `Normal` or `Categorical`).
fast_lr : float
Step-size for the inner loop update/fast adaptation.
num_steps : int
Number of gradient steps for the fast adaptation. Currently setting
`num_steps > 1` does not resample different trajectories after each
gradient steps, and uses the trajectories sampled from the initial
policy (before adaptation) to compute the loss at each step.
first_order : bool
If `True`, then the first order approximation of MAML is applied.
device : str ("cpu" or "cuda")
Name of the device for the optimization.
References
----------
.. [1] <NAME>., <NAME>., and <NAME>. (2017). Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks. International
Conference on Machine Learning (ICML) (https://arxiv.org/abs/1703.03400)
.. [2] <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>.
(2015). Trust Region Policy Optimization. International Conference on
Machine Learning (ICML) (https://arxiv.org/abs/1502.05477)
"""
def __init__(self,
policy,
dynamics,
eta,
baseline,
fast_lr=0.5,
first_order=False,
device='cpu',
epochs_counter=None,
inverse_dynamics=False,
gae_lambda=1.0,
dynamics_fast_lr=0.05,
eta_lr=100,
pre_epochs=None,
benchmark=None
):
super(MAMLVIME, self).__init__(policy, device=device)
self.fast_lr = fast_lr
self.first_order = first_order
self.dynamics = dynamics
self.baseline = baseline
self.eta = eta
self.epochs_counter = epochs_counter
self.inverse_dynamics = inverse_dynamics
self.dynamics_fast_lr = dynamics_fast_lr
self.gae_lambda = gae_lambda
self.eta_lr = eta_lr
self.pre_epochs = pre_epochs
self.benchmark=benchmark
async def adapt(self, train_futures, first_order=None):
if first_order is None:
first_order = self.first_order
# Loop over the number of steps of adaptation
params = None
for futures in train_futures:
episodes = await futures
if self.epochs_counter.value > self.pre_epochs and self.eta.requires_grad:
episodes._rewards = (1.05 - self.eta.to_sigmoid()) * episodes._extrinsic_rewards + ( (self.eta.to_sigmoid() + 0.05) * episodes._intrinsic_rewards)
# episodes._rewards = episodes._extrinsic_rewards + (self.eta.to_sigmoid() * episodes._intrinsic_rewards)
self.baseline.fit(episodes)
episodes.compute_advantages(self.baseline,
gae_lambda=self.gae_lambda,
normalize=True)
inner_loss = reinforce_loss(self.policy,
episodes,
params=params)
params = self.policy.update_params(inner_loss,
params=params,
step_size=self.fast_lr,
first_order=first_order)
return params
async def adapt_dynamics(self, train_futures, first_order=None):
if first_order is None:
first_order = self.first_order
device = self.device
dyn_params = None
# Loop over the number of steps of adaptation
for futures in train_futures:
episodes = await futures # TODO can remove?
obs = episodes.observations
act = episodes.actions
obs_nxt = obs[1:]
_inputs, _targets = get_inputs_targets_dynamics(obs, act, device, inverse=self.inverse_dynamics, benchmark=self.benchmark)
# steps = _inputs.shape[0]
# start = 0
# end = 1
# elbo_total = 0
# for _ in range(steps):
# elbo = self.dynamics.loss_with_params(_inputs[start:end], _targets[start:end], params=dyn_params)
# elbo_total += elbo / _inputs.shape[0]
# start += 1
# end += 1
elbo = 0
for i in range(0, _inputs.shape[0], 128):
elbo += self.dynamics.loss_with_params(_inputs[i:i + 128],
_targets[i:i + 128],
params=dyn_params) / _inputs.shape[0]
dyn_params = self.dynamics.update_params(elbo,
params=dyn_params,
step_size=self.dynamics.learning_rate,
first_order=first_order)
with torch.no_grad():
_out = self.dynamics.pred_fn(_inputs)
old_mse_loss = torch.mean((_out - _targets) ** 2)
_out = self.dynamics.pred_sym_with_params(_inputs, dyn_params)
new_mse_loss = torch.mean((_out - _targets) ** 2)
# print("Out", _out)
# print("Targets", _targets)
return dyn_params, old_mse_loss, new_mse_loss
def hessian_vector_product(self, kl, damping=1e-2):
grads = torch.autograd.grad(kl,
self.policy.parameters(),
create_graph=True)
flat_grad_kl = parameters_to_vector(grads)
def _product(vector, retain_graph=True):
grad_kl_v = torch.dot(flat_grad_kl, vector)
grad2s = torch.autograd.grad(grad_kl_v,
self.policy.parameters(),
retain_graph=retain_graph)
flat_grad2_kl = parameters_to_vector(grad2s)
return flat_grad2_kl + damping * vector
return _product
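# Note: `_product` above computes (H + damping * I) v for the Hessian H of
# the mean KL divergence, using two reverse-mode passes (a gradient of
# grad_kl . v) instead of materialising H; `step` feeds it to
# conjugate_gradient to solve for the natural-gradient step direction.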
async def surrogate_loss(self, train_futures, valid_futures, old_pi=None, use_dynamics=False):
first_order = (old_pi is not None) or self.first_order
params = await self.adapt(train_futures,
first_order=first_order)
with torch.set_grad_enabled(old_pi is None):
valid_episodes = await valid_futures
pi = self.policy(valid_episodes.observations, params=params)
if old_pi is None:
old_pi = detach_distribution(pi)
log_ratio = (pi.log_prob(valid_episodes.actions)
- old_pi.log_prob(valid_episodes.actions))
ratio = torch.exp(log_ratio)
extrinsic_losses = None
if self.epochs_counter.value > self.pre_epochs and self.eta.requires_grad:
valid_episodes._rewards = valid_episodes._extrinsic_rewards
# self.baseline.fit(valid_episodes)
# valid_episodes.compute_advantages(self.baseline,
# gae_lambda=self.gae_lambda,
# normalize=True)
extrinsic_losses = -weighted_mean(ratio * valid_episodes.rewards,
lengths=valid_episodes.lengths)
extrinsic_losses = extrinsic_losses.mean()
valid_episodes._rewards = (1.05 - self.eta.to_sigmoid()) * valid_episodes._extrinsic_rewards + (
(self.eta.to_sigmoid() + 0.05) * valid_episodes._intrinsic_rewards)
# valid_episodes._rewards = valid_episodes._extrinsic_rewards + (
# self.eta.to_sigmoid() * valid_episodes._intrinsic_rewards)
self.baseline.fit(valid_episodes)
valid_episodes.compute_advantages(self.baseline,
gae_lambda=self.gae_lambda,
normalize=True)
losses = -weighted_mean(ratio * valid_episodes.advantages,
lengths=valid_episodes.lengths)
kls = weighted_mean(kl_divergence(pi, old_pi),
lengths=valid_episodes.lengths)
if use_dynamics:
# for k,v in self.dynamics.named_parameters():
# print("Before")
# print(k,v)
# break
dyn_params, old_mse_loss, new_mse_loss = await self.adapt_dynamics(train_futures,
first_order=first_order)
# for k,v in dyn_params.items():
# print("After")
# print(k,v)
# break
device = self.device
with torch.set_grad_enabled(True):
obs = valid_episodes.observations
act = valid_episodes.actions
obs_nxt = obs[1:]
_inputs, _targets = get_inputs_targets_dynamics(obs, act, device, inverse=self.inverse_dynamics, benchmark=self.benchmark)
elbo = 0
for i in range(0, _inputs.shape[0], 128):
elbo += self.dynamics.loss_with_params(_inputs[i:i + 128],
_targets[i:i + 128],
params=dyn_params) / _inputs.shape[0]
# steps = _inputs.shape[0]
# start = 0
# end = 1
# elbo_total = 0
# for _ in range(steps):
# elbo = self.dynamics.loss_with_params(_inputs[start:end], _targets[start:end], params=dyn_params)
# elbo_total += elbo / _inputs.shape[0]
# start += 1
# end += 1
return losses.mean(), kls.mean(), old_pi, elbo, old_mse_loss, new_mse_loss, extrinsic_losses
else:
return losses.mean(), kls.mean(), old_pi
def step(self,
train_futures,
valid_futures,
max_kl=1e-3,
cg_iters=10,
cg_damping=1e-2,
ls_max_steps=10,
ls_backtrack_ratio=0.5):
num_tasks = len(train_futures[0])
logs = {}
# for k,v in self.dynamics.named_parameters():
# print("Before")
# print(k,v)
# break
epochs_counter = self.epochs_counter.value
if epochs_counter == 0: # self.pre_epochs:
length = 50
else:
length = 1
for _ in range(length):
optim = torch.optim.Adam(params=self.dynamics.parameters(), lr=self.dynamics_fast_lr * num_tasks**2) # 0.05 # 0.01
old_losses, old_kls, old_pis, dyn_losses, old_mse_losses, new_mse_losses, extrinsic_losses = self._async_gather([
self.surrogate_loss(train, valid, old_pi=None, use_dynamics=True)
for (train, valid) in zip(zip(*train_futures), valid_futures)])
dyn_loss = sum(dyn_losses) / num_tasks
old_mse_loss = sum(old_mse_losses) / num_tasks
new_mse_loss = sum(new_mse_losses) / num_tasks
if _ == 0:
tabular.record("Dynamics/OldMSELoss", np.float32(old_mse_loss))
if _ == length -1:
tabular.record("Dynamics/NewMSELoss", np.float32(new_mse_loss))
dyn_loss.backward(retain_graph=True)
optim.step()
# for k,v in self.dynamics.named_parameters():
# print("After")
# print(k,v)
# break
logs['loss_before'] = to_numpy(old_losses)
logs['kl_before'] = to_numpy(old_kls)
old_loss = sum(old_losses) / num_tasks
if self.eta.requires_grad:
if epochs_counter > self.pre_epochs:
# if old_loss != 0:
# Eta update
extrinsic_loss = sum(extrinsic_losses) / num_tasks
grads = torch.autograd.grad(extrinsic_loss, # old_loss,
self.eta.value,
retain_graph=True)
# print(grads)
# self.eta = torch.sigmoid(torch.log(self.eta/(1-self.eta)) - self.fast_lr * grads[0])
self.eta.value.data.copy_(self.eta.value.data - np.clip(grads[0] * self.eta_lr / torch.sigmoid(self.eta.value.data), -0.3, 0.3)) # * self.fast_lr
print(grads[0])
else:
pass
eta = float(np.float32(torch.sigmoid(self.eta.value.data)))
tabular.record("Eta", eta)
grads = torch.autograd.grad(old_loss,
self.policy.parameters(),
retain_graph=True)
grads = parameters_to_vector(grads)
# Compute the step direction with Conjugate Gradient
old_kl = sum(old_kls) / num_tasks
hessian_vector_product = self.hessian_vector_product(old_kl,
damping=cg_damping)
stepdir = conjugate_gradient(hessian_vector_product,
grads,
cg_iters=cg_iters)
# Compute the Lagrange multiplier
shs = 0.5 * torch.dot(stepdir,
hessian_vector_product(stepdir, retain_graph=False))
lagrange_multiplier = torch.sqrt(shs / max_kl)
step = stepdir / lagrange_multiplier
# Save the old parameters
old_params = parameters_to_vector(self.policy.parameters())
# Line search
step_size = 1.0
for _ in range(ls_max_steps):
vector_to_parameters(old_params - step_size * step,
self.policy.parameters())
losses, kls, _ = self._async_gather([
self.surrogate_loss(train, valid, old_pi=old_pi)
for (train, valid, old_pi)
in zip(zip(*train_futures), valid_futures, old_pis)])
improve = (sum(losses) / num_tasks) - old_loss
kl = sum(kls) / num_tasks
if (improve.item() < 0.0) and (kl.item() < max_kl):
logs['loss_after'] = to_numpy(losses)
logs['kl_after'] = to_numpy(kls)
break
step_size *= ls_backtrack_ratio
else:
vector_to_parameters(old_params, self.policy.parameters())
return logs
```
#### File: mime/utils/logger.py
```python
import numpy as np
from dowel import tabular
import matplotlib.pyplot as plt
import tensorflow as tf
import io
import matplotlib.image as mpimg
def plot_return_success(episodes, label):
episodes_return = 0
episodes_success = 0
has_success = "success" in episodes[0]._logs
# if success:
# num_trajectories = len(train_episodes[0][0]._logs["success"])
for t in episodes:
episodes_return += t._logs["return"]
if has_success and t._logs["success"]:
episodes_success += 1
episodes_return = (episodes_return / len(episodes))
tabular.record(f"{label}/Returns", np.float32(episodes_return))
if has_success:
tabular.record(f"{label}/Success", np.float32(episodes_success / len(episodes)))
def plot_dynamics_return(episodes, label):
intrinsic_return = 0
combined_return = 0
for t in episodes:
intrinsic_return += t._logs["intrinsic_return"]
combined_return += t._logs["combined_return"]
intrinsic_return = (intrinsic_return / len(episodes))
combined_return = (combined_return / len(episodes))
tabular.record(f"{label}/IntrinsicReturns", np.float32(intrinsic_return))
tabular.record("Valid/CombinedReturns", np.float32(combined_return))
def plot_benchmark(benchmark, train_episodes, valid_episodes, is_testing):
env_names_list = list(benchmark.train_classes.keys())
if is_testing:
env_names_list = list(benchmark.test_classes.keys())
for train_step, train_eps in enumerate(train_episodes):
envs = dict()
for episodes in train_eps:
env_name = episodes._logs["env_name"]
if env_name in env_names_list:
env_names_list.remove(env_name)
if env_name not in envs:
envs[env_name] = { "return" : np.float32(episodes._logs["return"]),
"success" : float(episodes._logs["success"]),
"count" : 1}
else:
envs[env_name]["return"] += np.float32(episodes._logs["return"])
envs[env_name]["success"] += float(episodes._logs["success"])
envs[env_name]["count"] += 1
for env_name, env_stats in envs.items():
tabular.record(f'Train-{train_step}/{env_name}/Returns', env_stats["return"] / env_stats["count"])
tabular.record(f'Train-{train_step}/{env_name}/Success', env_stats["success"] / env_stats["count"])
for env_name in env_names_list:
tabular.record(f'Train-{train_step}/{env_name}/Returns', np.nan)
tabular.record(f'Train-{train_step}/{env_name}/Success', np.nan)
envs = dict()
for episodes in valid_episodes:
env_name = episodes._logs["env_name"]
# env_names_list.remove(env_name)
if env_name not in envs:
envs[env_name] = {"return": np.float32(episodes._logs["return"]),
"success": float(episodes._logs["success"]),
"count": 1}
else:
envs[env_name]["return"] += np.float32(episodes._logs["return"])
envs[env_name]["success"] += float(episodes._logs["success"])
envs[env_name]["count"] += 1
for env_name, env_stats in envs.items():
tabular.record(f'Valid/{env_name}/Returns', env_stats["return"] / env_stats["count"])
tabular.record(f'Valid/{env_name}/Success', env_stats["success"] / env_stats["count"])
for env_name in env_names_list:
tabular.record(f'Valid/{env_name}/Returns', np.nan)
tabular.record(f'Valid/{env_name}/Success', np.nan)
def log_returns(train_episodes,
valid_episodes,
batch,
log_dynamics=False,
pre_exploration_epochs=-1,
benchmark=None,
is_testing=False,
env=None,
env_name=""):
if env:
if env_name == "MetaPendulum-v0":
for train_step, train_eps in enumerate(train_episodes):
if 'success' in train_eps[0]._logs:
break
for train_episode in train_eps:
success = False
for traj_index in range(train_episode._logs['observations'].shape[2]):
if not success:
trajectory = train_episode._logs['observations'][:, :, traj_index]
success = env.check_if_solved(trajectory)
train_episode._logs['success'] = success
for train_episode in valid_episodes:
if 'success' in train_episode._logs:
break
success = False
for traj_index in range(train_episode._logs['observations'].shape[2]):
if not success:
trajectory = train_episode._logs['observations'][:, :, traj_index]
success = env.check_if_solved(trajectory)
train_episode._logs['success'] = success
for train_step, train_eps in enumerate(train_episodes):
plot_return_success(train_eps, f'Train-{train_step}')
plot_return_success(valid_episodes, "Valid")
if log_dynamics:
if batch > pre_exploration_epochs:
for train_step, train_eps in enumerate(train_episodes):
plot_dynamics_return(train_eps, f'Train-{train_step}')
plot_dynamics_return(valid_episodes, "Valid")
else:
for train_step in range(len(train_episodes)):
tabular.record(f"Train-{train_step}/IntrinsicReturns", 0)
tabular.record(f"Train-{train_step}/CombinedReturns", 0)
tabular.record("Valid/IntrinsicReturns", 0)
tabular.record("Valid/CombinedReturns", 0)
if benchmark:
plot_benchmark(benchmark, train_episodes, valid_episodes, is_testing)
def plot_to_image(figure):
"""Converts the matplotlib plot specified by 'figure' to a PNG image and
returns it. The supplied figure is closed and inaccessible after this call."""
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
def log_trajectories(env_name, output_folder, train_episodes, valid_episodes, batch, is_test=False):
if env_name in ["2DNavigationSparse-v0","2DNavigationSparseLong-v0"]:
if is_test:
pass
else:
# Train
figure, ax = plt.subplots(figsize=(8, 8))
# ax.fill([-10, -10, 10, 10], [7.5, 10, 10, 7.5], color="red", alpha=0.3, lw=0)
# ax.fill([-10, -10, 10, 10], [-7.5, -10, -10, -7.5], color="red", alpha=0.3, lw=0)
# ax.fill([-7.5, -7.5, -10, -10], [-7.5, 7.5, 7.5, -7.5], color="red", alpha=0.3, lw=0)
# ax.fill( [7.5, 7.5, 10, 10], [-7.5, 7.5, 7.5, -7.5], color="red", alpha=0.3, lw=0)
num_tasks = len(train_episodes[0])
colors = ['firebrick', 'chocolate', 'darkorange', 'gold', 'darkolivegreen', 'lightgreen', 'turquoise', 'teal', 'dodgerblue', 'purple']
times_colors = int( num_tasks / len(colors)) + 1
colors = (colors * times_colors)[:num_tasks]
for i, color in enumerate(colors):
# for train_episode in train_episodes[0]:
train_episode = train_episodes[0][i]
trajectories = train_episode._logs['observations'] # steps, n_trajectories, space_dims
for t in range(trajectories.shape[1]):
plt.plot(trajectories[:, t, 0], trajectories[:, t, 1], color=color, alpha=0.4)
for task_dict in train_episode._logs['infos']:
goal = task_dict["task"]["goal"]
circle_reward = plt.Circle(goal, 1., color=color, alpha=0.3)
# circle_goal = plt.Circle(goal, 0.05, color=color, alpha=0.6)
ax.add_artist(circle_reward)
# ax.add_artist(circle_goal)
break # Because it is repeated several times
circle_goals = plt.Circle([0,0], radius=7, fill=False, linewidth=2.0, linestyle='--', color='darkslateblue')
ax.add_artist(circle_goals)
plt.axis([-10, +10, -10, +10])
image = plot_to_image(figure)
logdir = output_folder + '/train_trajectories'
file_writer = tf.summary.create_file_writer(logdir)
with file_writer.as_default():
tf.summary.image("TrainTrajectories", image, step=batch)
# Valid
figure, ax = plt.subplots(figsize=(8, 8))
# ax.fill([-10, -10, 10, 10], [7.5, 10, 10, 7.5], color="red", alpha=0.3, lw=0)
# ax.fill([-10, -10, 10, 10], [-7.5, -10, -10, -7.5], color="red", alpha=0.3, lw=0)
# ax.fill([-7.5, -7.5, -10, -10], [-7.5, 7.5, 7.5, -7.5], color="red", alpha=0.3, lw=0)
# ax.fill( [7.5, 7.5, 10, 10], [-7.5, 7.5, 7.5, -7.5], color="red", alpha=0.3, lw=0)
for i, color in enumerate(colors):
valid_episode = valid_episodes[i]
# for valid_episode in valid_episodes:
trajectories = valid_episode._logs['observations'] # steps, n_trajectories, space_dims
for t in range(trajectories.shape[1]):
plt.plot(trajectories[:, t, 0], trajectories[:, t, 1], color=color, alpha=0.4)
for task_dict in valid_episode._logs['infos']:
goal = task_dict["task"]["goal"]
circle_reward = plt.Circle(goal, 1., color=color, alpha=0.3)
# circle_goal = plt.Circle(goal, 0.05, color='blue', alpha=0.6)
ax.add_artist(circle_reward)
# ax.add_artist(circle_goal)
break
circle_goals = plt.Circle([0,0], radius=7, fill=False, linewidth=2.0, linestyle='--', color='darkslateblue')
ax.add_artist(circle_goals)
plt.axis([-10, +10, -10, +10])
image = plot_to_image(figure)
with file_writer.as_default():
tf.summary.image("ValidTrajectories", image, step=batch)
if env_name == "DogNavigation2D-v0":
# Train
figure, ax = plt.subplots(figsize=(8, 8))
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])
img = mpimg.imread('dog_house.png')
gray = np.flipud(rgb2gray(img))
plt.imshow(gray, cmap=plt.get_cmap('gray'), vmin=0, vmax=1, origin='lower')
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
# ax.fill([-10, -10, 10, 10], [7.5, 10, 10, 7.5], color="red", alpha=0.3, lw=0)
# ax.fill([-10, -10, 10, 10], [-7.5, -10, -10, -7.5], color="red", alpha=0.3, lw=0)
# ax.fill([-7.5, -7.5, -10, -10], [-7.5, 7.5, 7.5, -7.5], color="red", alpha=0.3, lw=0)
# ax.fill( [7.5, 7.5, 10, 10], [-7.5, 7.5, 7.5, -7.5], color="red", alpha=0.3, lw=0)
num_tasks = len(train_episodes[0])
colors = ['darkorange', 'darkolivegreen', 'dodgerblue', 'purple']
times_colors = int(num_tasks / len(colors)) + 1
colors = (colors * times_colors)[:num_tasks]
for i, color in enumerate(colors):
# for train_episode in train_episodes[0]:
train_episode = train_episodes[0][i]
trajectories = train_episode._logs['observations']*10 # steps, n_trajectories, space_dims
for t in range(trajectories.shape[1]):
plt.plot(trajectories[:, t, 0], trajectories[:, t, 1], color=color, alpha=0.4)
for task_dict in train_episode._logs['infos']:
goal = task_dict["task"]["goal"]*10
circle_reward = plt.Circle(goal, 5., color=color, alpha=0.3)
# circle_goal = plt.Circle(goal, 0.05, color=color, alpha=0.6)
ax.add_artist(circle_reward)
# ax.add_artist(circle_goal)
break # Because it is repeated several times
# # Create figure and axes
# fig, ax = plt.subplots(1, figsize=(9, 9))
#
# # Display the image
# ax.axes.get_xaxis().set_ticks([])
# ax.axes.get_yaxis().set_ticks([])
# ax.imshow(img, cmap=plt.get_cmap('gray'))
# plt.plot([13, 14], [80] * 2) # Start: 13x80
# plt.plot([23, 24], [55] * 2) # Goal1: 25x55
# plt.plot([20, 21], [10] * 2) # Goal2: 20x10
# plt.plot([85, 86], [15] * 2) # Goal3: 85x15
# plt.plot([85, 86], [87] * 2) # Goal4: 85x87
# # plt.imshow(gray, cmap=plt.get_cmap('gray'), vmin=0, vmax=1, )
# # plt.show()
# Create figure and axes
image = plot_to_image(figure)
logdir = output_folder + '/train_trajectories'
file_writer = tf.summary.create_file_writer(logdir)
with file_writer.as_default():
tf.summary.image("TrainTrajectories", image, step=batch)
# Valid
figure, ax = plt.subplots(figsize=(8, 8))
def rgb2gray(rgb):
return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])
img = mpimg.imread('dog_house.png')
gray = np.flipud(rgb2gray(img))
plt.imshow(gray, cmap=plt.get_cmap('gray'), vmin=0, vmax=1, origin='lower')
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
# ax.fill([-10, -10, 10, 10], [7.5, 10, 10, 7.5], color="red", alpha=0.3, lw=0)
# ax.fill([-10, -10, 10, 10], [-7.5, -10, -10, -7.5], color="red", alpha=0.3, lw=0)
# ax.fill([-7.5, -7.5, -10, -10], [-7.5, 7.5, 7.5, -7.5], color="red", alpha=0.3, lw=0)
# ax.fill( [7.5, 7.5, 10, 10], [-7.5, 7.5, 7.5, -7.5], color="red", alpha=0.3, lw=0)
for i, color in enumerate(colors):
valid_episode = valid_episodes[i]
# for valid_episode in valid_episodes:
trajectories = valid_episode._logs['observations']*10 # steps, n_trajectories, space_dims
for t in range(trajectories.shape[1]):
plt.plot(trajectories[:, t, 0], trajectories[:, t, 1], color=color, alpha=0.4)
for task_dict in valid_episode._logs['infos']:
goal = task_dict["task"]["goal"]*10
circle_reward = plt.Circle(goal, 5., color=color, alpha=0.3)
# circle_goal = plt.Circle(goal, 0.05, color='blue', alpha=0.6)
ax.add_artist(circle_reward)
# ax.add_artist(circle_goal)
break
# circle_goals = plt.Circle([0, 0], radius=7, fill=False, linewidth=2.0, linestyle='--', color='darkslateblue')
# ax.add_artist(circle_goals)
# plt.axis([-10, +10, -10, +10])
image = plot_to_image(figure)
with file_writer.as_default():
tf.summary.image("ValidTrajectories", image, step=batch)
``` |
{
"source": "jonculver/timeline",
"score": 4
} |
#### File: timeline/test/timeline_test.py
```python
from timeline.timeline import Timeline
from timeline.event import Event
def test_timeline_iterate():
"""
Create a timeline with 3 events and check that we can iterate over them correctly
"""
event1 = Event("1", "1978", "__")
event2 = Event("2", "1 Sep 1997", "31 Jul 2000")
event3 = Event("3", "01/12/1990", "31/08/1997")
timeline = Timeline([event1, event2, event3])
output = []
for event in timeline:
output.append(event)
assert output[0] == event1
assert output[1] == event3
assert output[2] == event2
```
#### File: timeline/timeline/timeline.py
```python
from __future__ import annotations
from timeline.event import Event
class Timeline():
def __init__(self,
events:list[Event]) -> None:
"""
Create new timeline object from a list of events
"""
self.events = sorted(events, key=lambda item: item.start)
def __iter__(self) -> Timeline:
"""
Set the current event to the first one and return this object
"""
self.next = 0
return self
def __next__(self) -> Event:
"""
Return the next object in the (sorted) list of events
"""
if self.next < len(self.events):
result = self.events[self.next]
self.next += 1
return result
else:
raise StopIteration
def _create_cycles(self) -> list[list[Event]]:
"""
Return a list of lists of events with non-overlapping dates
"""
result = []
for entry in sorted(self.events, key=lambda item: item.start):
done = False
for cycle in result:
if entry.after(cycle[-1]):
cycle.append(entry)
done = True
break
if not done:
result.append([entry])
return result
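# Illustrative example (assuming Event.after(other) is True when this event
# starts after `other` ends): events A(2000-2001), B(2000-2002), C(2003-2004)
# would yield [[A, C], [B]], i.e. each inner list holds non-overlapping events.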
``` |
{
"source": "joncutrer/rtf2txt",
"score": 3
} |
#### File: src/rtf2txt/main.py
```python
from pathlib import Path
from typing import Optional
import striprtf
import typer
from striprtf.striprtf import rtf_to_text
from . import __version__
app = typer.Typer()
def version_callback(value: bool):
if value:
typer.echo(f"rtf2txt {__version__}")
raise typer.Exit()
@app.command()
def main(
infile: Path,
outfile: Optional[Path] = typer.Argument(None),
version: Optional[bool] = typer.Option(
None, "--version", "-V", callback=version_callback
),
):
"""Convert RTF file to plain text"""
if infile is None:
typer.echo(f"[Errno 2] No such file or directory: '{infile}'")
raise typer.Abort()
if infile.is_dir():
typer.echo(f"[Errno 3] directory provided instead of file: '{infile}'")
raise typer.Abort()
if (outfile is not None) and outfile.is_dir():
typer.echo(f"[Errno 3] directory provided instead of file: '{outfile}'")
raise typer.Abort()
try:
with open(infile, mode="r") as f:
file_contents = f.read()
except FileNotFoundError as e:
typer.echo(str(e))
exit(-1)
text = rtf_to_text(file_contents)
if outfile is None:
typer.echo(text)
return
try:
with open(outfile, mode="w+", encoding="utf-8") as f:
f.write(text)
typer.echo(f"{len(text)} bytes written")
except FileNotFoundError as e:
typer.echo(str(e))
exit(-1)
except PermissionError as e:
typer.echo(str(e))
exit(-1)
if __name__ == "__main__":
# execute only if run as a script
app()
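# Illustrative invocations (assuming the package is importable as installed):
#   python -m rtf2txt.main document.rtf            # print extracted text
#   python -m rtf2txt.main document.rtf out.txt    # write extracted text to a file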
``` |
{
"source": "jond01/imageio",
"score": 3
} |
#### File: imageio/plugins/grab.py
```python
import threading
import numpy as np
from .. import formats
from ..core import Format
class BaseGrabFormat(Format):
""" Base format for grab formats.
"""
_pillow_imported = False
_ImageGrab = None
def __init__(self, *args, **kwargs):
super(BaseGrabFormat, self).__init__(*args, **kwargs)
self._lock = threading.RLock()
def _can_write(self, request):
return False
def _init_pillow(self):
with self._lock:
if not self._pillow_imported:
self._pillow_imported = True # more like tried to import
import PIL
if not hasattr(PIL, "__version__"): # pragma: no cover
raise ImportError("Imageio Pillow requires " "Pillow, not PIL!")
try:
from PIL import ImageGrab
except ImportError:
return None
self._ImageGrab = ImageGrab
return self._ImageGrab
class Reader(Format.Reader):
def _open(self):
pass
def _close(self):
pass
def _get_data(self, index):
return self.format._get_data(index)
class ScreenGrabFormat(BaseGrabFormat):
""" The ScreenGrabFormat provided a means to grab screenshots using
the uri of "<screen>".
This functionality is provided via Pillow. Note that "<screen>" is
only supported on Windows and OS X.
Parameters for reading
----------------------
No parameters.
"""
def _can_read(self, request):
if request.mode[1] not in "i?":
return False
if request.filename != "<screen>":
return False
return bool(self._init_pillow())
def _get_data(self, index):
ImageGrab = self._init_pillow()
assert ImageGrab
pil_im = ImageGrab.grab()
assert pil_im is not None
im = np.asarray(pil_im)
return im, {}
class ClipboardGrabFormat(BaseGrabFormat):
""" The ClipboardGrabFormat provided a means to grab image data from
the clipboard, using the uri "<clipboard>"
This functionality is provided via Pillow. Note that "<clipboard>" is
only supported on Windows.
Parameters for reading
----------------------
No parameters.
"""
def _can_read(self, request):
if request.mode[1] not in "i?":
return False
if request.filename != "<clipboard>":
return False
return bool(self._init_pillow())
def _get_data(self, index):
ImageGrab = self._init_pillow()
assert ImageGrab
pil_im = ImageGrab.grabclipboard()
if pil_im is None:
raise RuntimeError(
"There seems to be no image data on the " "clipboard now."
)
im = np.asarray(pil_im)
return im, {}
# Register. You register an *instance* of a Format class.
format = ScreenGrabFormat(
"screengrab", "Grab screenshots (Windows and OS X only)", [], "i"
)
formats.add_format(format)
format = ClipboardGrabFormat(
"clipboardgrab", "Grab from clipboard (Windows only)", [], "i"
)
formats.add_format(format)
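# Usage sketch (not part of the original module; requires Pillow and a
# supported platform as noted in the docstrings above):
#   import imageio
#   screenshot = imageio.imread("<screen>")     # Windows and OS X
#   clipboard = imageio.imread("<clipboard>")   # Windows only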
``` |
{
"source": "jond01/iqsharp",
"score": 2
} |
#### File: iqsharp/build/test-selenium.py
```python
import os
import pytest
import time
import sys
from notebook.notebookapp import list_running_servers
from notebook.tests.selenium.test_interrupt import interrupt_from_menu
from notebook.tests.selenium.utils import Notebook, wait_for_selector
from selenium.common.exceptions import JavascriptException
from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import WebDriverWait
def setup_module():
'''
Wait for notebook server to start
'''
# workaround for missing os.uname attribute on some Windows systems
if not hasattr(os, "uname"):
os.uname = lambda: ["Windows"]
total_wait_time = 180
remaining_time = total_wait_time
iteration_time = 5
print("Waiting for notebook server to start...")
while not list(list_running_servers()) and remaining_time > 0:
time.sleep(iteration_time)
remaining_time -= iteration_time
if not list(list_running_servers()):
raise Exception(f"Notebook server did not start in {total_wait_time} seconds")
def create_notebook():
'''
Returns a new IQ# notebook in a Firefox web driver
'''
driver = None
max_retries = 5
for _ in range(max_retries):
try:
driver = Firefox()
break
except:
print(f"Exception creating Firefox driver, retrying. Exception info: {sys.exc_info()}")
if not driver:
raise Exception(f"Failed to create Firefox driver in {max_retries} tries")
server = list(list_running_servers())[0]
driver.get('{url}?token={token}'.format(**server))
return Notebook.new_notebook(driver, kernel_name='kernel-iqsharp')
def get_sample_operation():
'''
Returns a sample Q# operation to be entered into a Jupyter Notebook cell
'''
# Jupyter Notebook automatically adds a closing brace when typing
# an open brace into a code cell. To work around this, we omit the
# closing braces from the string that we enter.
return '''
operation DoNothing() : Unit {
use q = Qubit();
H(q);
H(q);
'''
def test_kernel_startup():
'''
Check that basic functionality works
'''
nb = create_notebook()
# Check that %version executes and outputs an expected result
nb.add_and_execute_cell(index=0, content='%version')
outputs = nb.wait_for_cell_output(index=0, timeout=120)
assert len(outputs) > 0
assert "iqsharp" in outputs[0].text, outputs[0].text
# Check that the expected IQ# JavaScript modules are properly loaded
with pytest.raises(JavascriptException):
nb.browser.execute_script("require('fake/module')")
nb.browser.execute_script("require('codemirror/addon/mode/simple')")
nb.browser.execute_script("require('plotting')")
nb.browser.execute_script("require('telemetry')")
nb.browser.execute_script("require('@microsoft/quantum-viz.js')")
nb.browser.quit()
def test_trace_magic():
'''
Check that the IQ# %trace command works correctly
'''
nb = create_notebook()
cell_index = 0
nb.add_and_execute_cell(index=cell_index, content=get_sample_operation())
outputs = nb.wait_for_cell_output(index=cell_index, timeout=120)
assert len(outputs) > 0
assert "DoNothing" == outputs[0].text, outputs[0].text
cell_index = 1
nb.add_and_execute_cell(index=cell_index, content='%trace DoNothing')
outputs = nb.wait_for_cell_output(index=cell_index, timeout=120)
assert len(outputs) > 0
# Verify expected text output
expected_trace = '|0\u27E9 q0 H H' # \u27E9 is mathematical right angle bracket
WebDriverWait(nb.browser, 60).until(
lambda b: expected_trace == nb.get_cell_output(index=cell_index)[0].text
)
outputs = nb.get_cell_output(index=cell_index)
assert expected_trace == outputs[0].text, outputs[0].text
nb.browser.quit()
def test_debug_magic():
'''
Check that the IQ# %debug command works correctly
'''
nb = create_notebook()
cell_index = 0
nb.add_and_execute_cell(index=cell_index, content=get_sample_operation())
outputs = nb.wait_for_cell_output(index=cell_index, timeout=120)
assert len(outputs) > 0
assert "DoNothing" == outputs[0].text, outputs[0].text
def validate_debug_outputs(index, expected_trace):
WebDriverWait(nb.browser, 60).until(
lambda b: len(nb.get_cell_output(index=index)) >= 4 and \
expected_trace == nb.get_cell_output(index=index)[2].text
)
outputs = nb.get_cell_output(index=index)
assert len(outputs) >= 4
assert 'Starting debug session' in outputs[0].text, outputs[0].text
assert 'Debug controls' in outputs[1].text, outputs[1].text
assert expected_trace == outputs[2].text, outputs[2].text
assert 'Finished debug session' in outputs[3].text, outputs[3].text
debug_button_selector = ".iqsharp-debug-toolbar .btn"
def wait_for_debug_button():
wait_for_selector(nb.browser, debug_button_selector, single=True)
def click_debug_button():
nb.browser.find_element_by_css_selector(debug_button_selector).click()
cell_index = 1
# Run %debug and interrupt kernel without clicking "Next step"
nb.add_and_execute_cell(index=cell_index, content='%debug DoNothing')
wait_for_debug_button()
interrupt_from_menu(nb)
validate_debug_outputs(index=cell_index, expected_trace='')
nb.browser.quit()
```
#### File: conda-recipes/qsharp/tests.py
```python
import os
import pytest
os.environ["QSHARP_PY_ISCONDA"] = "True"
from importlib import import_module
from attr import attr
import qsharp
def test_simple_compile():
"""
Verifies that compile works
"""
op = qsharp.compile( """
operation HelloQ() : Result
{
Message($"Hello from quantum world!");
return One;
}
""")
r = op.simulate()
assert r == qsharp.Result.One
def test_user_agent_extra():
"""
Verifies that the extra information sent with the user agent for this
package correctly marks that the package was installed with conda.
"""
import qsharp.version
assert getattr(qsharp.version, "is_conda", False)
assert qsharp.version._user_agent_extra == f"[{qsharp.__version__}](qsharp:conda)"
# Forward tests from the unit testing modules.
def _forward_tests(module_name) -> None:
module = import_module(module_name)
for attr_name in dir(module):
if attr_name.startswith("test_") or attr_name.startswith("Test"):
print(f"Forwarding {attr_name} from {module_name}.")
globals()[attr_name] = getattr(module, attr_name)
_forward_tests("qsharp.tests.test_iqsharp")
_forward_tests("qsharp.tests.test_serialization")
```
#### File: qsharp-core/qsharp/azure.py
```python
import qsharp
import json
from typing import List, Dict, Callable, Any, Union
from enum import Enum
## LOGGING ##
import logging
logger = logging.getLogger(__name__)
## EXPORTS ##
__all__ = [
'connect',
'target',
'submit',
'execute',
'status',
'output',
'jobs',
'AzureTarget',
'AzureJob',
'AzureError'
]
## CLASSES ##
class AzureTarget(object):
"""
Represents an instance of an Azure Quantum execution target for Q# job submission.
"""
def __init__(self, data: Dict):
self.__dict__ = data
self.id = data["id"]
self.current_availability = data["current_availability"]
self.average_queue_time = data["average_queue_time"]
def __repr__(self) -> str:
return self.__dict__.__repr__()
def __eq__(self, other) -> bool:
if not isinstance(other, AzureTarget):
# don't attempt to compare against unrelated types
return NotImplemented
return self.__dict__ == other.__dict__
class AzureJob(object):
"""
Represents an instance of an Azure Quantum job.
"""
def __init__(self, data: Dict):
self.__dict__ = data
self.id = data["id"]
self.name = data["name"]
self.status = data["status"]
self.uri = data["uri"]
self.provider = data["provider"]
self.target = data["target"]
self.creation_time = data["creation_time"]
self.begin_execution_time = data["begin_execution_time"]
self.end_execution_time = data["end_execution_time"]
def __repr__(self) -> str:
return self.__dict__.__repr__()
def __eq__(self, other) -> bool:
if not isinstance(other, AzureJob):
# don't attempt to compare against unrelated types
return NotImplemented
return self.__dict__ == other.__dict__
class AzureError(Exception):
"""
Contains error information resulting from an attempt to interact with Azure.
"""
def __init__(self, data: Dict):
self.__dict__ = data
self.error_code = data["error_code"]
self.error_name = data["error_name"]
self.error_description = data["error_description"]
def __repr__(self) -> str:
return self.__dict__.__repr__()
def __eq__(self, other) -> bool:
if not isinstance(other, AzureError):
# don't attempt to compare against unrelated types
return NotImplemented
return self.__dict__ == other.__dict__
## FUNCTIONS ##
def connect(**params) -> List[AzureTarget]:
"""
Connects to an Azure Quantum workspace or displays current connection status.
See https://docs.microsoft.com/qsharp/api/iqsharp-magic/azure.connect for more details.
"""
result = qsharp.client._execute_magic(f"azure.connect", raise_on_stderr=False, **params)
if "error_code" in result: raise AzureError(result)
return [AzureTarget(target) for target in result]
def target(name : str = '', **params) -> AzureTarget:
"""
Sets or displays the active execution target for Q# job submission in an Azure Quantum workspace.
See https://docs.microsoft.com/qsharp/api/iqsharp-magic/azure.target for more details.
"""
result = qsharp.client._execute_magic(f"azure.target {name}", raise_on_stderr=False, **params)
if "error_code" in result: raise AzureError(result)
return AzureTarget(result)
def submit(op : qsharp.QSharpCallable, **params) -> AzureJob:
"""
Submits a job to an Azure Quantum workspace.
See https://docs.microsoft.com/qsharp/api/iqsharp-magic/azure.submit for more details.
"""
result = qsharp.client._execute_callable_magic("azure.submit", op, raise_on_stderr=False, **params)
if "error_code" in result: raise AzureError(result)
return AzureJob(result)
def execute(op : qsharp.QSharpCallable, **params) -> Dict:
"""
Submits a job to an Azure Quantum workspace and waits for completion.
See https://docs.microsoft.com/qsharp/api/iqsharp-magic/azure.execute for more details.
"""
result = qsharp.client._execute_callable_magic("azure.execute", op, raise_on_stderr=False, **params)
if "error_code" in result: raise AzureError(result)
return result
def status(jobId : str = '', **params) -> AzureJob:
"""
Displays status for a job in the current Azure Quantum workspace.
See https://docs.microsoft.com/qsharp/api/iqsharp-magic/azure.status for more details.
"""
result = qsharp.client._execute_magic(f"azure.status {jobId}", raise_on_stderr=False, **params)
if "error_code" in result: raise AzureError(result)
return AzureJob(result)
def output(jobId : str = '', **params) -> Dict:
"""
Displays results for a job in the current Azure Quantum workspace.
See https://docs.microsoft.com/qsharp/api/iqsharp-magic/azure.output for more details.
"""
result = qsharp.client._execute_magic(f"azure.output {jobId}", raise_on_stderr=False, **params)
if "error_code" in result: raise AzureError(result)
return result
def jobs(filter : str = '', count : int = 30, **params) -> List[AzureJob]:
"""
Displays a list of jobs in the current Azure Quantum workspace.
See https://docs.microsoft.com/qsharp/api/iqsharp-magic/azure.jobs for more details.
"""
result = qsharp.client._execute_magic(f"azure.jobs \"{filter}\" count={count}", raise_on_stderr=False, **params)
if "error_code" in result: raise AzureError(result)
return [AzureJob(job) for job in result]
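# Illustrative usage sketch (editorial addition; the import path, workspace
# arguments, and operation name are assumptions, not part of the original module):
#     from qsharp.azure import connect, target, submit, status
#     connect(resourceId="/subscriptions/.../myWorkspace", location="westus")
#     target("ionq.simulator")
#     job = submit(MyQSharpOperation)
#     print(status(job.id).status)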
```
#### File: qsharp/tests/utils.py
```python
import importlib
import os
import qsharp
import sys
def set_environment_variables():
'''
Sets environment variables for test execution and restarts the IQ# kernel.
'''
os.environ["AZURE_QUANTUM_ENV"] = "mock"
os.environ["IQSHARP_AUTO_LOAD_PACKAGES"] = "$null"
importlib.reload(qsharp)
if "qsharp.chemistry" in sys.modules:
importlib.reload(qsharp.chemistry)
``` |
{
"source": "jond01/medio",
"score": 2
} |
#### File: medio/backends/pdcm_io.py
```python
from pathlib import Path
import nibabel as nib
import numpy as np
import pydicom
from dicom_numpy import combine_slices
from medio.backends.nib_io import NibIO
from medio.backends.pdcm_unpack_ds import unpack_dataset
from medio.metadata.convert_nib_itk import inv_axcodes
from medio.metadata.metadata import MetaData
from medio.metadata.pdcm_ds import convert_ds, MultiFrameFileDataset
from medio.utils.files import parse_series_uids
class PdcmIO:
coord_sys = 'itk'
# channels axes in the transposed image for pydicom and dicom-numpy. The actual axis is the first or the second
# value of the tuple, according to the planar configuration (which is either 0 or 1)
DEFAULT_CHANNELS_AXES_PYDICOM = (0, -1)
DEFAULT_CHANNELS_AXES_DICOM_NUMPY = (0, 2)
@staticmethod
def read_img(input_path, desired_ornt=None, header=False, channels_axis=None, globber='*',
allow_default_affine=False, series=None):
"""
Read a dicom file or folder (series) and return the numpy array and the corresponding metadata
:param input_path: path-like object (str or pathlib.Path) of the file or directory to read
:param desired_ornt: str, tuple of str or None - the desired orientation of the image to be returned
:param header: whether to include a header attribute with additional metadata in the returned metadata (single
file only)
:param channels_axis: if not None and the image is channeled (e.g. RGB) move the channels to channels_axis in
the returned image array
:param globber: relevant for a directory - globber for selecting the series files (all files by default)
:param allow_default_affine: whether to allow default affine when some tags are missing (multiframe file only)
:param series: str or int of the series to read (in the case of multiple series in a directory)
:return: numpy array and metadata
"""
input_path = Path(input_path)
temp_channels_axis = -1 # if there are channels, they must be in the last axis for the reorientation
if input_path.is_dir():
img, metadata, channeled = PdcmIO.read_dcm_dir(input_path, header, globber,
channels_axis=temp_channels_axis, series=series)
else:
img, metadata, channeled = PdcmIO.read_dcm_file(
input_path, header, allow_default_affine=allow_default_affine, channels_axis=temp_channels_axis)
img, metadata = PdcmIO.reorient(img, metadata, desired_ornt)
# move the channels after the reorientation
if channeled and channels_axis != temp_channels_axis:
img = np.moveaxis(img, temp_channels_axis, channels_axis)
return img, metadata
@staticmethod
def read_dcm_file(filename, header=False, allow_default_affine=False, channels_axis=None):
"""
Read a single dicom file.
Return the image array, metadata, and whether it has channels
"""
ds = pydicom.dcmread(filename)
ds = convert_ds(ds)
if ds.__class__ is MultiFrameFileDataset:
img, affine = unpack_dataset(ds, allow_default_affine=allow_default_affine)
else:
img, affine = combine_slices([ds])
metadata = PdcmIO.aff2meta(affine)
if header:
metadata.header = {str(key): ds[key] for key in ds.keys()}
samples_per_pixel = ds.SamplesPerPixel
img = PdcmIO.move_channels_axis(img, samples_per_pixel=samples_per_pixel, channels_axis=channels_axis,
planar_configuration=ds.get('PlanarConfiguration', None),
default_axes=PdcmIO.DEFAULT_CHANNELS_AXES_PYDICOM)
return img, metadata, samples_per_pixel > 1
@staticmethod
def read_dcm_dir(input_dir, header=False, globber='*', channels_axis=None, series=None):
"""
Reads a 3D dicom image: input path can be a file or directory (DICOM series).
Return the image array, metadata, and whether it has channels
"""
# find all dicom files within the specified folder, read every file separately and sort them by InstanceNumber
slices = PdcmIO.extract_slices(input_dir, globber=globber, series=series)
img, affine = combine_slices(slices)
metadata = PdcmIO.aff2meta(affine)
if header:
# TODO: add header support, something like
            # metadata.header = [{str(key): ds[key] for key in ds.keys()} for ds in slices]
raise NotImplementedError("header=True is currently not supported for a series")
samples_per_pixel = slices[0].SamplesPerPixel
img = PdcmIO.move_channels_axis(img, samples_per_pixel=samples_per_pixel, channels_axis=channels_axis,
planar_configuration=slices[0].get('PlanarConfiguration', None),
default_axes=PdcmIO.DEFAULT_CHANNELS_AXES_DICOM_NUMPY)
return img, metadata, samples_per_pixel > 1
@staticmethod
def extract_slices(input_dir, globber='*', series=None):
"""Extract slices from input_dir and return them sorted"""
files = Path(input_dir).glob(globber)
slices = [pydicom.dcmread(filename) for filename in files]
# filter by Series Instance UID
datasets = {}
for slc in slices:
key = slc.SeriesInstanceUID
datasets[key] = datasets.get(key, []) + [slc]
series_uid = parse_series_uids(input_dir, datasets.keys(), series, globber)
slices = datasets[series_uid]
slices.sort(key=lambda ds: ds.get('InstanceNumber', 0))
return slices
@staticmethod
def aff2meta(affine):
return MetaData(affine, coord_sys=PdcmIO.coord_sys)
@staticmethod
def move_channels_axis(array, samples_per_pixel, channels_axis=None, planar_configuration=None,
default_axes=DEFAULT_CHANNELS_AXES_PYDICOM):
"""Move the channels axis from the original axis to the destined channels_axis"""
if (samples_per_pixel == 1) or (channels_axis is None):
# no rearrangement is needed
return array
# extract the original channels axis
if planar_configuration not in [0, 1]:
raise ValueError(f'Invalid Planar Configuration value: {planar_configuration}')
orig_axis = default_axes[planar_configuration]
flag = True # original channels axis is assigned
shape = array.shape
# validate that the assigned axis matches samples_per_pixel, if not - try to search for it
if shape[orig_axis] != samples_per_pixel:
flag = False
for i, sz in enumerate(shape):
if sz == samples_per_pixel:
orig_axis = i
flag = True
break
if not flag:
raise ValueError('The original channels axis was not detected')
return np.moveaxis(array, orig_axis, channels_axis)
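    # Illustrative example (editorial addition, not part of the original module):
    # for an RGB slice read by pydicom with PlanarConfiguration == 0, the channels
    # end up in the first axis of the transposed array, so
    #     PdcmIO.move_channels_axis(arr, samples_per_pixel=3, channels_axis=-1,
    #                               planar_configuration=0)
    # moves that leading axis to the end, leaving the channels last.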
@staticmethod
def reorient(img, metadata, desired_ornt):
"""
Reorient img array and affine (in the metadata) to desired_ornt (str) using nibabel.
desired_ornt is in itk convention.
Note that if img has channels (RGB for example), they must be in last axis
"""
if (desired_ornt is None) or (desired_ornt == metadata.ornt):
return img, metadata
# convert from pydicom (itk) to nibabel convention
metadata.convert(NibIO.coord_sys)
orig_ornt = metadata.ornt
desired_ornt = inv_axcodes(desired_ornt)
# use nibabel for the reorientation
img_struct = nib.spatialimages.SpatialImage(img, metadata.affine)
reoriented_img_struct = NibIO.reorient(img_struct, desired_ornt)
img = np.asanyarray(reoriented_img_struct.dataobj)
metadata = MetaData(reoriented_img_struct.affine, orig_ornt=orig_ornt, coord_sys=NibIO.coord_sys,
header=metadata.header)
# convert back to pydicom convention
metadata.convert(PdcmIO.coord_sys)
return img, metadata
@staticmethod
def save_arr2dcm_file(output_filename, template_filename, img_arr, dtype=None, keep_rescale=False):
"""
        Write a single-file dicom image based on a template file, omitting the intensity transformation of the
        template dataset unless keep_rescale is True
:param output_filename: path-like object of the output file to be saved
:param template_filename: the single dicom scan whose metadata is used
:param img_arr: numpy array of the image to be saved, should be in the same orientation as template_filename
:param dtype: the dtype for the numpy array, for example 'int16'. If None - will use the dtype of the template
:param keep_rescale: whether to keep intensity rescale values
"""
ds = pydicom.dcmread(template_filename)
ds = convert_ds(ds)
if not keep_rescale:
if isinstance(ds, MultiFrameFileDataset):
ds.del_intensity_trans()
else:
del ds.RescaleSlope
del ds.RescaleIntercept
if dtype is None:
img_arr = img_arr.astype(ds.pixel_array.dtype, copy=False)
else:
img_arr = img_arr.astype(dtype, copy=False)
ds.PixelData = img_arr.tobytes()
ds.save_as(output_filename)
```
#### File: medio/backends/pdcm_unpack_ds.py
```python
import logging
import numpy as np
from dicom_numpy.combine_slices import _validate_image_orientation, _extract_cosines, _requires_rescaling
logger = logging.getLogger(__name__)
def unpack_dataset(dataset, rescale=None, allow_default_affine=False):
"""
    Given a pydicom dataset of a single image file, return a three-dimensional numpy array.
Also calculate a 4x4 affine transformation matrix that converts the ijk-pixel-indices
into the xyz-coordinates in the DICOM patient's coordinate system.
Returns a two-tuple containing the 3D-ndarray and the affine matrix.
If `rescale` is set to `None` (the default), then the image array dtype
will be preserved, unless any of the DICOM images contain either the
`Rescale Slope
<https://dicom.innolitics.com/ciods/ct-image/ct-image/00281053>`_ or the
`Rescale Intercept <https://dicom.innolitics.com/ciods/ct-image/ct-image/00281052>`_
attributes. If either of these attributes are present they will be applied.
If `rescale` is `True` the voxels will be cast to `float32`, if set to
`False`, the original dtype will be preserved even if DICOM rescaling information is present.
The returned array has the column-major byte-order.
This function requires that the datasets:
- Be in same series (have the same
`Series Instance UID <https://dicom.innolitics.com/ciods/ct-image/general-series/0020000e>`_,
`Modality <https://dicom.innolitics.com/ciods/ct-image/general-series/00080060>`_,
and `SOP Class UID <https://dicom.innolitics.com/ciods/ct-image/sop-common/00080016>`_).
- The binary storage of each slice must be the same (have the same
`Bits Allocated <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280100>`_,
`Bits Stored <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280101>`_,
`High Bit <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280102>`_, and
`Pixel Representation <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280103>`_).
- The image slice must approximately form a grid. This means there can not
be any missing internal slices (missing slices on the ends of the dataset
are not detected).
- It also means that each slice must have the same
`Rows <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280010>`_,
`Columns <https://dicom.innolitics.com/ciods/ct-image/image-pixel/00280011>`_,
`Pixel Spacing <https://dicom.innolitics.com/ciods/ct-image/image-plane/00280030>`_, and
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute values.
- The direction cosines derived from the
`Image Orientation (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200037>`_
attribute must, within 1e-4, have a magnitude of 1. The cosines must
also be approximately perpendicular (their dot-product must be within
1e-4 of 0). Warnings are displayed if any of these approximations are
below 1e-8, however, since we have seen real datasets with values up to
1e-4, we let them pass.
- The `Image Position (Patient) <https://dicom.innolitics.com/ciods/ct-image/image-plane/00200032>`_
values must approximately form a line.
If any of these conditions are not met, a `dicom_numpy.DicomImportException` is raised.
"""
try:
_validate_image_orientation(dataset.ImageOrientationPatient)
transform = _ijk_to_patient_xyz_transform_matrix(dataset)
except AttributeError as e:
if allow_default_affine:
transform = np.eye(4)
else:
raise AttributeError(str(e) + "\nTry using: allow_default_affine=True")
voxels = _unpack_pixel_array(dataset, rescale)
return voxels, transform
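# Illustrative usage sketch (editorial addition; the file name is an assumption):
#     ds = pydicom.dcmread('multiframe.dcm')
#     voxels, affine = unpack_dataset(convert_ds(ds), allow_default_affine=True)
# where convert_ds comes from medio.metadata.pdcm_ds (this is how PdcmIO calls it).
# With allow_default_affine=True, missing orientation/position tags fall back to an
# identity affine instead of raising an AttributeError.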
def _unpack_pixel_array(dataset, rescale=None):
voxels = dataset.pixel_array.T
if rescale is None:
rescale = _requires_rescaling(dataset)
if rescale:
voxels = voxels.astype('int16', copy=False) # TODO: it takes time! Consider view.
slope = getattr(dataset, 'RescaleSlope', 1)
intercept = getattr(dataset, 'RescaleIntercept', 0)
if int(slope) == slope and int(intercept) == intercept:
slope = int(slope)
intercept = int(intercept)
voxels = voxels * slope + intercept
return voxels
def _ijk_to_patient_xyz_transform_matrix(dataset):
image_orientation = dataset.ImageOrientationPatient
row_cosine, column_cosine, slice_cosine = _extract_cosines(image_orientation)
row_spacing, column_spacing = dataset.PixelSpacing
# slice_spacing = dataset.get('SpacingBetweenSlices', 0)
transform = np.identity(4, dtype=np.float32)
transform[:3, 0] = row_cosine * column_spacing
transform[:3, 1] = column_cosine * row_spacing
transform[:3, 2] = (np.array(dataset.slice_position(-1)) - dataset.slice_position(0)
) / (dataset.NumberOfFrames - 1)
# transform[:3, 2] = slice_cosine * slice_spacing
transform[:3, 3] = dataset.ImagePositionPatient
return transform
```
#### File: medio/metadata/convert_nib_itk.py
```python
import numpy as np
from medio.metadata.affine import Affine
from medio.utils.two_way_dict import TwoWayDict
# store compactly axis directions codes
axes_inv = TwoWayDict()
axes_inv['R'] = 'L'
axes_inv['A'] = 'P'
axes_inv['S'] = 'I'
def inv_axcodes(axcodes):
"""Inverse axes codes chars, for example: SPL -> IAR"""
if axcodes is None:
return None
new_axcodes = ''
for code in axcodes:
new_axcodes += axes_inv[code]
return new_axcodes
def convert_affine(affine):
# conversion matrix of the affine from itk to nibabel and vice versa
convert_aff_mat = np.diag([-1, -1, 1, 1])
# for 2d image:
if affine.shape[0] == 3:
convert_aff_mat = np.diag([-1, -1, 1])
new_affine = convert_aff_mat @ affine
if isinstance(affine, Affine):
new_affine = Affine(new_affine)
return new_affine
def convert_nib_itk(affine, *axcodes):
"""Convert affine and orientations (original and current orientations) from nibabel to itk and vice versa"""
new_affine = convert_affine(affine)
new_axcodes = []
for axcode in axcodes:
new_axcodes += [inv_axcodes(axcode)]
return (new_affine, *new_axcodes)
``` |
{
"source": "jond64/salt",
"score": 2
} |
#### File: salt/beacons/inotify.py
```python
from __future__ import absolute_import
import collections
# Import third party libs
try:
import pyinotify
HAS_PYINOTIFY = True
DEFAULT_MASK = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY
except ImportError:
HAS_PYINOTIFY = False
DEFAULT_MASK = None
__virtualname__ = 'inotify'
def __virtual__():
if HAS_PYINOTIFY:
return __virtualname__
return False
def _enqueue(revent):
'''
Enqueue the event
'''
__context__['inotify.que'].append(revent)
def _get_notifier():
'''
Check the context for the notifier and construct it if not present
'''
if 'inotify.notifier' in __context__:
return __context__['inotify.notifier']
__context__['inotify.que'] = collections.deque()
wm = pyinotify.WatchManager()
__context__['inotify.notifier'] = pyinotify.Notifier(wm, _enqueue)
return __context__['inotify.notifier']
def beacon(config):
'''
Watch the configured files
'''
ret = []
notifier = _get_notifier()
wm = notifier._watch_manager
# Read in existing events
# remove watcher files that are not in the config
# update all existing files with watcher settings
# return original data
if notifier.check_events(1):
notifier.read_events()
notifier.process_events()
while __context__['inotify.que']:
sub = {}
event = __context__['inotify.que'].popleft()
sub['tag'] = event.path
sub['path'] = event.pathname
sub['change'] = event.maskname
ret.append(sub)
current = set()
for wd in wm.watches:
current.add(wm.watches[wd].path)
need = set(config)
for path in current.difference(need):
# These need to be removed
for wd in wm.watches:
if path == wm.watches[wd].path:
wm.rm_watch(wd)
for path in config:
if isinstance(config[path], dict):
mask = config[path].get('mask', DEFAULT_MASK)
rec = config[path].get('rec', False)
auto_add = config[path].get('auto_add', False)
else:
mask = DEFAULT_MASK
rec = False
auto_add = False
# TODO: make the config handle more options
if path not in current:
wm.add_watch(
path,
mask,
rec=rec,
auto_add=auto_add)
else:
for wd in wm.watches:
if path == wm.watches[wd].path:
update = False
if wm.watches[wd].mask != mask:
update = True
if wm.watches[wd].auto_add != auto_add:
update = True
if update:
wm.update_watch(
wd,
mask=mask,
rec=rec,
auto_add=auto_add)
return ret
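# Illustrative config sketch (editorial addition, not part of the original module):
# the beacon expects a mapping of watched paths to per-path options, roughly
#     beacon({'/etc/important_file': {'mask': DEFAULT_MASK,
#                                     'rec': True,
#                                     'auto_add': True}})
# A path mapped to anything other than a dict falls back to DEFAULT_MASK with
# rec=False and auto_add=False.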
``` |
{
"source": "Jondalar/plex",
"score": 2
} |
#### File: Contents/Code/__init__.py
```python
ART = 'art-default.jpg'
ICON = 'icon-default.png'
REGEX = r'%s = new Array\((.+?)\);'
CHANNELS_URL = 'http://%s:%s/channels.xml'
LISTGROUPS_URL = 'http://%s:%s/channels/groups.xml'
SINGLEGROUP_URL= 'http://%s:%s/channels.xml?group=%s'
CHANNELIMAGE_URL = 'http://%s:%s/channels/image/%s'
CHANNELEPG_CURRENT_URL = 'http://%s:%s/events/%s.xml?timespan=%s'
CHANNELEPG_IMAGE_URL = 'http://%s:%s/events/image/%s/%s'
STREAM_URL = 'http://%s:%s/%s/%s'
NAMESPACESGROUP = {'group': 'http://www.domain.org/restfulapi/2011/groups-xml'}
NAMESPACESCHANNEL = {'channel': 'http://www.domain.org/restfulapi/2011/channels-xml'}
NAMESPACEEPG = {'epg': 'http://www.domain.org/restfulapi/2011/events-xml'}
####################################################################################################
def Start():
Plugin.AddViewGroup('List', viewMode='List', mediaType='items')
ObjectContainer.art = R(ART)
ObjectContainer.title1 = 'VDR Live TV'
DirectoryObject.thumb = R(ICON)
Resource.AddMimeType('image/png','png')
    # There should probably also be an "initial Channel/EPG" and some kind of "Close Stream" hook here, right?
####################################################################################################
@handler('/video/vdr', 'VDR Streamdev Client', art=ART, thumb=ICON)
def MainMenu():
groupListContainer = ObjectContainer(view_group='List', no_cache=True)
if Prefs['host'] and Prefs['port'] and Prefs['stream']:
xml = LISTGROUPS_URL % (Prefs['host'], Prefs['restapi'])
try:
GroupList = XML.ElementFromURL(xml)
except:
Log("VDR Plugin: Couldn't connect to VDR.")
return None
Log("Loading VDR GroupsList via restfulapi-plugin at Port %s" % (Prefs['restapi']))
numberOfGroups = int(GroupList.xpath('//group:count/text()', namespaces=NAMESPACESGROUP)[0])
item = 0
for item in range(numberOfGroups):
            # fetch each name individually from the XPath object instead of from the list
groupName = GroupList.xpath('//group:group/text()', namespaces=NAMESPACESGROUP)[item]
groupListContainer.add(DirectoryObject(key = Callback(DisplayGroupChannels, name=groupName), title = groupName))
groupListContainer.add(PrefsObject(title='Preferences', thumb=R('icon-prefs.png')))
return groupListContainer
#################################################################################
@route("/video/vdr/DisplayGroupChannels")
def DisplayGroupChannels(name):
    Log("Calling DisplayGroupChannels %s" % (name))
groupNameURLconform = name.replace(" ", "%20")
xml = SINGLEGROUP_URL % (Prefs['host'], Prefs['restapi'], groupNameURLconform)
try:
Log("VDR Plugin: Loading channels.")
channelGroupList = XML.ElementFromURL(xml)
except:
Log("VDR Plugin: Couldn't get channels.")
return None
numberOfChannels = int(channelGroupList.xpath('//channel:count/text()', namespaces=NAMESPACESCHANNEL)[0])
channelListContainer = ObjectContainer(title2=name, view_group='List', no_cache=True)
item = 0
Channel_ID = ""
Channel_Name= ""
for item in range(numberOfChannels):
Channel_Name = channelGroupList.xpath('//channel:param[@name="name"]/text()', namespaces=NAMESPACESCHANNEL)[item]
Channel_ID = channelGroupList.xpath('//channel:param[@name="channel_id"]/text()', namespaces=NAMESPACESCHANNEL)[item]
hasChannelLogo = channelGroupList.xpath('//channel:param[@name="image"]/text()', namespaces=NAMESPACESCHANNEL)[item]
channelListContainer.add(LiveTVMenu(sender=Channel_Name, channel=Channel_ID, thumb=hasChannelLogo))
return channelListContainer
####################################################################################################
@route("/video/vdr/LiveTVMenu")
def LiveTVMenu(sender, channel, thumb, include_oc=False):
if (thumb == "true"):
Log("Channellogo found")
thumb = CHANNELIMAGE_URL % (Prefs['host'], Prefs['restapi'], channel)
else:
Log("No channel Logo in data")
thumb = R(ICON)
currentEpgXml = CHANNELEPG_CURRENT_URL % (Prefs['host'], Prefs['restapi'], channel, "1")
currentEpgImage = thumb
try:
Log("Loading current EPG for %s" % channel)
currentEpg = XML.ElementFromURL(currentEpgXml, encoding='UTF8')
except:
Log("VDR Plugin: Couldn't get EPG")
#try:
# currentEvent = currentEpg.xpath('//epg:param[@name="id"]/text()', namespaces=NAMESPACEEPG)
#except:
# Log("VDR Plugin: Couldn't get EPG")
try:
currentTitle = currentEpg.xpath('//epg:param[@name="title"]/text()', namespaces=NAMESPACEEPG)[0]
#currentEpgImage = CHANNELEPG_IMAGE_URL % (Prefs['host'], Prefs['restapi'], currentEvent, "0")
except:
currentTitle = "no data"
try:
currentDescription = currentEpg.xpath('//epg:param[@name="description"]/text()', namespaces=NAMESPACEEPG)[0]
except:
currentDescription = "no data"
try:
#currentSubtitle = currentEpg.xpath('//epg:param[@name="short_text"]/text()', namespaces=NAMESPACEEPG)[0]
currentStartTime = currentEpg.xpath('//epg:param[@name="start_time"]/text()', namespaces=NAMESPACEEPG)[0]
except:
currentStartTime = "0"
try:
currentDuration = currentEpg.xpath('//epg:param[@name="duration"]/text()', namespaces=NAMESPACEEPG)[0]
except:
currentDuration = "0"
stream = STREAM_URL % (Prefs['host'], Prefs['port'], Prefs['stream'],channel)
if currentStartTime == "0":
currentStartTime = "no start time"
else:
currentStartTime = Datetime.FromTimestamp(int(currentStartTime))
currentDuration = int(currentDuration)*1000
video = VideoClipObject(
key = Callback(LiveTVMenu, sender=sender, channel=channel, thumb=currentEpgImage, include_oc=True),
#studio = sender,
title = ("%s | %s" % (sender, currentTitle)),
#original_title = currentTitle,
#source_title = sender,
#tagline = currentTitle,
#source_title = currentStartTimes,
summary = ("%s | %s" % (currentStartTime, currentDescription)),
duration = currentDuration,
rating_key = currentTitle,
thumb = thumb
)
monat = MediaObject(
parts = [PartObject(key = stream, duration = currentDuration)]
)
video.add(monat)
if include_oc:
oc = ObjectContainer()
oc.add(video)
return oc
else:
return video
``` |
{
"source": "JondareHM/Python-Prolog-Interpreter",
"score": 3
} |
#### File: JondareHM/Python-Prolog-Interpreter/editor.py
```python
from tkinter import Tk, Text, Menu, filedialog, Label, Button, END, W, E, FALSE
from tkinter.scrolledtext import ScrolledText
from prologpy.solver import Solver
def is_file_path_selected(file_path):
return file_path is not None and file_path != ""
def get_file_contents(file_path):
"""Return a string containing the file contents of the file located at the
specified file path """
with open(file_path, encoding="utf-8") as f:
file_contents = f.read()
return file_contents
class Editor(object):
def __init__(self, root_):
self.root = root_
self.file_path = None
self.root.title("Prolog Interpreter")
# Create a rule label
self.rule_editor_label = Label(
root, text="Prolog Rules: ", padx=10, pady=1
)
self.rule_editor_label.grid(
sticky="W", row=0, column=0, columnspan=2, pady=3
)
# Create rule editor where we can edit the rules we want to enter:
self.rule_editor = ScrolledText(
root, width=100, height=30, padx=10, pady=10
)
self.rule_editor.grid(
sticky=W + E, row=1, column=0, columnspan=2, padx=10
)
self.rule_editor.config(wrap="word", undo=True)
self.rule_editor.focus()
# Create a query label:
self.query_label = Label(root, text="Prolog Query:", padx=10, pady=1)
self.query_label.grid(sticky=W, row=2, column=0, columnspan=2, pady=3)
# Create the Prolog query editor we'll use to query our rules:
self.query_editor = Text(root, width=77, height=2, padx=10, pady=10)
self.query_editor.grid(sticky=W, row=3, column=0, pady=3, padx=10)
self.query_editor.config(wrap="word", undo=True)
# Create a run button which runs the query against our rules and outputs the
# results in our solutions text box / editor.
self.run_button = Button(
root,
text="Find Query Solutions",
height=2,
width=20,
command=self.run_query,
)
self.run_button.grid(sticky=E, row=3, column=1, pady=3, padx=10)
# Create a solutions label
self.solutions_label = Label(
root, text="Query Solutions:", padx=10, pady=1
)
self.solutions_label.grid(
sticky="W", row=4, column=0, columnspan=2, padx=10, pady=3
)
# Create a text box which we'll use to display our Prolog query solutions:
self.solutions_display = ScrolledText(
root, width=100, height=5, padx=10, pady=10
)
self.solutions_display.grid(
row=5, column=0, columnspan=2, padx=10, pady=7
)
# Finally, let's create the file menu
self.menu_bar = self.create_file_menu()
def create_file_menu(self):
"""Create a menu which will allow us to open / save our Prolog rules, run our
query, and exit our editor interface """
menu_bar = Menu(root)
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(
label="Open...", underline=1, command=self.open_file
)
file_menu.add_separator()
file_menu.add_command(
label="Save", underline=1, command=self.save_file
)
file_menu.add_command(
label="Save As...", underline=5, command=self.save_file_as
)
file_menu.add_separator()
file_menu.add_command(label="Run", underline=1, command=self.run_query)
file_menu.add_separator()
file_menu.add_command(
label="Exit", underline=2, command=self.root.destroy
)
menu_bar.add_cascade(label="File", underline=0, menu=file_menu)
self.root.config(menu=menu_bar)
return menu_bar
def set_busy(self):
# Show a busy cursor and update the UI
self.root.config(cursor="watch")
self.root.update()
def set_not_busy(self):
# Show a regular cursor
self.root.config(cursor="")
def run_query(self):
"""Interpret the entered rules and query and display the results in the
solutions text box """
# Delete all of the text in our solutions display text box
self.solutions_display.delete("1.0", END)
self.set_busy()
# Fetch the raw rule / query text entered by the user
rules_text = self.rule_editor.get(1.0, "end-1c")
query_text = self.query_editor.get(1.0, "end-1c")
# Create a new solver so we can try to query for solutions.
try:
solver = Solver(rules_text)
except Exception as e:
self.handle_exception("Error processing prolog rules.", str(e))
return
# Attempt to find the solutions and handle any exceptions gracefully
try:
solutions = solver.find_solutions(query_text)
except Exception as e:
self.handle_exception("Error processing prolog query.", str(e))
return
# If our query returns a boolean, we simply display a 'Yes' or a 'No'
# depending on its value
if isinstance(solutions, bool):
self.solutions_display.insert(END, "Yes." if solutions else "No.")
# Our solver returned a map, so we display the variable name to value mappings
elif isinstance(solutions, dict):
self.solutions_display.insert(
END,
"\n".join(
"{} = {}"
# If our solution is a list contining one item, we show that
# item, otherwise we display the entire list
.format(variable, value[0] if len(value) == 1 else value)
for variable, value in solutions.items()
),
)
else:
# We know we have no matching solutions in this instance so we provide
# relevant feedback
self.solutions_display.insert(END, "No solutions found.")
self.set_not_busy()
def handle_exception(self, error_message, exception=""):
"""Handle the exception by printing an error message as well as exception in
our solution text editor / display """
self.solutions_display.insert(END, error_message + "\n")
self.solutions_display.insert(END, str(exception) + "\n")
self.set_not_busy()
def set_rule_editor_text(self, text):
self.rule_editor.delete(1.0, "end")
self.rule_editor.insert(1.0, text)
self.rule_editor.edit_modified(False)
def open_file(self, file_path=None):
        # Open a new file dialog which allows the user to select a file to open
if file_path is None:
file_path = filedialog.askopenfilename()
if is_file_path_selected(file_path):
file_contents = get_file_contents(file_path)
# Set the rule editor text to contain the selected file contents
self.set_rule_editor_text(file_contents)
self.file_path = file_path
def save_file(self):
"""If we have specified a file path, save the file - otherwise, prompt the
user to specify the file location prior to saving the file """
if self.file_path is None:
result = self.save_file_as()
else:
result = self.save_file_as(file_path=self.file_path)
return result
def write_editor_text_to_file(self, file):
editor_text = self.rule_editor.get(1.0, "end-1c")
file.write(bytes(editor_text, "UTF-8"))
self.rule_editor.edit_modified(False)
def save_file_as(self, file_path=None):
# If there is no file path specified, prompt the user with a dialog which
# allows him/her to select where they want to save the file
if file_path is None:
file_path = filedialog.asksaveasfilename(
filetypes=(
("Text files", "*.txt"),
("Prolog files", "*.pl *.pro"),
("All files", "*.*"),
)
)
try:
# Write the Prolog rule editor contents to the file location
with open(file_path, "wb") as file:
self.write_editor_text_to_file(file)
self.file_path = file_path
return "saved"
except FileNotFoundError:
return "cancelled"
def undo(self):
self.rule_editor.edit_undo()
def redo(self):
self.rule_editor.edit_redo()
if __name__ == "__main__":
root = Tk()
editor = Editor(root)
# Don't allow users to re-size the editor
root.resizable(width=FALSE, height=FALSE)
root.mainloop()
``` |
{
"source": "jondave/ARWACv4",
"score": 2
} |
#### File: ARWACv4/btTesting/evdev-test-friendly.py
```python
from evdev import InputDevice, categorize, ecodes
controller = InputDevice('/dev/input/event0')
print(controller)
print(controller.capabilities(verbose=True))
def readInput():
events = controller.read()
try:
for event in events:
print(event.code, event.type, event.value)
#print(event.type, event.code, event.value)
#print(event.ecode)
except IOError:
pass
#return state
while (True):
readInput()
``` |
{
"source": "jondavid-black/AutoDocTest",
"score": 3
} |
#### File: AutoDocTest/src/assembledocuments.py
```python
import os
import sys
import argparse
from m2r import convert
def processTocRow(docTitle, section, title, content):
#print(f'\tSection: {section} Heading Level: {level} Title: {title} Content File: {content}')
output = ""
level = len(section.split(".")) + 1
if (len(title) > 0):
# Add section heading
output += ('#' * level) + " " + title + "\n"
convertToRst = len(content) == 0 or content.endswith(".md")
# Process content (if present)
if (len(content) > 0):
reader = open(content, encoding="utf8")
try:
# strip off any Jekyll metadata at the top of the file
inMetadata = False
for line in reader.readlines():
if (line.strip() == "---"):
inMetadata = not inMetadata
continue
if (not inMetadata):
if (line.startswith("#")):
# make sure heading level is correct-ish
output += ('#' * level) + line + "\n"
else:
# append line to output
output += line
finally:
reader.close()
rst = ""
if (convertToRst):
rst = convert(output)
else:
rst = output
# add a page break
rst += "\n.. raw:: pdf\n \n PageBreak\n"
return rst
def processDocFile(docFile):
category = ""
title = ""
order = ""
level = 0
output = ""
reader = open(docFile, encoding="utf8")
try:
# strip off any Jekyll metadata at the top of the file
inMetadata = False
for line in reader.readlines():
if (line.strip() == "---"):
if (inMetadata):
output += ('#' * level) + title + "\n"
inMetadata = not inMetadata
continue
if (inMetadata):
parts = line.split(":")
if (parts[0].strip() == "category"):
category = parts[1].strip()
continue
if (parts[0].strip() == "title"):
title = parts[1].strip()
continue
if (parts[0].strip() == "order"):
order = parts[1].strip()
level = len(order.split("."))
#print("order=" + order + " level=" + str(level))
continue
if (not inMetadata):
if (line.startswith("#")):
# make sure heading level is correct-ish
output += ('#' * level) + line + "\n"
else:
# append line to output
output += line
finally:
reader.close()
output += "\n"
return category, order, title, output
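# Illustrative input (editorial addition; the values are assumptions): each document
# file is expected to start with Jekyll-style front matter such as
#
#   ---
#   category: User Guide
#   title: Installation
#   order: 1.2
#   ---
#
# The number of dot-separated parts in "order" becomes the heading level applied to
# "title" when the closing '---' is reached.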
def processDocDir(docDir):
docTitle = ""
docContent = ""
for _, _, files in os.walk(docDir):
files.sort()
for filename in files:
docTitle, _, _, content = processDocFile(docDir + "/" + filename)
docContent += content
return docTitle, docContent
parser = argparse.ArgumentParser()
parser.add_argument("docpath", help="directory containing documents")
parser.add_argument("outpath", help="location to write assembled markdown files")
args = parser.parse_args()
os.makedirs(args.outpath, exist_ok = True)
output = ""
# each directory contains a document, ignore files at this level of the file tree
for root, dirs, _ in os.walk(args.docpath):
for directoryname in dirs:
title, content = processDocDir(root + "/" + directoryname)
# write markdown version of the document
mdFileName = args.outpath + "/" + title.replace(" ", "_") + ".md"
f = open(mdFileName, "w")
f.write(content)
f.close()
# write reStructuredText version of the document
rst = """
.. header::
[[[TITLE]]]
.. footer::
|date| ___________________________________________________________________page ###Page###
.. sectnum::
:depth: 3
[[[DOC_TITLE]]]
.. raw:: pdf
PageBreak
.. contents:: Table of Contents
:depth: 3
.. raw:: pdf
PageBreak
.. |date| date:: %m-%d-%Y
"""
rst = rst.replace("[[[TITLE]]]", title).replace("[[[DOC_TITLE]]]", ('=' * len(title)) + "\n" + title + "\n" + ('=' * len(title)))
rst += convert(content)
rstFileName = args.outpath + "/" + title.replace(" ", "_") + ".rst"
f = open(rstFileName, "w")
f.write(rst)
f.close()
#print(output.replace("Release X.Y.Z", "Release " + args.rel))
``` |
{
"source": "jondavid-black/Doc_Pipeline",
"score": 3
} |
#### File: tools/python/spell_check.py
```python
import os, sys, json, re
def spellcheck(mdfile, dictionary, regex):
badWords = []
# print("Spell checking file " + mdfile + " using dictionary " + dictionary)
# run pandoc to generate HTML
# pandoc ./design-document/_posts/2019-07-26-Requirements.markdown -f markdown -t html -s -o req.html
pandocCmd = 'pandoc ' + mdfile + ' -f markdown -t html -s -o tmp.html'
os.system(pandocCmd)
# run hunspell
hunspellCmd = 'hunspell -H -l tmp.html'
hunspellOut = os.popen(hunspellCmd).read()
#clean up temp file
os.system('rm tmp.html')
# remove hunspell items that are in our dictionary
# print("dictionary words: " + str(dictionary))
for word in hunspellOut.splitlines():
# print("Checking dictionary for: " + word)
if word not in dictionary:
regexMatch = False
for regexEntry in regex:
if re.match(regexEntry, word):
regexMatch = True
# print("Match " + regexEntry + " for " + word)
continue
if not regexMatch:
badWords.append(word)
else:
# print("Found " + word + " in dictionary")
continue
# return list of misspelled words
return badWords
# print ("check spelling for markdown in " + sys.argv[2] + " using dictionary " + sys.argv[1])
dictionary = []
regex = []
spellingErrors = {}
with open(sys.argv[1]) as f:
dictionaryContent = f.read().splitlines()
for entry in dictionaryContent:
if entry.startswith("re="):
regex.append(entry.split("=")[1])
else:
dictionary.append(entry)
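# Illustrative dictionary file (editorial addition; the entries are assumptions):
# plain lines are allowed words, while "re=<pattern>" lines are collected as
# regular expressions that whitelist any matching word, e.g.
#
#   hunspell
#   AutoDocTest
#   re=[A-Z]{2,5}s?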
with os.scandir(sys.argv[2]) as it:
for entry in it:
if entry.path.endswith(".md") or entry.path.endswith(".markdown"):
# do spell checking
misspelledWords = spellcheck(entry.path, dictionary, regex)
if len(misspelledWords) == 0:
continue
else:
# keep track of words and files
spellingErrors[entry.path] = misspelledWords
# if spelling errors exist, print and write to file
if bool(spellingErrors):
print(json.dumps(spellingErrors))
if len(sys.argv) == 4:
with open(sys.argv[3], 'w') as fp:
json.dump(spellingErrors, fp)
``` |
{
"source": "jondavid-black/javahelloworld",
"score": 4
} |
#### File: javahelloworld/src/primes.py
```python
from time import perf_counter
# function to check whether a number
# is prime or not
def isPrime(n):
# Corner case
if (n <= 1):
return False
# Check from 2 to n-1
for i in range(2, n):
if (n % i == 0):
return False
return True
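# Editorial aside (not in the original): trial division only needs to test divisors
# up to sqrt(n), so an equivalent but much faster check would be
#     for i in range(2, int(n ** 0.5) + 1):
#         if (n % i == 0):
#             return False
#     return True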
# Driver Code
start = perf_counter()
max_val = 1000000
primes = []
for i in range (1, max_val):
if isPrime(i):
print(i, ": true")
primes.append(i)
else:
print(i, ": false")
for val in primes:
print(val)
end = perf_counter()
execution_time = (end - start) * 1000  # perf_counter() returns seconds; convert to ms for the print below
print("Duration: ", execution_time, " ms")
# This code is contributed by <NAME>
``` |
{
"source": "JonDavis-AZED/usda-fns-ingest",
"score": 2
} |
#### File: usda_fns_ingestor/tests/test_extensive.py
```python
import json
from collections import defaultdict
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from parameterized import parameterized
def load(filename):
with open(filename, 'r', encoding='utf-8') as f:
return f.read()
def load_test_cases(file_ext):
import fnmatch
import os
# where is this module?
thisDir = os.path.dirname(__file__)
test_data_dir = os.path.join(thisDir, "test_data")
return [(os.path.join(test_data_dir, file), (os.path.join(test_data_dir, "test_results", file + ".json")))
for file in os.listdir(test_data_dir) if fnmatch.fnmatch(file, '*.' + file_ext)]
def get_errors_only(response):
out = []
results = response['tables'][0]
for row in results['rows']:
sfa_id = row['data']['sfa_id']
state_id = row['data']['state_id']
sfa_name = row['data']['sfa_name']
errors = [err['code'] for err in row['errors']]
out.append({'state_id': state_id, 'sfa_id': sfa_id,
'sfa_name': sfa_name, 'errors': errors})
return out
def get_total_errors(results):
out = defaultdict(lambda: 0)
for item in results:
for e in item['errors']:
out[e] += 1
return out
class ExtensiveTests(APITestCase):
fixtures = ['test_data.json']
@parameterized.expand(load_test_cases('csv'))
def test_validate_csv(self, name, expected):
print(name + expected)
url = reverse('data_ingest:validate')
data = load(name)
token = "<PASSWORD>"
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
response = self.client.post(url, data, content_type='text/csv')
self.assertEqual(response.status_code, status.HTTP_200_OK)
err_resp = get_errors_only(response.data)
total_err = (dict(get_total_errors(err_resp)))
exp_data = json.loads(load(expected))
print("exp_data")
print(exp_data)
print("actual_data")
print(total_err)
self.assertDictEqual(exp_data, total_err)
@parameterized.expand(load_test_cases('json'))
def test_validate_json(self, name, expected):
print(name + expected)
url = reverse('data_ingest:validate')
data = load(name)
token = "<PASSWORD>"
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
response = self.client.post(url, data, content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
err_resp = get_errors_only(response.data)
total_err = (dict(get_total_errors(err_resp)))
exp_data = json.loads(load(expected))
print("exp_data")
print(exp_data)
print("actual_data")
print(total_err)
self.assertDictEqual(exp_data, total_err)
``` |
{
"source": "jondbaker/django-critique",
"score": 2
} |
#### File: django-critique/critique/admin.py
```python
from django.contrib import admin
from .models import Critique
class CritiqueAdmin(admin.ModelAdmin):
fields = ("url", "message", "email", "user_agent", "created",)
list_display = ("id", "url", "get_message", "email", "user_agent", "created",)
list_filter = ("created",)
readonly_fields = ("created",)
search_fields = ("url", "email",)
def get_message(self, obj):
"""Returns a truncated 'message' field."""
return "{0}...".format(obj.message[:60])
get_message.short_description = "Message"
admin.site.register(Critique, CritiqueAdmin)
```
#### File: critique/migrations/0001_initial.py
```python
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Critique'
db.create_table(u'critique_critique', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('message', self.gf('django.db.models.fields.TextField')()),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('user_agent', self.gf('django.db.models.fields.CharField')(max_length=256)),
))
db.send_create_signal(u'critique', ['Critique'])
def backwards(self, orm):
# Deleting model 'Critique'
db.delete_table(u'critique_critique')
models = {
u'critique.critique': {
'Meta': {'ordering': "['-created']", 'object_name': 'Critique'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['critique']
``` |
{
"source": "jondeaton/AgarAI",
"score": 3
} |
#### File: AgarAI/a2c/hyperparameters.py
```python
import json
class HyperParameters:
""" Simple class for storing model hyper-parameters """
def __init__(self):
self.seed = 42
# to fill in by sub_classes
self.env_name = None
self.EncoderClass = None
self.action_shape = None
self.num_envs = None
# optimizer
self.learning_rate = None
self.gamma = None
self.entropy_weight = None
self.action_shape = None
self.batch_size = None
self.agents_per_env = None
self.episode_length = None
self.num_episodes = None
self.save_frequency = 8
def override(self, params):
"""
Overrides attributes of this object with those of "params".
All attributes of "params" which are also attributes of this object will be set
to the values found in "params". This is particularly useful for over-riding
hyper-parameters from command-line arguments
:param params: Object with attributes to override in this object
:return: None
"""
for attr in vars(params):
if hasattr(self, attr) and getattr(params, attr) is not None:
value = getattr(params, attr)
setattr(self, attr, value)
def save(self, file):
""" save the hyper-parameters to file in JSON format """
with open(file, 'w') as f:
json.dump(self.__dict__, f, indent=4)
@staticmethod
def restore(file):
""" restore hyper-parameters from JSON file """
with open(file, 'r') as f:
data = json.load(f)
hp = HyperParameters()
hp.__dict__.update(data)
return hp
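# Illustrative usage sketch (editorial addition; the attribute values are
# assumptions, not part of the original module):
#     hp = GridEnvHyperparameters()
#     hp.override(args)  # e.g. an argparse.Namespace with learning_rate=0.001
#     hp.save('hp.json')
#     restored = HyperParameters.restore('hp.json')
# override() only copies attributes that already exist on hp and that are not None
# on args, so unrelated command-line flags are ignored.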
class GridEnvHyperparameters(HyperParameters):
def __init__(self):
super(GridEnvHyperparameters, self).__init__()
self.env_name = 'agario-grid-v0'
self.architecture = 'Basic'
self.encoder_class = 'CNN'
self.asynchronous = False
self.learning_rate = 0.01
self.num_episodes = 4096
self.gamma = 0.95
self.batch = False
self.batch_size = 64
self.num_envs = 4
self.entropy_weight = 1e-4
self.action_shape = (8, 1, 1)
self.episode_length = 512
# Agario Game parameters
self.difficulty = "normal"
self.agents_per_env = 32
self.ticks_per_step = 4 # set equal to 1 => bug
self.arena_size = 500
self.num_pellets = 1000
self.num_viruses = 25
self.num_bots = 0
self.pellet_regen = True
# observation parameters
self.num_frames = 1
self.grid_size = 32
self.observe_pellets = True
self.observe_viruses = True
self.observe_cells = True
self.observe_others = True
class CartPoleHyperparameters(HyperParameters):
def __init__(self):
super(CartPoleHyperparameters, self).__init__()
self.env_name = "CartPole-v1"
self.encoder_class = 'DenseEncoder'
self.num_envs = 128
self.learning_rate = 0.05
self.action_shape = (2, )
self.episode_length = 500
self.entropy_weight = 1e-4
```
#### File: AgarAI/a2c/remote_environment.py
```python
from enum import Enum
from multiprocessing import Pipe, Process
class RemoteCommand(Enum):
step = 1
reset = 2
close = 3
observation_space = 4
action_space = 5
class RemoteEnvironment:
""" encapsulates a multi-agent environment in a remote process """
def __init__(self, get_env):
self.get_env = get_env
def __enter__(self):
worker_pipe, self._pipe = Pipe()
self._worker = Process(target=worker_task, args=(worker_pipe, self.get_env))
self._worker.start()
return self
def __exit__(self, exc_type, exc_value, tb):
self._pipe.send(RemoteCommand.close)
self._worker.join()
def reset(self):
self._pipe.send(RemoteCommand.reset)
obs = self._pipe.recv()
return obs
def step(self, actions):
self._pipe.send((RemoteCommand.step, actions))
obs, rewards, dones, info = self._pipe.recv()
return obs, rewards, dones, info
def observation_space(self):
self._pipe.send(RemoteCommand.observation_space)
return self._pipe.recv()
def action_space(self):
self._pipe.send(RemoteCommand.action_space)
return self._pipe.recv()
def worker_task(pipe, get_env):
env = get_env()
while True:
try:
msg = pipe.recv()
except (KeyboardInterrupt, EOFError):
return
if type(msg) is tuple:
command, data = msg
else:
command = msg
if command == RemoteCommand.step:
step_data = env.step(data)
pipe.send(step_data)
elif command == RemoteCommand.reset:
ob = env.reset()
pipe.send(ob)
elif command == RemoteCommand.close:
pipe.close()
return
elif command == RemoteCommand.observation_space:
pipe.send(env.observation_space)
elif command == RemoteCommand.action_space:
pipe.send(env.action_space)
else:
raise ValueError(command)
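# Illustrative usage sketch (editorial addition; the factory and actions below are
# assumptions, not part of the original module):
#     def make_env():
#         import gym
#         return gym.make('agario-grid-v0')
#     with RemoteEnvironment(make_env) as remote:
#         obs = remote.reset()
#         obs, rewards, dones, info = remote.step(actions)  # one action per agent
# The factory, not the environment, is handed to the worker process, so the
# environment is constructed (and stays) inside the worker.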
```
#### File: AgarAI/baselines/monte_carlo.py
```python
import gym
import numpy as np
from tqdm import tqdm
import random
import matplotlib.pyplot as plt
import sys, argparse, logging
logger = logging.getLogger("root")
def to_obs(s: np.ndarray) -> np.ndarray:
theta = s[2]
theta_dot = s[3]
y = np.sin(theta)
x = np.cos(theta)
dy = np.sin(theta_dot)
dx = np.cos(theta_dot)
o = np.append(s, [x, y, dx, dy])
return np.append(o, np.outer(o, o).reshape(-1))
def main():
args = parse_args()
np.random.seed(42)
random.seed(42)
env = gym.make(args.env_name)
env.seed(42)
state_n, = env.observation_space.shape
action_n = env.action_space.n
obs_n = ((state_n + 4) + 1) * (state_n + 4)
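    # to_obs() appends [cos, sin] pairs for theta and theta_dot to the 4-dim state
    # (state_n + 4 features) and then every pairwise product via the outer product,
    # hence (state_n + 4) + (state_n + 4)**2 = ((state_n + 4) + 1) * (state_n + 4).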
w_shape = (action_n, obs_n)
w = np.random.randn(np.prod(w_shape)).reshape(w_shape)
eps_base = 0.5
lr_base = 0.00025
gamma = 0.95
ep_returns = np.zeros(args.num_episodes)
episode_iterator = tqdm(range(args.num_episodes), unit="Episode")
for ep in episode_iterator:
epsilon = eps_base * (1 / (1 + ep / 500))
lr = lr_base + 0.007 * np.exp(- ep / 1000.0)
s = env.reset()
rewards = list()
actions = list()
states = list()
G = 0 # return
done = False
while not done:
o = to_obs(s)
if np.random.rand() <= epsilon:
a = env.action_space.sample()
else:
a = np.argmax(np.matmul(w, o))
s, r, done, info = env.step(a)
G += r
rewards.append(r)
actions.append(a)
states.append(o)
episode_iterator.set_description(f"return: {G}, epsilon: {epsilon:.3f}")
ep_returns[ep] = G
# make returns
returns = np.zeros(len(rewards))
returns[-1] = rewards[-1]
for i in reversed(range(len(rewards) - 1)):
returns[i] = rewards[i] + gamma * returns[i + 1]
# update
for i in range(len(returns)):
a = actions[i]
G = returns[i]
s = states[i]
w[a] += lr * ((G - np.matmul(w, s)[a]) * s - 0.002 * w[a])
plt.figure()
plt.plot(ep_returns, 'blue')
plt.xlabel("episode")
plt.ylabel("return")
plt.show()
def parse_args():
parser = argparse.ArgumentParser(description="Train Q-Learning Agent")
env_options = parser.add_argument_group("Environment")
env_options.add_argument("--env", default="CartPole-v1",
dest="env_name",
choices=["CartPole-v1"])
hyperams_options = parser.add_argument_group("HyperParameters")
hyperams_options.add_argument("--episodes", type=int, default=4000,
dest="num_episodes",
help="Number of epochs to train")
logging_group = parser.add_argument_group("Logging")
logging_group.add_argument('--log', dest="log_level", choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
default="DEBUG", help="Logging level")
args = parser.parse_args()
# Setup the logger
# Logging level configuration
log_level = getattr(logging, args.log_level.upper())
log_formatter = logging.Formatter('[%(asctime)s][%(levelname)s][%(funcName)s] - %(message)s')
# For the console
console_handler = logging.StreamHandler(sys.stderr)
console_handler.setFormatter(log_formatter)
logger.addHandler(console_handler)
logger.setLevel(log_level)
return args
if __name__ == "__main__":
main()
```
#### File: AgarAI/configuration/__init__.py
```python
import os
from typing import *
from google.protobuf import text_format
from configuration import config_pb2, environment_pb2
Config = config_pb2.Config
Agario = environment_pb2.Agario
Environment = environment_pb2.Environment
Observation = environment_pb2.Observation
Action = environment_pb2.Action
HyperParameters = config_pb2.HyperParameters
def _configs_dir() -> str:
"""Gives directory containing configurations."""
return os.path.join(os.path.dirname(__file__), "configs")
def load(name: str) -> Config:
"""Gets a configuration by name."""
config_path = os.path.join(_configs_dir(), f'{name}.textproto')
with open(config_path, 'r') as f:
return text_format.MergeLines(f, Config())
_gym_names = {
Observation.Type.GRID: "agario-grid-v0",
Observation.Type.RAM: "agario-ram-v0",
Observation.Type.SCREEN: "agario-screen-v0",
}
def gym_env_name(environment: Environment) -> str:
return _gym_names[environment.observation.type]
def gym_env_config(environment: Environment) -> Dict[str, Any]:
"""Makes the Gym environment configuration dict from a Config."""
difficulty = environment_pb2.Agario.Difficulty.DESCRIPTOR.values_by_number[environment.agario.difficulty].name
env_config = {
'num_agents': environment.num_agents,
'multi_agent': True, # todo: put this into the proto
'difficulty': difficulty.lower(),
'ticks_per_step': environment.observation.ticks_per_step,
'arena_size': environment.agario.arena_size,
'num_pellets': environment.agario.num_pellets,
'num_viruses': environment.agario.num_viruses,
'num_bots': environment.agario.num_bots,
'pellet_regen': environment.agario.pellet_regen,
}
if environment.observation.type == Observation.Type.GRID:
env_config["grid_size"] = environment.observation.grid_size
env_config["num_frames"] = environment.observation.num_frames
env_config["observe_cells"] = environment.observation.cells
env_config["observe_others"] = environment.observation.others
env_config["observe_pellets"] = environment.observation.pellets
env_config["observe_viruses"] = environment.observation.viruses
return env_config
```
#### File: AgarAI/test/returns_test.py
```python
import numpy as np
import unittest
from a2c.training import make_returns
class ReturnsTest(unittest.TestCase):
""" tests the 'make_returns' function """
def test_len(self):
for length in (0, 1, 10):
rewards = np.zeros(length)
returns = make_returns(rewards, 1)
self.assertEqual(len(rewards), len(returns))
def test_zero_rewards(self):
rewards = np.zeros(10)
returns = make_returns(rewards, 1)
self.assertEqual(returns.sum(), 0)
def test_zero_discount(self):
rewards = 4 + np.arange(10)
returns = make_returns(rewards, 0)
self.assertEqual(len(rewards), len(returns))
for rew, ret in zip(rewards, returns):
self.assertEqual(rew, ret)
def test_returns_discounted(self):
np.random.seed(10)
rewards = np.random.randn(30)
gamma = 0.75
returns = make_returns(rewards, gamma)
self.assertEqual(len(rewards), len(returns))
ret = 0
for i in reversed(range(len(rewards))):
ret = rewards[i] + gamma * ret
self.assertEqual(ret, returns[i])
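# Minimal reference sketch of the behaviour these tests assume; the actual
# implementation lives in a2c.training.make_returns and may differ:
#
#     def make_returns(rewards, gamma):
#         returns = np.zeros(len(rewards))
#         running = 0.0
#         for i in reversed(range(len(rewards))):
#             running = rewards[i] + gamma * running
#             returns[i] = running
#         return returns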
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jondelmil/artinvestor-server",
"score": 2
} |
#### File: artinvestor_server/artists/models.py
```python
from django.db import models
from django.template.defaultfilters import slugify
class Artist(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField(unique=True)
def save(self, *args, **kwargs):
if not self.id:
# Newly created object, so set slug
self.slug = slugify(self.name)
super(Artist, self).save(*args, **kwargs)
def __str__(self):
return self.name
``` |
{
"source": "jondelmil/friends-weekend",
"score": 2
} |
#### File: friends_weekend/flights/views.py
```python
from django.views.generic import CreateView, ListView, DetailView, UpdateView
from django.core.urlresolvers import reverse, reverse_lazy
from braces.views import LoginRequiredMixin
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from .models import Flight
from .serializers import FlightSerializer
from .forms import FlightForm
class FlightCreateView(LoginRequiredMixin, CreateView):
model = Flight
form_class = FlightForm
success_url = reverse_lazy('flights:list')
def get_form_kwargs(self):
kwargs = super(FlightCreateView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
class FlightUpdateView(LoginRequiredMixin, UpdateView):
model = Flight
form_class = FlightForm
success_url = reverse_lazy('flights:list')
def get_form_kwargs(self):
kwargs = super(FlightUpdateView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
class FlightDetailView(LoginRequiredMixin, DetailView):
model = Flight
template_name = '../templates/flights/flight_detail.html'
class FlightListView(ListView):
model = Flight
template_name = '../templates/flights/flight_list.html'
class FlightListCreateAPIView(LoginRequiredMixin, ListCreateAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
lookup_field = 'number'
class FlightReadUpdateDeleteView(LoginRequiredMixin, RetrieveUpdateDestroyAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
lookup_field = 'number'
``` |
{
"source": "jondgoodwin/bitfunnel-play",
"score": 2
} |
#### File: bitfunnel-play/testkit/mg4j.py
```python
import os
import platform
class Mg4j:
def __init__(self,
corpus): # Corpus object handling docs & queries
self.corpus = corpus
corpus.add_engine('mg4j', self)
# Establish full path names for files and folders
self.index_folder = os.path.join(self.corpus.docs_folder, "mg4jindex")
self.mg4j_basename = os.path.join(self.index_folder, "index")
# Build MG4J index from manifest.txt listed files
# This only performs work if index is missing
def build_index(self):
if os.path.exists(self.index_folder):
return self
# Create index folder
os.makedirs(self.index_folder)
args = ("it.unimi.di.big.mg4j.tool.IndexBuilder "
"-o \"org.bitfunnel.reproducibility.ChunkManifestDocumentSequence({0})\" "
"{1}").format(self.corpus.manifest, self.mg4j_basename)
self.corpus.mg4j_execute(args, "mg4j_build_index.log")
return self
# Run query log using the specified number/range of threads
def run_queries(self, querylog, threads=1):
self.mg4j_results_file = os.path.join(self.corpus.test_folder, "mg4jresults.csv")
args = ("org.bitfunnel.reproducibility.QueryLogRunner "
"mg4j {0} {1} {2} {3}").format(self.mg4j_basename,
os.path.join(self.corpus.data_folder, querylog),
self.mg4j_results_file,
threads)
self.corpus.mg4j_execute(args, "mg4j_run_queries.log")
return self
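# Note: analyze_index() and analyze_corpus() below reference names that are
# not defined in this file (IndexCharacteristics, CorpusCharacteristics,
# self.thread_counts, self.bf_run_queries_log, self.bf_index_path, ...).
# They appear to be shared with the BitFunnel driver in the same test kit,
# so these two methods are not runnable from this module on their own.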
def analyze_index(self):
results = IndexCharacteristics("BitFunnel", self.ingestion_thread_count, self.thread_counts)
with open(self.bf_run_queries_log, 'r') as myfile:
run_queries_log = myfile.read()
results.set_float_field("bits_per_posting", "Bits per posting:", run_queries_log)
results.set_float_field("total_ingestion_time", "Total ingestion time:", run_queries_log)
for i, threads in enumerate(self.thread_counts):
query_summary_statistics = os.path.join(self.bf_index_path,
"results-{0}".format(threads),
"QuerySummaryStatistics.txt")
with open(query_summary_statistics, 'r') as myfile:
data = myfile.read()
results.append_float_field("planning_overhead", r"Planning overhead:", data)
results.append_float_field("qps", "QPS:", data)
results.append_float_field("mps", "MPS:", data)
results.append_float_field("mpq", "MPQ:", data)
results.append_float_field("mean_query_latency", "Mean query latency:", data)
self.compute_false_positive_rate(results)
return results
def analyze_corpus(self, gov2_directories):
results = CorpusCharacteristics(gov2_directories, self.min_term_count, self.max_term_count)
with open(self.bf_build_statistics_log, 'r') as input:
build_statistics_log = input.read()
results.set_int_field("documents", "Document count:", build_statistics_log)
results.set_int_field("terms", "Raw DocumentFrequencyTable count:", build_statistics_log)
results.set_int_field("postings", "Posting count:", build_statistics_log)
results.set_int_field("bytes", "Total bytes read:", build_statistics_log)
run_queries_log_file = os.path.join(self.bf_index_path, "results-1", "QuerySummaryStatistics.txt")
with open(run_queries_log_file) as input:
run_log = input.read()
results.set_float_field("matches_per_query", "MPQ:", run_log)
return results
```
#### File: bitfunnel-play/testkit/pef.py
```python
import os
class Pef:
def __init__(self,
corpus, # Corpus object handling docs & queries
pef_path): # Full path to PEF program
self.corpus = corpus
corpus.add_engine('pef', self)
self.pef_creator = os.path.join(pef_path, "create_freq_index")
self.pef_runner = os.path.join(pef_path, "Runner")
# Establish full path names for files and folders
self.mg4jindex_folder = os.path.join(self.corpus.docs_folder, "mg4jindex")
self.pefindex_folder = os.path.join(self.corpus.docs_folder, "pefindex")
self.pef_index_type = "opt"
self.pef_index_file = os.path.join(self.pefindex_folder, "index." + self.pef_index_type)
# Build PEF index from MG4J index
# This only performs work if needed data is missing
def build_index(self):
if os.path.exists(self.pefindex_folder):
return self
os.makedirs(self.pefindex_folder)
# Export info needed to build pef index from mg4j index
args = ("org.bitfunnel.reproducibility.IndexExporter "
"{0} {1} --index").format(os.path.join(self.mg4jindex_folder, "index"),
os.path.join(self.pefindex_folder, "index"))
self.corpus.mg4j_execute(args, "pef_build_collection.log")
# Create PEF index
args = ("{0} {1} {2} {3}").format(self.pef_creator,
self.pef_index_type,
os.path.join(self.pefindex_folder, "index"),
self.pef_index_file)
self.corpus.execute(args, "pef_build_index.log")
return self
# Run query log using the specified number/range of threads
def run_queries(self, querylog, threads=1):
self.pef_results_file = os.path.join(self.corpus.test_folder, "pefresults.csv")
args = ("{0} {1} {2} {3} {4} {5}").format(self.pef_runner,
self.pef_index_type,
self.pef_index_file,
os.path.join(self.corpus.data_folder, querylog),
threads,
self.pef_results_file)
self.corpus.execute(args, "pef_run_queries.log")
return self
``` |
{
"source": "JonDHo/Discovery-asf_search",
"score": 3
} |
#### File: Discovery-asf_search/asf_search/ASFProduct.py
```python
from typing import Iterable
import numpy as np
import json
from collections import UserList
import requests
from asf_search.download import download_url
from asf_search import ASFSession
class ASFProduct:
def __init__(self, args: dict):
self.properties = args['properties']
self.geometry = args['geometry']
def __str__(self):
return json.dumps(self.geojson(), indent=2, sort_keys=True)
def geojson(self) -> dict:
return {
'type': 'Feature',
'geometry': self.geometry,
'properties': self.properties
}
def download(self, path: str, filename: str = None, session: ASFSession = None) -> None:
"""
Downloads this product to the specified path and optional filename.
:param path: The directory into which this product should be downloaded.
:param filename: Optional filename to use instead of the original filename of this product.
:param session: The session to use, in most cases should be authenticated beforehand
:return: None
"""
if filename is None:
filename = self.properties['fileName']
download_url(url=self.properties['url'], path=path, filename=filename, session=session)
def stack(self) -> UserList:
"""
Builds a baseline stack from this product.
:return: ASFSearchResults(list) of the stack, with the addition of baseline values (temporal, perpendicular) attached to each ASFProduct.
"""
from .search.baseline_search import stack_from_product
return stack_from_product(self)
def centroid(self) -> (Iterable[float]):
"""
Finds the centroid of a product
Shamelessly lifted from https://stackoverflow.com/a/23021198 and https://stackoverflow.com/a/57183264
"""
arr = np.array(self.geometry['coordinates'][0])
length, dim = arr.shape
return [np.sum(arr[:, i]) / length for i in range(dim)]
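# Illustrative usage (example data, not from the original source). Note that
# the "centroid" here is the arithmetic mean of the ring vertices, which is a
# reasonable approximation for small product footprints:
#
#     product = ASFProduct({
#         'properties': {'fileName': 'granule.zip',
#                        'url': 'https://example.com/granule.zip'},
#         'geometry': {'type': 'Polygon',
#                      'coordinates': [[[0.0, 0.0], [2.0, 0.0],
#                                       [2.0, 2.0], [0.0, 2.0]]]},
#     })
#     product.centroid()  # -> [1.0, 1.0]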
``` |
{
"source": "jondiquattro/Machine-Learning-Capstone",
"score": 3
} |
#### File: jondiquattro/Machine-Learning-Capstone/team-Crawler.py
```python
from bs4 import BeautifulSoup
import requests
import csv
address = 'http://www.nfl.com/stats/categorystats?archive=true&conference=null&role=TM&offensiveStatisticCategory=RUSHING&defensiveStatisticCategory=null&season=1980&seasonType=REG&tabSeq=2&qualified=false&Submit=Go'
def WRscraper(web, file):
source = requests.get(web).text
soup = BeautifulSoup(source, 'lxml')
csv_file = file
csv_writer = csv.writer(csv_file) #decides what file to write to
body = soup.find('tbody')
# anchor = body.a #gets names and is working
rowes = body.find_all('tr')
# divs = rowes.find('td', class_="sorted right") #gets receptions and is working
for i in range(len(rowes)):
# divs = rowes[i].find_all('td', class_="sorted right")
divs2 = rowes[i].find_all('td', class_="right")
anchor = rowes[i].find_all('a')
team = anchor[0].text
# print(name)
attempts =str(divs2[0].text)
attemptePerGame =str(divs2[1].text)
yards =str(divs2[2].text)
avgYards = str(divs2[3].text)
yardsGame = str(divs2[4].text)
TDs = str(divs2[5].text)
# str.strip(yards.replace(',',''))
csv_writer.writerow([team, str.strip(attempts),str.strip(yards.replace(',','')),str.strip(attemptePerGame), str.strip(avgYards),str.strip(yardsGame), str.strip(TDs) ])
def InnerpageCrawler(url, season, file): # crawls each paginated results page for a season and scrapes it
year =season
source = requests.get(url).text
soup = BeautifulSoup(source, 'lxml')
csv_file = file
LinkClass = soup.find(class_='linkNavigation floatRight')
linkCount = LinkClass.find_all('a')
for i in range(len(linkCount)-1):
Innerlink = linkCount[i]
InnernextLink = 'http://www.nfl.com/stats/categorystats?tabSeq=0&season='+year+'&seasonType=REG&Submit=Go&archive=true&d-447263-p='+Innerlink.text+'&statisticCategory=RECEIVING&conference=null&qualified=true'
#call scrape and pass InnernextLink
WRscraper(InnernextLink, csv_file)
# print("innernextLink = ", InnernextLink)
def pageCrawler(year, finish): # iterates over the requested seasons, writing one CSV of team stats per year
year = year
finish = finish + 1
for year in range(year, finish):
#open file here with name of year.csv
name = str(year)
# print('file opens' )
csv_file = open('TEAM_'+ name +'.csv', 'w')
csv_writer = csv.writer(csv_file)
csv_writer.writerow(['Team','Attempts','Yards', 'AttemptsPerGame', 'AvgYardsPerCarry', 'YardsPerGame','TDs']) #writes header
link = str(year)
nextLink = 'http://www.nfl.com/stats/categorystats?archive=true&conference=null&role=TM&offensiveStatisticCategory=RUSHING&defensiveStatisticCategory=null&season='+link+'&seasonType=REG&tabSeq=2&qualified=false&Submit=Go'
# hrefArray.append(nextLink)
year +=1
# yearArray.append(year)
# print(year)
#call scrape function and pass next link
WRscraper(nextLink, csv_file)
# InnerpageCrawler(nextLink, link, csv_file) #this function gets each inner page
# print("next link", nextLink)
csv_file.close()
def main():
# to scrape change the start year to whatever you want
start = 1980
finish = 2017
pageCrawler(start, finish)
# csv_file.close()
main()
``` |
{
"source": "jondiquattro/Y-intercept-Calculator",
"score": 4
} |
#### File: jondiquattro/Y-intercept-Calculator/COS-205_assignment3_JRD.py
```python
from graphics import *
import math
def main():
# Draws window size 800 x 600
win = GraphWin("Intersect Calculator", 800, 600)
win.setCoords(-10.0, -10.0, 10.0, 10.0) #sets grid left, right, columb, row
button = Text(Point(3.5, 7.8),"Enter")
button.draw(win)
# right bottom left top
Rectangle(Point(4.2,7.2), Point(2.85,8.5)).draw(win)
#Gets user inputs/displays a box for them to type into
Text(Point(-1.3,9), "Enter Radius:").draw(win)
input = Entry(Point(1,9), 5)
input.setText("0")
input.draw(win)
win.getMouse()
radius = eval(input.getText())
Text(Point(-1.6,8), "Enter Y intercept:").draw(win)
input = Entry(Point(1,8), 5)
input.setText("0")
input.draw(win)
win.getMouse()
yCoordinate =eval(input.getText())
print("radius is ", radius)
print("yCoordinate is ", yCoordinate)
#variables for intercept y
rightY = yCoordinate
leftY = yCoordinate
#draws circle
center = Point(0,0)
circ = Circle(center, radius)
circ.setFill('green')
circ.draw(win)
#draws intercept line
Line(Point(-10,yCoordinate), Point(10,yCoordinate)).draw(win)
#gets r^ and y^2
radius = int(radius * radius)
yCoordinate = int(yCoordinate * yCoordinate)
#Handles yCoordinate greater than radius
if(yCoordinate > radius):
right = "The line does not intersect"
output2 = Text(Point(-6.4,8),"")
output2.draw(win)
output2.setText(right)
button.setText("Quit")
win.getMouse()
win.close()
return
#Calculates intercepts
intercepts = math.sqrt(radius - yCoordinate)
print("intercepts are -", intercepts, " and ", intercepts)
#point(x,y)
#right intercept
center = Point(intercepts, rightY)
rightCirc = Circle(center, .1)
rightCirc.setFill('red')
rightCirc.draw(win)
#left intercept
center = Point(-intercepts, leftY)
leftCirc = Circle(center, .1) #.1 is size of circle
leftCirc.setFill('red')
leftCirc.draw(win)
#Could not get leftCirc.move(1,0) to draw a circle
#leftCirc = rightCirc.clone()
#leftCirc.move(1, 0)
Text(Point(-8.4,9), "Left intercept:").draw(win)
Text(Point(-8.4,8), "Right intercept:").draw(win)
# if radius is equal or greater than yCoordinate display intercepts
if (radius >= yCoordinate):
right = format(intercepts, ",.2f")
left = format(-1*intercepts, ",.2f")
output1 = Text(Point(-6.4,9),"")
output1.draw(win)
output1.setText(left)
output2 = Text(Point(-6.4,8),"")
output2.draw(win)
output2.setText(right)
button.setText("Quit")
win.getMouse()
win.close()
main()
``` |
{
"source": "jondkelley/carphunter",
"score": 3
} |
#### File: carphunter/carphunter/itemsearch.py
```python
from datetime import date, datetime
import json
import prettytable
def print_statistics(data, total_time):
print("\nStatistics:")
print(" * Cache contains {} routers, {} switches".format(
len(data['db']['routers']), len(data['db']['switches'])))
print(" * Cache updated {}\n * Last poller took {}".format(
data['lastUpdate'], data['pollTime']))
print(" * This lookup took {}".format(total_time))
def search_arp_table(args):
"""
searches the json cache for an arp entry
"""
start_benchmark = datetime.now()
print("Checking network for `{}`...".format(args.ip))
data = json.load(open(args.cachedbfile))
for router, arpitem in data['db']['routers'].iteritems():
print_results = 0
#print router, arp
table = prettytable.PrettyTable(
["hwaddr", "ip", "vlan", "age", "proto"])
for ip, values in arpitem.iteritems():
if not values['vlan']:
values['vlan'] = "N/A"
if args.mac in values['mac']:
table.add_row([values['mac'], ip, values['vlan'],
values['age'], values['proto']])
print_results = 1
if print_results:
print("\nRouting Device: {} found match!".format(router))
print("{}".format(table))
for switch, arpitem in data['db']['switches'].iteritems():
print_results = 0
table = prettytable.PrettyTable(
["hwaddr", "interface", "vlan", "mode"])
for mac, values in arpitem.iteritems():
if args.mac in mac:
if not args.allports:
if "/" in values['interface']:
table.add_row([mac, values['interface'],
values['vlan'], values['mode']])
print_results = 1
else:
table.add_row([mac, values['interface'],
values['vlan'], values['mode']])
print_results = 1
if print_results:
print("\nSwitching Device: {} found match!".format(switch))
print("{}".format(table))
end_benchmark = datetime.now()
total_time = end_benchmark - start_benchmark
print_statistics(data, total_time)
def ip2arp(args):
"""
find arpa in json cache based on ip address
"""
start_benchmark = datetime.now()
if not args.json:
print("Checking network for `{}` ARPA entry...".format(args.ip))
data = json.load(open(args.cachedbfile))
jsondata = {}
for router, arpitem in data['db']['routers'].iteritems():
print_results = 0
exact_match = 0 # if exact match is found, hide ambiguous results
#print router, arp
jsondata[router] = {}
table = prettytable.PrettyTable(
["ip", "hwaddr", "vlan", "age", "proto"])
for ip, values in arpitem.iteritems():
if values['vlan'] == '':
values['vlan'] = "?"
if args.ip == values['ip']:
table.add_row([ip, values['mac'], values['vlan'],
values['age'], values['proto']])
print_results = 1
exact_match = 1
elif args.ip in values['ip']:
if not exact_match:
table.add_row(
[ip, values['mac'], values['vlan'], values['age'], values['proto']])
print_results = 1
if print_results:
print("\nRouting Device: {} found a matching IP!".format(router))
print("{}\n".format(table))
print("")
end_benchmark = datetime.now()
total_time = end_benchmark - start_benchmark
if not args.json:
print_statistics(data, total_time)
def print_table(header, values):
table = prettytable.PrettyTable(header)
table.add_row(values)
print(table)
``` |
{
"source": "jondkelley/dxmini-update_agent",
"score": 2
} |
#### File: dxmini-update_agent/dxmini/agent.py
```python
from configparser import (ConfigParser, MissingSectionHeaderError,
ParsingError, DEFAULTSECT)
from collections import defaultdict
import datetime
from dateutil import parser
from distutils.version import StrictVersion
from docopt import docopt
from dxmini import DXMINI_MANIFEST_URL
from dxmini import __version__ as VERSION
from dxmini.lib.utils import AnsiColor as color
from dxmini.lib.utils import get_arg_option
from dxmini.lib.utils import print_arguements
from os import environ
from subprocess import Popen, PIPE
from uptime import uptime
import configparser
import fileinput
import hashlib
import json
import logging
import os
import platform
import requests
import shutil
import socket
import stat
import subprocess
import subprocess
import tarfile
import time
import uuid
logger = logging.getLogger(__name__)
os.chdir('/')
def flash_writable(mntpoint):
"""
return if root device is writable using procfs
"""
mounts = dict()
with open('/proc/mounts','r') as f:
for partition in f.readlines():
mount = partition.split()[1]
mode = partition.split()[3].split(',')[0]
opts = partition.split()[3].split(',')
mounts[mount] = mode
if mounts.get(mntpoint, None):
if mounts.get(mntpoint, None) == "rw":
return True
else:
return False
else:
logger.error("root partition missing")
return False
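# Illustrative /proc/mounts line this parser handles (example data only):
#
#     /dev/mmcblk0p2 / ext4 rw,noatime,errors=remount-ro 0 0
#
# field 2 ("/") becomes the mount point key and the first comma-separated
# option of field 4 ("rw") becomes the recorded mode, so flash_writable("/")
# would return True for this line.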
class MicroSdCard():
"""
context manager to mount / dismount flash device
"""
def __init__(self, mnt):
self.mnt = mnt
def __enter__(self):
self.keep_readwrite_mode = flash_writable(self.mnt)
if not flash_writable(self.mnt):
logger.warn("{} : remounting flash in write mode".format(self.mnt))
p = subprocess.Popen("mount -o remount,rw {}".format(self.mnt), stdout=subprocess.PIPE, shell=True)
(out, err) = p.communicate()
else:
logger.warn("{} : flash already in write mode".format(self.mnt))
def __exit__(self, *args):
# don't do anything if the device was never in readonly mode
if not self.keep_readwrite_mode:
logger.warn("{} : remounting flash in read mode".format(self.mnt))
p = subprocess.Popen("mount -o remount,ro {}".format(self.mnt), stdout=subprocess.PIPE, shell=True)
(out, err) = p.communicate()
################################
# Initial provisioning and stuff
################################
def serial_generator():
"""
function generates service tag number as string
"""
return str(
int(
"".join(str(uuid.uuid4()).split('-')[:3]
),
16
)
)
def touch(path):
with open(path, 'a'):
os.utime(path, None)
def mk_ping_crontab():
"""
sets up a daily dxmini registration update
"""
frequency = "daily"
cronfile = "/etc/cron.{sched}/dxmini_registration".format(sched=frequency)
if os.path.isfile(cronfile):
logger.info("{crontab} : crontab already exists, skipping installe".format(crontab=cronfile))
else:
logger.info("{crontab} : installing crontab".format(crontab=cronfile))
crontab = (
"#!/bin/bash" "\n"
"# Update the DXMINI registration service" "\n"
"# while sleeping randomly over 2 hours to distribute load" "\n"
"sleep $[ ( $RANDOM % 7200 ) + 30 ]s" "\n"
"sudo dxmini agent --register" "\n"
)
with open(cronfile,"w") as fi:
fi.write(crontab)
os.chmod(cronfile, 755);
##########################
# activity log functions and stuff
##########################
def fetch_activity():
"""
fetches link activities from the pi-star Headers log
"""
records = dict()
records['DExtra'] = list()
records['Repeater'] = list()
#
with open('/var/log/pi-star/Headers.log',"r") as fi:
try:
for line in fi:
if "DExtra" in line:
introspect0 = line.split(' ')
date = introspect0[0]
time = introspect0[1].rstrip(':')
introspect1 = line.split(':')
fieldmy = introspect1[4].rstrip(' Your').strip().split(' ')[0]
fieldmy1 = fieldmy.split('/')[0].strip().split(' ')[0]
try:
fieldmy2 = fieldmy.split('/')[1].strip().split(' ')[0]
except IndexError:
fieldmy2 = None
fieldur = introspect1[5].rstrip('Rpt1').strip()
fieldrpt1 = introspect1[6].rstrip('Rpt2').strip()
fieldrpt1 = { "call": fieldrpt1.split(' ')[0], "interface": fieldrpt1.strip().split(' ')[1] }
fieldrpt2 = introspect1[7].rstrip('Flags').strip()
fieldrpt2 = { "call": fieldrpt2.strip().split(' ')[0], "interface": fieldrpt2.split(' ')[1] }
fieldflags1 = introspect1[8]
fieldflags2 = introspect1[9]
fieldflags = "{}{}".format(fieldflags1, fieldflags2).split('(')[0].strip().split(' ')
source = "{}:{}".format(fieldflags1, fieldflags2).split('(')[1].strip().rstrip(')')
datetime = "{} {}".format(date, time)
dt = int(parser.parse(datetime).timestamp())
records['DExtra'].append({"dt": dt, "datetime": datetime, "my": {"raw": fieldmy, "call": fieldmy1, "suffix": fieldmy2}, "ur": fieldur, "rpt1": fieldrpt1, "rpt2": fieldrpt2, "flags": fieldflags, "source": source})
elif "Repeater header" in line:
introspect0 = line.split(' ')
date = introspect0[0]
time = introspect0[1].rstrip(':')
introspect1 = line.split(':')
fieldmy = introspect1[4].rstrip(' Your').strip()
fieldmy1 = fieldmy.split('/')[0].strip()
try:
fieldmy2 = fieldmy.split('/')[1].strip().split(' ')[0]
except IndexError:
fieldmy2 = None
fieldur = introspect1[5].rstrip('Rpt1').strip()
fieldrpt1 = introspect1[6].rstrip('Rpt2').strip()
fieldrpt1 = { "call": fieldrpt1.split(' ')[0], "interface": fieldrpt1.strip().split(' ')[1] }
fieldrpt2 = introspect1[7].rstrip('Flags').strip()
fieldrpt2 = { "call": fieldrpt2.strip().split(' ')[0], "interface": fieldrpt2.split(' ')[1] }
fieldflags1 = introspect1[8]
fieldflags2 = introspect1[9]
fieldflags = "{}{}".format(fieldflags1, fieldflags2).split('(')[0].strip().split(' ')
source = "{}:{}".format(fieldflags1, fieldflags2).split('(')[1].strip().rstrip(')')
datetime = "{} {}".format(date, time)
dt = int(parser.parse(datetime).timestamp())
records['Repeater'].append({"dt": dt, "datetime": datetime, "my": {"raw": fieldmy, "call": fieldmy1, "suffix": fieldmy2}, "ur": fieldur, "rpt1": fieldrpt1, "rpt2": fieldrpt2, "flags": fieldflags, "source": source})
except:
pass
return records
def fetch_keyevents():
"""
fetches key-up events from today's MMDVM log
"""
keyevents = []
linepairs = []
now = datetime.datetime.now()
logfile = '/var/log/pi-star/MMDVM-{dt}.log'.format(dt=now.strftime("%Y-%m-%d"))
with open(logfile,"r") as fi:
for line in fi:
if ("received RF header" in line) or ("received RF end of transmission" in line):
linepairs.append(line)
print(line)
try:
COMPLETE = True
linepairs = [{linepairs[i].split(' ')[2]: {linepairs[i]:linepairs[i+1]}} for i in range(0,len(linepairs),2)]
except:
linepairs = []
print("Transmissions are incomplete, no data can be captured")
for pair in linepairs:
for k, pairs in pair.items():
for start, end in pairs.items():
time = start.split(' ')
time = "{} {}".format(time[1], time[2])
callsign = start.split('to')[0].rstrip().split('from')[1].lstrip().split(' ')[0]
suffix = start.split('to')[0].rstrip().split('from')[1].lstrip().split('/')[1]
to = start.split('to')[1].strip()
timekeyed = end.split('transmission, ')[1].split(' ')[0]
ber = end.split('transmission, ')[1].split(':')[1].split()[0].rstrip(',').split('%')[0]
rssi = end.split('transmission, ')[1].split(':')[2].strip().split(' ')[0].split('/')
rssi = [int(x) for x in rssi]
keyevents.append({ time: {"callsign": callsign, "suffix": suffix, "to": to, "keytime": float(timekeyed), "ber": float(ber), "rssi": rssi}})
return keyevents
##########################
# ping functions and stuff
##########################
class StrictConfigParser(ConfigParser):
"""
transform ini files into python dictionaries
"""
def _read(self, fp, fpname):
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname].append(value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
raise ValueError('Duplicate section %r' % sectname)
elif sectname == DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
##cursect['__python_style_key__'] = sectname.lower().replace(' ', '_')
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
try:
mo = self._optcre.match(line) # 2.7
except AttributeError:
mo = self.OPTCRE.match(line) # 2.6
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
optname = self.optionxform(optname.rstrip())
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos - 1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
# join the multi-line values collected while reading
all_sections = [self._defaults]
all_sections.extend(self._sections.values())
for options in all_sections:
for name, val in options.items():
if isinstance(val, list):
options[name] = '\n'.join(val)
def dget(self, section, option, default=None, type=str):
if not self.has_option(section, option):
return default
if type is str:
return self.get(section, option)
elif type is int:
return self.getint(section, option)
elif type is bool:
return self.getboolean(section, option)
else:
raise NotImplementedError()
def load_ini_as_dict(infile):
"""
loads an ini file as a dictionary, redacting any password values
"""
cfg = StrictConfigParser()
config = {}
try:
with open(infile) as fi:
cfg.readfp(fi)
except:
return config
for section in cfg.sections():
config[section] = {}
for name, value in cfg.items(section):
if "pass" in name.lower():
value = "###__REDACTED__###"
value = value.strip("\"")
value = "".join(value)
###section_new = section.lower().replace(' ', '_')
config[section][name] = [x.strip() for x in value.split() if x]
if len(config[section][name]) == 1:
config[section][name] = config[section][name][0]
elif len(config[section][name]) == 0:
config[section][name] = ''
return config
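# Minimal usage sketch (hypothetical file contents, not from the source):
# given an INI file /etc/example containing
#
#     [General]
#     Callsign=N0CALL
#     Password=secret123
#
# load_ini_as_dict('/etc/example') returns
# {'General': {'callsign': 'N0CALL', 'password': '###__REDACTED__###'}}
# because option names are lower-cased and any key containing "pass" is
# redacted before being stored.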
def creation_date(path_to_file):
"""
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
"""
if platform.system() == 'Windows':
return os.path.getctime(path_to_file)
else:
stat = os.stat(path_to_file)
try:
return stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
return stat.st_mtime
def get_temp():
"""
returns system temp
"""
logger.info("Reading CORE TEMP...")
with open("/sys/class/thermal/thermal_zone0/temp") as f:
CPUTemp = f.read()
F = str(int(CPUTemp)/1000.0 * 1.8 + 32)
C = (float(F) - 32) * 5.0/9.0
return F, C
def get_hostname():
"""
return system hostname
"""
return socket.gethostname()
def uptime1():
'''
Return uptimes based on seconds since last boot
'''
with open('/proc/uptime', 'r') as f:
uptime_seconds = float(f.readline().split()[0])
seconds = str(int(uptime_seconds % 60))
minutes = str(int(uptime_seconds /60 % 60))
hours = str(int(uptime_seconds / 60 / 60 % 24))
days = str(int(uptime_seconds / 60 /60 / 24))
# Time unit strings
time_d = ' days, '
time_h = ' hours, '
time_m = ' minutes'
time_s = ' seconds.'
# Change time strings for lower units, prepend zeros
if int(days) == 1:
time_d = ' day, '
if int(hours) <= 9:
hours = '0' + hours
if int(hours) == 1:
time_h = 'hour '
if int(minutes) <= 9:
minutes = '0' + minutes
if int(minutes) == 1:
time_m = ' minute '
if int(seconds) <= 9:
seconds = '0' + seconds
if int(seconds) == 1:
time_s = ' second.'
#print("")
#print(days + time_d + hours + ':' + minutes + ':' + seconds)
#print('Uptime is ' +days + time_d + hours + time_h + minutes + time_m +' and ' + seconds + time_s)
return days, hours, minutes, seconds
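# Worked example of the conversion above (illustrative): an uptime of
# 93784 seconds gives days = "1", hours = "02", minutes = "03",
# seconds = "04", i.e. roughly "1 day, 02:03:04".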
def get_gateway_ping():
"""
pings the gateway to get an idea of lan quality
"""
logger.info("Reading GW LATENCY...")
cmd = """gw=$(route -n | grep UG | awk '{ print $2 }'); ping -c1 $gw | grep '64 bytes'| cut -d'=' -f4"""
return subprocess.check_output(cmd, shell=True).decode('utf-8').strip()
def get_internet_ping():
"""
pings the internet to get an idea of wan quality
"""
logger.info("Reading LATENCY...")
cmd = """ping -c1 dxmini.com| grep '64 bytes'| cut -d'=' -f4"""
return subprocess.check_output(cmd, shell=True).decode('utf-8').strip()
def uptime():
"""
return raspi uptime
"""
logger.info("Reading UPTIME")
return float(subprocess.check_output(['cat', '/proc/uptime']).decode('utf-8').split()[0])
def get_service_tag():
"""
returns service tag from flash memory
generates one if it's not found
"""
serialfile = '/etc/dxmini_serial'
if os.path.isfile(serialfile):
logger.info("Reading SERVICE_TAG...")
with open(serialfile,"r") as fi:
serial = fi.read()
return int(serial)
else:
serial = "".join(str(uuid.uuid4()).split('-')[3:]).upper()
logger.warn("Generating new DXMNI service_tag {}".format(serial))
serial = str(int(serial, 16))
with open(serialfile,"w") as fi:
fi.write(serial)
fi.close()
return int(serial)
def get_shadow_tag():
"""
returns shadow tag from flash memory
generates one if it's not found
"""
serialfile = '/etc/dxmini_shadow'
if os.path.isfile(serialfile):
logger.info("Reading SHADOW_TAG...")
with open(serialfile,"r") as fi:
serial = fi.read()
return serial
else:
serial = "".join(str(uuid.uuid4()).split('-')).upper()
logger.warn("Generating new DXMNI shadow_tag {}".format(serial))
with open(serialfile,"w") as fi:
fi.write(serial)
fi.close()
return serial
def get_upnp_settings():
"""
retrieves current upnp configurations
"""
upnp_enabled_cmd = """/bin/grep '$DAEMON -a' /usr/local/sbin/pistar-upnp.service | /bin/grep -v -e '^#' | /usr/bin/awk '{ print $4 " " $5 " " $6}'"""
upnp_disabled_cmd = """/bin/grep '$DAEMON -a' /usr/local/sbin/pistar-upnp.service | /bin/grep -e '^#' | /usr/bin/awk '{ print $5 " " $6 " " $7}'"""
if os.path.isfile('/usr/local/sbin/pistar-upnp.service'):
enabled_upnp = subprocess.check_output(upnp_enabled_cmd, shell=True).decode('utf-8')
disabled_upnp = subprocess.check_output(upnp_disabled_cmd, shell=True).decode('utf-8')
enabled_ports = {'UDP': [], 'TCP': []}
disabled_ports = {'UDP': [], 'TCP': []}
for line in enabled_upnp.rstrip().split('\n'):
fields = line.split()
inside = int(fields[0])
outside = int(fields[1])
proto = fields[2]
enabled_ports[proto].append({"in": inside, "out": outside})
for line in disabled_upnp.rstrip().split('\n'):
fields = line.split()
inside = int(fields[0])
outside = int(fields[1])
proto = fields[2]
disabled_ports[proto].append({"in": inside, "out": outside})
return {"enabled": enabled_ports, "disabled": disabled_ports}
else:
return dict()
def get_dxmini_panel_version():
"""
ew this is hacky
"""
logger.info("Reading PHP_APPLICATION_VERSION_DATA...")
r = requests.get('http://localhost/config/version.php')
resp = r.json()
return (resp['VENDOR_PANEL_VERSION'], resp['VENDOR_PANEL_REVISION'], resp['PISTAR_VERSION'])
def get_model():
"""
returns model from flash
"""
logger.info("Reading DEVICE_MODEL...")
serialfile = '/etc/dxmini_model'
if os.path.isfile(serialfile):
with open(serialfile,"r") as fi:
serial = fi.read()
return serial.strip()
else:
return None
def get_timezone():
"""return tzinfo"""
logger.info("Reading TIMEZONE...")
myfile = '/etc/timezone'
if os.path.isfile(myfile):
with open(myfile,"r") as fi:
tz = fi.read()
return str(tz).strip()
else:
return None
def get_revision():
"""
returns revision from flash
"""
logger.info("Reading DEVICE_REVISION...")
serialfile = '/etc/dxmini_revision'
if os.path.isfile(serialfile):
with open(serialfile,"r") as fi:
serial = fi.read()
return serial.strip()
else:
return None
def get_issue():
"""
returns distro from /etc/issue.net
"""
logger.info("Reading DEVICE_ISSUE...")
serialfile = '/etc/issue.net'
if os.path.isfile(serialfile):
with open(serialfile,"r") as fi:
serial = fi.read()
return serial.strip()
else:
return None
def get_historical_calls(mmdvm_config):
"""
get callsign history for this device, appending the current call when it changes
"""
histuser_file = '/etc/.callsign_history'
if not os.path.isfile(histuser_file):
logger.info("Scheduling rebuild of callsign index")
history = {"first_call": get_current_call(mmdvm_config), "callsign_history": [get_current_call(mmdvm_config)]}
with open(histuser_file,"w") as fi:
logger.info("Build new callsign index")
fi.write(json.dumps(history, indent=3))
return history
else:
with open(histuser_file,"r") as fi:
history = json.loads(fi.read())
if get_current_call(mmdvm_config) not in history['callsign_history']:
logger.info("Adding new CALL")
history['callsign_history'].append(get_current_call(mmdvm_config))
with open(histuser_file,"w") as fi:
logger.info("Write new callsign index")
fi.write(json.dumps(history, indent=3))
return history
else:
logger.info("Reading CALLSIGN...")
return history
def get_historical_rids(mmdvm_config):
"""
get historical radio ids
"""
histuser_file = '/etc/.rid_history'
if not os.path.isfile(histuser_file):
logger.info("Need to build DMR_ID index")
history = {"first_rid": get_current_rid(mmdvm_config), "rid_history": [get_current_rid(mmdvm_config)]}
with open(histuser_file,"w") as fi:
logger.info("Build new DMR_ID index")
fi.write(json.dumps(history, indent=3))
return history
else:
with open(histuser_file,"r") as fi:
history = json.loads(fi.read())
if get_current_rid(mmdvm_config) not in history['rid_history']:
logger.info("Adding new DMR_ID")
history['rid_history'].append(get_current_rid(mmdvm_config))
with open(histuser_file,"w") as fi:
logger.info("Write new DMR_ID index")
fi.write(json.dumps(history, indent=3))
return history
else:
logger.info("Reading DMR_ID...")
return history
def get_current_call(config):
"""
returns the current callsign from the MMDVM config, caching it to flash
"""
firstuser_file = '/etc/first_user'
first_user = config['general'].get('callsign', None)
if first_user:
with open(firstuser_file,"w") as fi:
fi.write(first_user)
else:
return "N0CALL"
return first_user
def get_current_rid(config):
"""
returns current radio id
"""
firstuser_file = '/etc/first_rid'
first_user = config['general'].get('id', None)
if first_user:
with open(firstuser_file,"w") as fi:
fi.write(first_user)
else:
return "0000000"
return first_user
def get_customer_production_date():
"""
returns when the DXMINI goes into production
"""
serialfile = '/.in_production'
if os.path.isfile(serialfile):
return int(creation_date('/.in_production'))
else:
return None
def file_age_in_seconds(pathname):
"""
return a files exact age in seconds
"""
if not os.path.isfile(pathname):
touch(pathname)
return time.time() - os.stat(pathname)[stat.ST_MTIME]
def get_interface():
"""
get interface stats
"""
logger.info("Reading INTERFACE COUNTERS...")
hwaddr = subprocess.check_output("ifconfig wlan0| head -1 | awk '{ print $5 }'", shell=True).decode('utf-8').strip()
meta_1 = [{ x.strip().split('=')[0].lower().replace(' ', '_') : x.strip().split('=')[1] } for x in subprocess.check_output("iwconfig wlan0 | grep Tx-Power", shell=True).decode('utf-8').strip().split(' ')]
meta_2 = [{ x.strip().split('=')[0].lower().replace(' ', '_') : x.strip().split('=')[1] } for x in subprocess.check_output("iwconfig wlan0 | grep 'Signal level'", shell=True).decode('utf-8').strip().split(' ')]
meta_3 = [{ x.strip().split(':')[0].lower().replace(' ', '_') : x.strip().split(':')[1] } for x in subprocess.check_output("iwconfig wlan0 | grep 'Access Point'", shell=True).decode('utf-8').strip().split(' ')]
meta_4 = [{ x.strip().split(':')[0].lower().replace(' ', '_') : x.strip().split(':')[1] } for x in subprocess.check_output("iwconfig wlan0 | grep 'Missed beacon'", shell=True).decode('utf-8').strip().split(' ')]
meta_5 = [{ x.strip().split(':')[0].lower().replace(' ', '_') : x.strip().split(':')[1] } for x in subprocess.check_output("iwconfig wlan0 | grep 'nvalid crypt'", shell=True).decode('utf-8').strip().split(' ')]
rx_packets = [{ x.split(':')[0].lower().replace(' ', '_') : x.split(':')[1]} for x in subprocess.check_output("ifconfig wlan0 | grep 'RX packets'", shell=True).decode('utf-8').strip().split(' ')[1:]]
tx_packets = [{ x.split(':')[0].lower().replace(' ', '_') : x.split(':')[1]} for x in subprocess.check_output("ifconfig wlan0 | grep 'TX packets'", shell=True).decode('utf-8').strip().split(' ')[1:]]
new_dict = {}
for item in meta_1:
for k, v in item.items():
new_dict[k] = v
for item in meta_2:
for k, v in item.items():
new_dict[k] = v
for item in meta_3:
for k, v in item.items():
new_dict[k] = v
for item in meta_4:
for k, v in item.items():
new_dict[k] = v
for item in meta_5:
for k, v in item.items():
new_dict[k] = v
new_dict['rx_counts'] = dict()
for item in rx_packets:
for k, v in item.items():
new_dict['rx_counts'][k] = v
new_dict['tx_counts'] = dict()
for item in tx_packets:
for k, v in item.items():
new_dict['tx_counts'][k] = v
del new_dict['access_point']
new_dict['hwaddr_ap'] = ":".join(subprocess.check_output("iwconfig wlan0 | grep 'Access Point'", shell=True).decode('utf-8').strip().split(' ')[2].split(':')[1:]).strip().lower()
new_dict['hwaddr'] = hwaddr.lower()
new_dict['ipaddr'] = get_nat_ip()
return new_dict
def get_nat_ip():
"""
get the local address
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable, neat trick eh?
s.connect(('10.255.255.255', 1))
a = s.getsockname()[0]
except:
a = '127.0.0.1'
finally:
s.close()
return a
def register_client():
mmdvm_config = get_mmdvm_config()
historical_calls = get_historical_calls(mmdvm_config)
historical_rids = get_historical_rids(mmdvm_config)
gw_ping = get_gateway_ping()
net_ping = get_internet_ping()
temp = get_temp()
web_panel_version, web_panel_rev, web_panel_upstream_version = get_dxmini_panel_version()
hello = {
"entry": {
"user": {
"api_token": get_shadow_tag(),
"tz": get_timezone(),
"activation_dt": { 'dt': get_customer_production_date(), 'datetime': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(get_customer_production_date()))},
"identities": {
"ham": {
"initial": historical_calls['first_call'],
"history": historical_calls['callsign_history'],
"current": get_current_call(mmdvm_config),
},
"dmr": {
"initial": historical_rids['first_rid'],
"history": historical_rids['rid_history'],
"current": get_current_rid(mmdvm_config),
},
},
"service_tag": get_service_tag(),
"configuration": {
#"mmdvm": get_mmdvm_config(),
"ircdbgateway": get_ircdbgateway_config(),
"dstarrepeater": get_dstarrepeater_config(),
"dapnetgateway": load_ini_as_dict('/etc/dapnetgateway'),
"dstar-radio.mmdvmhost": load_ini_as_dict('/etc/dstar-radio.mmdvmhost'),
"pistar-remote": load_ini_as_dict('/etc/pistar-remote'),
"ysf2dmr": load_ini_as_dict('/etc/ysf2dmr'),
"ysf2nxdn": load_ini_as_dict('/etc/ysf2nxdn'),
"ysf2p25": load_ini_as_dict('/etc/ysf2p25'),
"ysfgateway": load_ini_as_dict('/etc/ysfgateway'),
"pistar-keeper": load_ini_as_dict('/etc/pistar-keeper'),
"pistar-remote": load_ini_as_dict('/etc/pistar-remote'),
"p25gateway": load_ini_as_dict('/etc/p25gateway'),
"mmdvmhost": load_ini_as_dict('/etc/mmdvmhost')
}
},
"network": {
"upnp": get_upnp_settings(),
"latency": {
"lan": { "scale": gw_ping.split(" ")[1], "value": gw_ping.split(" ")[0]},
"wan": { "scale": net_ping.split(" ")[1], "value": net_ping.split(" ")[0]},
},
"hostname": get_hostname(),
"interface": get_interface(),
},
"dxmini": {
"system_temp": { "f": temp[0], "c": temp[1] },
"pistar_version": get_pistar_image_version(),
"os_version": get_issue(),
"web_version": {"version": web_panel_version, "rev": web_panel_rev, "upstream_pistar_release": web_panel_upstream_version},
"device_version": {"rev": get_revision(), "model": get_model() },
"device_uptime": { "days": uptime1()[0], "hours": uptime1()[1], "minutes": uptime1()[2], "seconds": uptime1()[3], "total_seconds": uptime() },
}
}
}
try:
logger.info("Sending registration...")
client_registration_url = "https://api.dxmini.com/dxm-api/v1.0/register"
logger.debug("JSON Payload : {}".format(hello))
announce = requests.post(client_registration_url, data=json.dumps(hello), verify=True, timeout=5)
if not announce.status_code == requests.codes.ok:
logger.error("dxmini api error, http status code fail {}".format(announce.status_code))
except requests.exceptions.HTTPError as errh:
logger.error("dxmini api error : HTTPError {}".format(errh))
except requests.exceptions.ConnectionError as errc:
logger.error("dxmini api error : ConnectionError {}".format(errc))
except requests.exceptions.Timeout as errt:
logger.error("dxmini api error : Timeout {}".format(errt))
except requests.exceptions.RequestException as err:
logger.error("dxmini api error : RequestException {}".format(err))
def register_activity():
service_tag = get_service_tag()
hello = {
"tz": get_timezone(),
"logs": fetch_activity(),
"keyevents": fetch_keyevents()
}
try:
logger.info("Sending activity...")
client_registration_url = "https://api.dxmini.com/dxm-api/v1.0/activity/{}".format(service_tag)
logger.info("JSON Payload : {}".format(hello))
announce = requests.post(client_registration_url, data=json.dumps(hello), verify=True, timeout=5)
if not announce.status_code == requests.codes.ok:
logger.error("dxmini api error, http status code fail {}".format(announce.status_code))
except requests.exceptions.HTTPError as errh:
logger.error("dxmini api error : HTTPError {}".format(errh))
except requests.exceptions.ConnectionError as errc:
logger.error("dxmini api error : ConnectionError {}".format(errc))
except requests.exceptions.Timeout as errt:
logger.error("dxmini api error : Timeout {}".format(errt))
except requests.exceptions.RequestException as err:
logger.error("dxmini api error : RequestException {}".format(err))
def get_pistar_image_version():
"""
retrieve some settings from the RPi base image
"""
#############################
config = configparser.ConfigParser()
logger.info("Reading pistar-release...")
config.read('/etc/pistar-release')
return config._sections['Pi-Star']
def get_dstarrepeater_config():
"""
retrieve some settings from dstarrepeater config while avoiding passwords
"""
distilled_config = {}
#############################
configfile = '/etc/dstarrepeater'
logger.info("Reading dstarrepeater...")
if os.path.isfile(configfile):
with open(configfile,"r") as fi:
for line in fi:
key = line.split('=')[0]
value = line.split('=')[1]
key = key.lower().strip()
if "pass" in key:
value = "###__REDACTED__###"
distilled_config[key] = value.strip()
else:
logger.error("{} : file not found".format(configfile))
return distilled_config
return distilled_config
def get_ircdbgateway_config():
"""
retrieve some settings from the ircdbgateway while avoiding leak of passwords
"""
distilled_config = {}
#############################
configfile = '/etc/ircddbgateway'
logger.info("Reading ircddbgateway...")
if os.path.isfile(configfile):
with open(configfile,"r") as fi:
for line in fi:
key = line.split('=')[0]
value = line.split('=')[1]
key = key.lower().strip()
if "pass" not in key:
distilled_config[key] = value.strip()
else:
logger.error("{} : file not found".format(configfile))
return distilled_config
return distilled_config
def get_mmdvm_config():
"""
retrieve some settings from the mmdvm if available
"""
distilled_config = {}
#############################
config = configparser.ConfigParser()
config.read('/etc/mmdvmhost')
logger.info("Reading MMDVM Host Config...")
try:
distilled_config['general'] = config._sections['General']
except:
distilled_config['general'] = {}
try:
mmdvm_info = {}
for k, v in config._sections['Info'].items():
k = k.lower()
mmdvm_info[k] = v.strip("\"") # remove wrapping quotes on some strings
distilled_config['info'] = mmdvm_info
except:
distilled_config['info'] = {}
try:
mmdvm_info = {}
for k, v in config._sections['CW Id'].items():
k = k.lower()
mmdvm_info[k] = v.strip("\"") # remove wrapping quotes on some strings
distilled_config['cw_id'] = mmdvm_info
except:
distilled_config['cw_id'] = {}
try:
mmdvm_info = {}
for k, v in config._sections['Modem'].items():
k = k.lower()
mmdvm_info[k] = v.strip("\"") # remove wrapping quotes on some strings
distilled_config['modem'] = mmdvm_info
except:
distilled_config['modem'] = {}
try:
mmdvm_info = {}
for k, v in config._sections['D-Star'].items():
k = k.lower()
mmdvm_info[k] = v.strip("\"") # remove wrapping quotes on some strings
distilled_config['d-star'] = mmdvm_info
except:
distilled_config['d-star'] = {}
try:
mmdvm_info = {}
for k, v in config._sections['DMR'].items():
k = k.lower()
mmdvm_info[k] = v.strip("\"") # remove wrapping quotes on some strings
distilled_config['dmr'] = mmdvm_info
except:
distilled_config['dmr'] = {}
try:
mmdvm_info = {}
for k, v in config._sections['System Fusion'].items():
k = k.lower()
mmdvm_info[k] = v.strip("\"") # remove wrapping quotes on some strings
distilled_config['ysf'] = mmdvm_info
except:
distilled_config['ysf'] = {}
try:
mmdvm_info = {}
for k, v in config._sections['P25'].items():
k = k.lower()
mmdvm_info[k] = v.strip("\"") # remove wrapping quotes on some strings
distilled_config['p25'] = mmdvm_info
except:
distilled_config['p25'] = {}
try:
mmdvm_info = {}
for k, v in config._sections['NXDN'].items():
k = k.lower()
mmdvm_info[k] = v.strip("\"") # remove wrapping quotes on some strings
distilled_config['nxdn'] = mmdvm_info
except:
distilled_config['nxdn'] = {}
try:
mmdvm_info = {}
for k, v in config._sections['POCSAG'].items():
k = k.lower()
mmdvm_info[k] = v.strip("\"") # remove wrapping quotes on some strings
distilled_config['pocsag'] = mmdvm_info
except:
distilled_config['pocsag'] = {}
try:
mmdvm_info = {}
for k, v in config._sections['Nextion'].items():
k = k.lower()
mmdvm_info[k] = v.strip("\"") # remove wrapping quotes on some strings
distilled_config['nextion'] = mmdvm_info
except:
distilled_config['nextion'] = {}
try:
mmdvm_info = {}
for k, v in config._sections['Remote Control'].items():
k = k.lower()
mmdvm_info[k] = v.strip("\"") # remove wrapping quotes on some strings
distilled_config['remote_control'] = mmdvm_info
except:
distilled_config['remote_control'] = {}
try:
distilled_config['networks'] = {
"dmr" : config.get('DMR', 'Enable'),
"dmr_net" : config.get('DMR Network', 'Enable'),
"nxdn" : config.get('NXDN', 'Enable'),
"nxdn_net" : config.get('NXDN Network', 'Enable'),
"dstar" : config.get('D-Star', 'Enable'),
"dstar_net" : config.get('D-Star Network', 'Enable'),
"system_fusion" : config.get('System Fusion', 'Enable'),
"system_fusion_net" : config.get('System Fusion Network', 'Enable'),
"pocsag" : config.get('POCSAG', 'Enable'),
"pocsag_net" : config.get('POCSAG Network', 'Enable'),
}
except:
distilled_config['networks'] = dict()
return distilled_config
############################
# Update functions and stuff
############################
def md5(fname):
"""
calculate md5sum for files
"""
hash_md5 = hashlib.md5()
try:
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
except:
return False
def download_file(url, filename):
"""
chunk download so we can support very large files
"""
logger.info("Downloading {}".format(url))
response = requests.get(url, stream=True)
# Throw an error for bad status codes
response.raise_for_status()
with open(filename, 'wb') as handle:
for block in response.iter_content(1024):
handle.write(block)
def web_panel_updater(manifest):
"""
update the dashboard code
"""
##########
# Download
dashboard_filename = 'dashboard.tar.gz'
latest_tag = manifest['latest_version']
latest_tarball = manifest['version_map'][latest_tag]['url']
latest_tarball_md5 = manifest['version_map'][latest_tag]['md5']
latest_revision = manifest['version_map'][latest_tag]['revision']
latest_version = manifest['version_map'][latest_tag]['version']
logger.debug("Dashboard md5sum {} remote md5sum {}".format(md5(dashboard_filename), latest_tarball_md5))
web_panel_version, web_panel_rev, web_panel_upstream_version = get_dxmini_panel_version()
if ((latest_version == web_panel_version) and int(latest_revision) <= int(web_panel_rev)):
UPDATE = False
logger.info("Dashboard is already up to date")
else:
if ((latest_version == web_panel_version) and int(latest_revision) != int(web_panel_rev)):
logger.info("New revision found, starting update...")
else:
lgoger.info("New version found, starting update...")
UPDATE = True
try:
os.unlink(dashboard_filename)
except:
pass
logger.info("Found {} downloading...".format(latest_tarball.split('/')[-1:][0]))
download_file(latest_tarball, dashboard_filename)
#########
# Update!
if UPDATE:
logger.info("DXMINI control panel extracting from tar archive")
try:
shutil.rmtree('./htdocs')
shutil.rmtree('/var/www/dashboard')
except:
pass
tf = tarfile.open(dashboard_filename)
tf.extractall("./htdocs")
##os.unlink("/var/www/dashboard")
os.rename("./htdocs/htdocs", "/var/www/dashboard")
def preinst_script(manifest):
"""
update the update shell script wrapper
"""
updater_filename = "postinst.sh"
##########
# Download
latest_script = manifest['preinstall']
latest_script_md5 = manifest['preinstall_script_md5']
logger.debug("Script md5sum {} remote md5sum {}".format(md5(updater_filename), latest_script_md5))
if md5(updater_filename) == latest_script_md5:
logger.info("Preinstall script is up to date!")
else:
try:
os.unlink(updater_filename)
except:
pass
logger.info("Update script downloading {}".format(latest_script))
download_file(latest_script, updater_filename)
#########
# Update!
cmd = "sudo mv {name} /usr/bin; chmod 755 /usr/bin/{name}; bash -x /usr/bin/{name}".format(name=updater_filename)
logger.info("Running pre install scripts...")
output = subprocess.check_output(cmd, shell=True).decode('utf-8').strip()
logger.warn(output)
def postinst_script(manifest):
"""
update the update shell script wrapper
"""
updater_filename = "postinst.sh"
##########
# Download
latest_script = manifest['postinstall']
latest_script_md5 = manifest['postinstall_script_md5']
logger.debug("Script md5sum {} remote md5sum {}".format(md5(updater_filename), latest_script_md5))
if md5(updater_filename) == latest_script_md5:
logger.info("Postinstall script is up to date!")
else:
try:
os.unlink(updater_filename)
except:
pass
logger.info("Postinstall script downloading {}".format(latest_script))
download_file(latest_script, updater_filename)
#########
# Update!
cmd = "sudo mv {name} /usr/bin; chmod 755 /usr/bin/{name}; bash -x /usr/bin/{name}".format(name=updater_filename)
logger.info("Running post install scripts...")
output = subprocess.check_output(cmd, shell=True).decode('utf-8').strip()
#logger.warn(output)
logger.info("Install complete")
def agent_updater_agent_thing(manifest):
latest_python_agent = manifest['latest_python_agent']
p = subprocess.Popen("dxmini --version", stdout=subprocess.PIPE, shell=True)
(current_version, err) = p.communicate()
# python3 dxmini-agent_updater_agent_thing-0.0.1/setup.py install
current_version = current_version.decode("utf-8").strip()
latest_python_agent = str(latest_python_agent).strip()
logger.debug("Python agent local ver {} remote ver {}".format(current_version, latest_python_agent))
if StrictVersion(current_version) != StrictVersion(latest_python_agent):
logger.info("Python agent needs to update.")
# Download agent
cmd = "rm -rf /dxmini-update_agent"
output = subprocess.check_output(cmd, shell=True).decode('utf-8').strip()
cmd = "git clone https://github.com/jondkelley/dxmini-update_agent.git"
logger.info("{}".format(cmd))
output = subprocess.check_output(cmd, shell=True).decode('utf-8').strip()
# REPO_PATH = "https://github.com/jondkelley/dxmini-update_agent/archive/{}.tar.gz".format(latest_python_agent)
# download_file(REPO_PATH, 'dxmini-agent.tar.gz')
# tf = tarfile.open('dxmini-agent.tar.gz')
# tf.extractall(".")
# Install new agent
logger.info("Running setup.py install")
#p = subprocess.Popen("sudo python3 dxmini-update_agent-{}/setup.py install".format(latest_python_agent), stdout=subprocess.PIPE, shell=True)
#(out, err) = p.communicate()
#print(out)
cmd = "cd /dxmini-update_agent/; python3 setup.py install".format(latest_python_agent)
output = subprocess.check_output(cmd, shell=True).decode('utf-8').strip()
logger.warn("Python {}".format(output))
cmd = "rm -rf /dxmini-update_agent"
output = subprocess.check_output(cmd, shell=True).decode('utf-8').strip()
logger.info("Removing install files {}".format(output))
logger.info("Agent update complete. Thanks for updating me!")
else:
logger.info("Agent is up to date!")
class RootCommand():
"""
agent commands for docopt
"""
def __init__(self, args):
self.args = args
def provision(self):
with MicroSdCard("/"):
if not os.path.isfile('/.in_production'):
touch('/.in_production')
else:
logger.info("Registration file, OK")
## Generate serial number
if not os.path.isfile('/etc/dxmini_serial'):
newly_service_tag = get_service_tag()
logger.info("Hooray, new service tag number {tag}".format(tag=newly_service_tag))
else:
logger.info("Support file, OK")
def update_agent(self):
"""
download and install latest agent
"""
with MicroSdCard("/"):
r = requests.get(DXMINI_MANIFEST_URL)
manifest = r.json()
if manifest['_self_federated']:
try:
r = requests.get(manifest['_self_federated_url'])
manifest = r.json()
except:
logger.error("Federation manifest request httpclient failure; defaulting to what github sent us")
pass
else:
logger.debug("Federation false; using github")
#print(json.dumps(manifest, indent=3))
agent_updater_agent_thing(manifest)
def update_web(self):
"""
update dxmini
"""
with MicroSdCard("/"):
r = requests.get(DXMINI_MANIFEST_URL)
manifest = r.json()
if manifest['_self_federated']:
try:
r = requests.get(manifest['_self_federated_url'])
manifest = r.json()
except:
logger.error("Federation manifest request httpclient failure; defaulting to what github sent us")
pass
else:
logger.debug("Federation false; using github")
#print(json.dumps(manifest, indent=3))
preinst_script(manifest)
web_panel_updater(manifest)
postinst_script(manifest)
def register(self):
"""
registers the dxmini
"""
with MicroSdCard("/"):
mk_ping_crontab()
register_client()
def activity(self):
"""
registers the dxmini activity
"""
register_activity()
def version(self):
"""
print module version and exit
"""
print(VERSION)
exit(0)
def main():
"""Parse the CLI"""
arguments = docopt(__doc__)
cmd = RootCommand(arguments)
method = get_arg_option(arguments)
getattr(cmd, method)()
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
``` |
{
"source": "jondkelley/graphite-plugin-agent",
"score": 2
} |
#### File: graphite-plugin-agent/graphite_plugin_agent/agent.py
```python
import graphitesend
from graphitesend import GraphiteSendException
import helper
import importlib
import json
import logging
import os
import requests
import socket
import sys
import Queue as queue
import threading
import time
import re
from graphite_plugin_agent import __version__
from graphite_plugin_agent import plugins
LOGGER = logging.getLogger(__name__)
class GraphitePluginAgent(helper.Controller):
"""The GraphitePluginAgent class implements a agent that polls plugins
every minute and reports the state to Graphite.
"""
IGNORE_KEYS = ['graphite_host', 'graphite_port',
'poll_interval', 'wake_interval']
MAX_METRICS_PER_REQUEST = 10000
WAKE_INTERVAL = 60
def __init__(self, args, operating_system):
"""Initialize the GraphitePluginAgent object.
:param argparse.Namespace args: Command line arguments
:param str operating_system: The operating_system name
"""
super(GraphitePluginAgent, self).__init__(args, operating_system)
self.derive_last_interval = dict()
self.last_interval_start = None
self.min_max_values = dict()
self._wake_interval = (self.config.application.get('wake_interval') or
self.config.application.get('poll_interval') or
self.WAKE_INTERVAL)
self.next_wake_interval = int(self._wake_interval)
self.publish_queue = queue.Queue()
self.threads = list()
info = tuple([__version__] + list(self.system_platform))
LOGGER.info('Agent v%s initialized, %s %s v%s', *info)
def setup(self):
"""Setup the internal state for the controller class. This is invoked
on Controller.run().
Items requiring the configuration object should be assigned here due to
startup order of operations.
"""
self.last_interval_start = time.time()
@property
def agent_data(self):
"""Return the agent data section of the Graphite Platform data payload
:rtype: dict
"""
return {'host': socket.gethostname(),
'pid': os.getpid(),
'version': __version__}
def poll_plugin(self, plugin_name, plugin, config):
"""Kick off a background thread to run the processing task.
:param graphite_plugin_agent.plugins.base.Plugin plugin: The plugin
:param dict config: The config for the plugin
"""
if not isinstance(config, (list, tuple)):
config = [config]
for instance in config:
thread = threading.Thread(target=self.thread_process,
kwargs={'config': instance,
'name': plugin_name,
'plugin': plugin,
'poll_interval':
int(self._wake_interval)})
thread.start()  # start() runs the poll in a background thread; run() would block the caller
self.threads.append(thread)
def process(self):
"""This method is called after every sleep interval. If the intention
is to use an IOLoop instead of sleep interval based daemon, override
the run method.
"""
start_time = time.time()
self.start_plugin_polling()
# Sleep for a second while threads are running
while self.threads_running:
time.sleep(1)
self.threads = list()
self.send_data_to_graphite()
duration = time.time() - start_time
self.next_wake_interval = self._wake_interval - duration
if self.next_wake_interval < 1:
LOGGER.warning('Poll interval took greater than %i seconds',
duration)
self.next_wake_interval = int(self._wake_interval)
LOGGER.info('Stats processed in %.2f seconds, next wake in %i seconds',
duration, self.next_wake_interval)
def process_min_max_values(self, component):
"""Agent keeps track of previous values, so compute the differences for
min/max values.
:param dict component: The component to calc min/max values for
"""
guid = component['guid']
name = component['name']
if guid not in self.min_max_values.keys():
self.min_max_values[guid] = dict()
if name not in self.min_max_values[guid].keys():
self.min_max_values[guid][name] = dict()
for metric in component['metrics']:
min_val, max_val = self.min_max_values[guid][name].get(metric,
(None, None))
value = component['metrics'][metric]['total']
if min_val is not None and min_val > value:
min_val = value
if max_val is None or max_val < value:
max_val = value
if component['metrics'][metric]['min'] is None:
component['metrics'][metric]['min'] = min_val or value
if component['metrics'][metric]['max'] is None:
component['metrics'][metric]['max'] = max_val
self.min_max_values[guid][name][metric] = min_val, max_val
def send_data_to_graphite(self):
metrics = 0
components = list()
while self.publish_queue.qsize():
(name, data, last_values) = self.publish_queue.get()
self.derive_last_interval[name] = last_values
if isinstance(data, list):
for component in data:
self.process_min_max_values(component)
components.append(component)
metrics += len(component['metrics'].keys())
if metrics >= self.MAX_METRICS_PER_REQUEST:
self.send_components(components, metrics)
components = list()
metrics = 0
elif isinstance(data, dict):
self.process_min_max_values(data)
components.append(data)
metrics += len(data['metrics'].keys())
if metrics >= self.MAX_METRICS_PER_REQUEST:
self.send_components(components, metrics)
components = list()
metrics = 0
LOGGER.debug('Done, will send remainder of %i metrics', metrics)
self.send_components(components, metrics)
def graphite_send(self, name, value, guid, suffix,
host_name, component_name="default"):
"""
call Graphite platform using graphitesend
"""
# replace fqdn with underscores
host_name = re.sub(r"\.", "_", host_name)
host_name = self.config.get('localhost_name', host_name)
suffix = "_{0}".format(suffix)
prefix = "graphite_agent.{0}.{1}".format(host_name, guid)
timeout = self.config.get('graphite_timeout', 2)
g = graphitesend.init(prefix=prefix, suffix=suffix,
graphite_server=self.config.application['graphite_host'],
graphite_port=self.config.application['graphite_port'],
system_name=component_name, timeout_in_seconds=timeout)
g.send(name, value)
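# Illustrative note: with the prefix/suffix above, graphitesend produces metric
# paths roughly of the form
#   graphite_agent.<host_name>.<guid>.<component_name>.<metric>_<suffix>
# (the exact joining of prefix, system_name and suffix is up to graphitesend).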
def send_components(self, components, metrics):
"""Create the headers and payload to send to Graphite platform using
the graphitesend library
"""
if not metrics:
LOGGER.warning('No metrics to send to Graphite this interval')
return
LOGGER.info('Sending %i metrics to Graphite', metrics)
body = {'agent': self.agent_data, 'components': components}
LOGGER.debug(body)
for component in components:
host_name = self.agent_data['host']
component_name = component['name']
# filter NewRelic stuff away
guid = component['guid']
if "newrelic" in guid:
guid = re.sub(r"^com\.(.*)\.newrelic_", "", guid)
metrics = component['metrics']
host = component['name']
for metric in metrics:
objects = {}
objects['total'] = metrics[metric]['total']
objects['max'] = metrics[metric]['max']
objects['min'] = metrics[metric]['min']
objects['count'] = metrics[metric]['count']
# filter NewRelic stuff away
metric = re.sub(r"[\[/]", ".", metric) # match [ or /
metric = re.sub(r"\]", "", metric) # remove ]
metric = re.sub(r"^Component\.", "", metric) # wipe component
for suffix in objects:
try:
self.graphite_send(metric, objects[suffix], guid,
suffix, host_name, component_name)
except GraphiteSendException as error:
LOGGER.error('Graphite error: %s', error)
@staticmethod
def _get_plugin(plugin_path):
"""Given a qualified class name (eg. foo.bar.Foo), return the class
:rtype: object
"""
try:
package, class_name = plugin_path.rsplit('.', 1)
except ValueError:
return None
try:
module_handle = importlib.import_module(package)
class_handle = getattr(module_handle, class_name)
return class_handle
except ImportError:
LOGGER.exception('Attempting to import %s', plugin_path)
return None
def start_plugin_polling(self):
"""Iterate through each plugin and start the polling process."""
for plugin in [key for key in self.config.application.keys()
if key not in self.IGNORE_KEYS]:
LOGGER.info('Enabling plugin: %s', plugin)
plugin_class = None
# If plugin is part of the core agent plugin list
if plugin in plugins.available:
plugin_class = self._get_plugin(plugins.available[plugin])
# If plugin is in config and a qualified class name
elif '.' in plugin:
plugin_class = self._get_plugin(plugin)
# If plugin class could not be imported
if not plugin_class:
LOGGER.error('Enabled plugin %s not available', plugin)
continue
self.poll_plugin(plugin, plugin_class,
self.config.application.get(plugin))
@property
def threads_running(self):
"""Return True if any of the child threads are alive
:rtype: bool
"""
for thread in self.threads:
if thread.is_alive():
return True
return False
def thread_process(self, name, plugin, config, poll_interval):
"""Created a thread process for the given name, plugin class,
config and poll interval. Process is added to a Queue object which
used to maintain the stack of running plugins.
:param str name: The name of the plugin
:param graphite_plugin_agent.plugin.Plugin plugin: The plugin class
:param dict config: The plugin configuration
:param int poll_interval: How often the plugin is invoked
"""
instance_name = "%s:%s" % (name, config.get('name', 'unnamed'))
obj = plugin(config, poll_interval,
self.derive_last_interval.get(instance_name))
obj.poll()
self.publish_queue.put((instance_name, obj.values(),
obj.derive_last_interval))
@property
def wake_interval(self):
"""Return the wake interval in seconds as the number of seconds
until the next minute.
:rtype: int
"""
return self.next_wake_interval
def main():
helper.parser.description('The Graphite Plugin Agent polls various '
'services and sends the data to the Graphite '
'Platform')
helper.parser.name('graphite_plugin_agent')
argparse = helper.parser.get()
argparse.add_argument('-C',
action='store_true',
dest='configure',
help='Run interactive configuration')
args = helper.parser.parse()
if args.configure:
print('Configuration')
sys.exit(0)
helper.start(GraphitePluginAgent)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
main()
``` |
{
"source": "jondlove/vincent-smoketest",
"score": 2
} |
#### File: jondlove/vincent-smoketest/smoketest.py
```python
import yaml
import requests
import json
import argparse
import logging
import re
import sys
from lxml import etree
from StringIO import StringIO
class tcol:
SUCCESS = '\033[92m'
FAIL = '\033[91m'
ENDC = '\033[0m'
class SmokeAssertions:
def responseCode(self, response, expected):
logging.debug ("{} : test expected response code".format(domain))
if response.status_code == expected:
return True
else:
logging.error("{} : FAIL : response code mismatch [e: {} || got: {}]".format(domain, expected, response.status_code))
return False
def responseContains(self, response, expected):
logging.debug ("{} : test expected response body".format(domain))
if re.search(expected, response.content):
return True
else:
logging.error("{} : FAIL : object not in response body [e: {}]".format(domain, expected))
return False
def responseEncoding(self, response, expected):
logging.debug ("{} : test expected response encoding".format(domain))
if 'content-encoding' in response.headers:
if expected.lower() in response.headers['content-encoding'].lower():
return True
else:
logging.error("{} : FAIL : content encoding mismatch [e: {} || got: {}]".format(domain, expected, response.headers['content-encoding']))
return False
else:
logging.error("{} : FAIL : missing header [e: content-encoding]".format(domain, response.url))
return False
def responseFormat(self, response, expected):
logging.debug ("{} : test response format", domain)
if expected.lower() == 'json':
try:
response.json()
return True
except ValueError:
logging.error("{} : FAIL : invalid format [e: json]".format(domain))
return False
logging.warn("cannot test for '{}'".format(expected))
return False
def responseUrl(self, response, expected):
logging.debug ("{} : test expected response url".format(domain))
if re.match(expected, response.url):
return True
else:
logging.error("{} : FAIL : url mismatch [e: /{}/ || got: '{}']".format(domain, expected, response.url))
return False
def var_substitute(body, var_bucket):
for c in var_bucket:
if '%{}%'.format(c) in body:
logging.debug('{} : substitute %{}%'.format(domain, c))
body = body.replace('%{}%'.format(c), str(var_bucket[c]))
return body
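# Illustrative example (hypothetical values): with var_bucket = {'user_id': 42},
# var_substitute('/users/%user_id%/profile', var_bucket) returns '/users/42/profile'.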
if __name__=="__main__":
check = SmokeAssertions()
# Parse Arguments
parser = argparse.ArgumentParser(description="Smoke test a series of web endpoints")
parser.add_argument('sites', type=file, help="Name of sites YAML file to load (Default=main.yml)")
parser.add_argument('-q', '--quiet', action="store_true", help="Only show site-level pass/fails")
parser.add_argument('--loglevel', metavar="LEVEL", help="Log level (higher is more verbose). Overrides --quiet")
parser.add_argument('--version', action="version", version="%(prog)s 0.3", help="Program version")
args = parser.parse_args()
# Set up logging
logging.addLevelName(100, "FINAL")
loglevel = logging.INFO
if args.loglevel is not None:
if args.loglevel < 1:
loglevel = logging.WARN
elif args.loglevel > 1:
loglevel = logging.DEBUG
elif args.quiet:
loglevel = 100
logging.basicConfig(format='[%(levelname)7s] %(name)s : %(asctime)s : %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=loglevel)
# Load in the main endpoint list
sites = yaml.load (args.sites)
# Validate that it is, in fact, a hosts list
requests_module = requests #Store a reference to original requests module
app_in_error = False
for site in sites:
site_in_error = 0
#@TODO: Check the 'site' entry is valid
domain = site['domain']
protocol = 'https://' if site['secure'] else 'http://'
logging.info("{} : begin test".format(domain))
# Set up the cookie bucket
#cookie_bucket = {} if 'cookies' in site and site['cookies'] is True else None
requests = requests_module.session() if 'session' in site and site['session'] is True else requests_module
# Variable capture bucket!
capture_bucket = {}
# Load variables
if 'variables' in site:
for variable in site['variables']:
capture_bucket[variable] = site['variables'][variable]
# Load endpoints
endpoints = yaml.load(file(site['endpoints'], 'r'))
#@TODO: Check the 'endpoints' entry is valid
for endpoint in endpoints:
# Set up the request
url = ''.join([protocol, domain, endpoint['url']])
url = var_substitute(url, capture_bucket)
method = 'get' if not 'method' in endpoint else endpoint['method']
allow_redirects = False if not 'options' in endpoint or not 'allow_redirects' in endpoint['options'] else endpoint['options']['allow_redirects']
# Do we have any data?
payload = None
if 'data' in endpoint:
if 'body' in endpoint['data']:
body = endpoint['data']['body']
# Parse for values in capture bucket
body = var_substitute(body, capture_bucket)
# Now decide if it's raw, or form fields
if 'mode' in endpoint['data'] and endpoint['data']['mode'] != 'form':
payload = body
else:
payload = json.loads(body)
# Run the method!
r_method = getattr(requests, method)
logging.info("{} : {} '{}'".format(domain, method.upper(), url))
r = r_method (url, allow_redirects=allow_redirects, data=payload)
#@TODO: Check the 'request' method is valid
# Do we have variables to capture?
if 'capture' in endpoint:
for capture in endpoint['capture']:
mode = 'html' if not 'mode' in capture else capture['mode']
name = -1
name = name+1 if not 'name' in capture else capture['name']
capture_val = None
if mode == 'html':
parser = etree.HTMLParser()
root = etree.parse(StringIO(r.text), parser)
if 'path' in capture:
logging.debug('{} : capture {}'.format(domain, capture['path']))
results = root.xpath(capture['path'])
if len(results) == 0:
logging.warn("{} : captured nothing for {} ".format(domain, capture['path']))
else:
if len(results) > 1:
capture_val = []
for i in results:
if type(i) == etree._Element:
cval = etree.tostring(i, pretty_print=True)
else:
cval = i
if type(capture_val) is list:
capture_val.append(cval)
else:
capture_val = cval
capture_bucket[name] = capture_val
logging.debug("{} : '{}' has value(s) '{}'".format(domain, name, capture_val))
# Are we asserting that the response is valid?
if 'expected' in endpoint:
# Not all endpoints require validations (e.g. those we're capturing from)
expected = endpoint['expected']
didPass = True
if 'code' in expected:
didPass = didPass if check.responseCode(r, expected['code']) else False
if 'contains' in expected:
didPass = didPass if check.responseContains(r, expected['contains']) else False
if 'encoding' in expected:
didPass = didPass if check.responseEncoding(r, expected['encoding']) else False
if 'validate_format' in expected:
didPass = didPass if check.responseFormat(r, expected['validate_format']) else False
if 'url' in expected:
didPass = didPass if check.responseUrl(r, expected['url']) else False
if didPass:
logging.debug("{} : PASS : {}".format(domain, r.url))
else:
site_in_error = site_in_error + 1
if not didPass and 'stop_on_fail' in expected and expected['stop_on_fail'] is True:
# We can't keep processing for this site
logging.error ("{} : STOP : cannot continue processing".format(domain))
break
if site_in_error > 0:
logging.log(100, "{} : {}FAIL{} : {} errors encountered".format(domain, tcol.FAIL, tcol.ENDC, site_in_error))
app_in_error = True
else:
logging.log(100, "{} : {}SUCCESS{} : all tests passed".format(domain, tcol.SUCCESS, tcol.ENDC))
if app_in_error:
sys.exit(1)
else:
sys.exit(0)
``` |
{
"source": "jondoering/microservices",
"score": 3
} |
#### File: ml-model-microservice-template/mlmodel/index.py
```python
from flask import Flask, jsonify, request
app = Flask(__name__)
#GET for getting model description
@app.route("/description")
def get_description():
'''
prints a basic description of the implemented ML algorithm
within this container.
:return:
'''
print("ToDo")
pass
#GET for getting current model outcome
@app.route("/result")
def get_result():
'''
returns the output of the current machine learning algorithm
(i.e. its most recent result)
:return:
'''
print("ToDo")
pass
#
@app.route("/data", methods=['POST'])
def post_data():
'''
Allows pushing data into the ML algorithm. The data should
be packaged as a data frame and structured in a way
the ML algorithm can use.
:return:
'''
print("ToDo")
pass
@app.route("/data", methods=['GET'])
def get_data():
'''
allows fetching the current data
:return:
'''
print("ToDo")
pass
``` |
{
"source": "jondoesntgit/email-response-predictor",
"score": 3
} |
#### File: willireply/data/gmail.py
```python
import os
import time
import imaplib
import email
import sqlite3
from pathlib import Path
from tqdm import tqdm
OUTPUT_DIRECTORY = Path('../../data/raw')
SQLITE3_PATH = OUTPUT_DIRECTORY / 'data.sqlite3'
WILLIREPLY_EMAIL = os.getenv('WILLIREPLY_EMAIL')
if not WILLIREPLY_EMAIL:
WILLIREPLY_EMAIL = input('Email: ')
WILLIREPLY_PASSWORD = os.getenv('WILLIREPLY_PASSWORD')
if not WILLIREPLY_PASSWORD:
WILLIREPLY_PASSWORD = input('Password: ')
SMTP_SERVER = "imap.gmail.com"
SMTP_PORT = 993
EMAIL_FOLDERS = {
#'received' : 'inbox',
'received' : '[Gmail]/Archive',
'sent' : '"[Gmail]/Sent Mail"'
}
def download_emails_from_gmail():
mail = imaplib.IMAP4_SSL(SMTP_SERVER)
mail.login(WILLIREPLY_EMAIL,WILLIREPLY_PASSWORD)
for folder_type, folder_name in EMAIL_FOLDERS.items():
save_to_directory = Path(OUTPUT_DIRECTORY) / folder_type
save_to_directory.mkdir(parents=True, exist_ok=True)
res = mail.select(folder_name)
type_, data = mail.search(None, 'ALL')
mail_ids = data[0]
id_list = mail_ids.split()
num2download = 200
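# IMAP search returns message ids in ascending order, so the slice below keeps
# only the newest `num2download` messages.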
for file, i in tqdm(enumerate(id_list[-num2download:]), total=num2download, desc=folder_name):
typ, data = mail.fetch(i, '(RFC822)' )
for response_part in data:
with open(f'{save_to_directory}/{file:06d}.eml', 'wb') as f:
f.write(data[0][1])
def index_emails():
conn = sqlite3.connect(str(SQLITE3_PATH))
c = conn.cursor()
c.execute('''DROP TABLE IF EXISTS emails;''')
c.execute('''CREATE TABLE emails
(id INTEGER PRIMARY KEY AUTOINCREMENT,
filename text,
folder text,
sender text,
subject text,
body text,
bodytype text,
message_id text,
reply_id text,
did_reply bool);''')
for folder in EMAIL_FOLDERS:
folder_path = Path(OUTPUT_DIRECTORY) / folder
for email_file in folder_path.glob('**/*.eml'):
with open(email_file) as f:
try:
m = email.message_from_file(f)
except:
continue
body = ''
body_type = 'html'
for part in m.walk():
# each part is either a non-multipart part, or another multipart message
# that contains further parts... Message is organized like a tree
if part.get_content_type() == 'text/plain':
body = part.get_payload()
body_type = 'plain'
reply_id = ''
if 'In-Reply-To' in m:
reply_id = m['In-Reply-To']
fields = (str(email_file.name), str(email_file.parent.name),
m['from'], m['subject'], body, body_type,
m['Message-Id'], reply_id, 0)
c.execute('''INSERT INTO emails (filename, folder, sender, subject, body, bodytype, message_id, reply_id, did_reply)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)''', fields)
conn.commit()
def flag_emails_with_responses():
conn = sqlite3.connect(str(SQLITE3_PATH))
c = conn.cursor()
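# Self-join: a received email is flagged did_reply = 1 when some sent email's
# In-Reply-To header (stored as reply_id) matches the received email's Message-Id.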
c.execute('''
UPDATE emails
SET did_reply = 1
WHERE id in (
SELECT e_received.id
FROM emails as e_received
INNER JOIN emails as e_sent
WHERE e_sent.reply_id = e_received.message_id
);
''')
conn.commit()
if __name__ == "__main__":
download_emails_from_gmail()
index_emails()
flag_emails_with_responses()
```
#### File: willireply/features/features.py
```python
import re
import pandas as pd
import numpy as np
def was_forwarded(df):
"""Looks to see if something like fw or fwd is in the subject.
Uses regular expressions
"""
return df['subject'].str.contains('fwd?\:?\s', flags=re.IGNORECASE).values
def was_replied(df):
"""Looks to see if something like Re or RE: is in the subject. Uses regular expressions
"""
return df['subject'].str.contains('re?\:?\s', flags=re.IGNORECASE).values
def number_of_recipients(df):
"""Counts the number of recipients"""
return df['m_to'].apply(lambda x: len(x.split(','))).values
def common_words_body(df, words):
"""Given a list of common words (length N), returns an MxN matrix (M is length of df)
Each cell is the number of times word[N] occurs in df[M].body (case insensitive)"""
return df[['body']].apply(lambda x: pd.Series([x['body'].lower().count(word.lower()) for word in words]), axis=1).values
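# Illustrative example (hypothetical words): common_words_body(df, ['meeting', 'urgent'])
# yields a (len(df) x 2) array whose [i, j] entry counts occurrences of word j in the
# body of email i.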
def common_words_subject(df, words):
"""Given a list of common words (length N), returns an MxN matrix (M is length of df)
Each cell is the number of times word[N] occurs in df[M].body (case insensitive"""
return df[['subject']].apply(lambda x: pd.Series([x['subject'].lower().count(word.lower()) for word in words]), axis=1).values
def number_of_ccs(df):
"""Counts the number of CC'd"""
if df['m_cc'] is not None and len(df['m_cc'].apply(lambda x: len(x.split(','))).values) > 0:
return df['m_cc'].apply(lambda x: len(x.split(','))).values
else:
return 0
def words_in_subject(df):
return df['subject'].apply(lambda x: len(x.split()))
def words_in_body(df):
return df['body'].apply(lambda x: len(x.split()))
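# thread_length() below estimates how many messages are in a thread by splitting the
# body on Outlook-style "-----Original Message-----" markers or quoted "on MM/DD/YY"
# reply headers.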
def thread_length(df):
return df['body'].apply(lambda x: len(re.split(r'-----Original Message-----|on\s[0-1][0-9]/[0-3][0-9]/[0-9]{2,4}', x)))
```
#### File: willireply/models/base_model.py
```python
import abc
import random
import numpy as np
from pathlib import Path
from willireply.features import features
from willireply.features.feature_extractor import FeatureExtractor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import classification_report, fbeta_score
from dotenv import find_dotenv, load_dotenv
load_dotenv(find_dotenv())
import os
from jinja2 import Template
from tqdm import tqdm
import pandas as pd
from willireply.data import enron
import dill
RESULTS_DIRECTORY = Path(os.environ.get('RESULTS_DIRECTORY')).expanduser()
SPLIT_FILE = os.environ.get('SPLIT_FILE')
SPLIT_FILE = Path(SPLIT_FILE).expanduser()
df_test = pd.read_pickle(SPLIT_FILE.parent / 'test_set.pkl')
class BaseModel(abc.ABC):
def __init__(self):
pass
@abc.abstractmethod
def train(self, train_x, train_y):
pass
@abc.abstractmethod
def test(self, test_x, test_y):
pass
def test(self, test_x, test_y):
pred_y = self.predict(test_x)
true_positives = np.where(test_y & pred_y)[0]
false_positives = np.where(test_y < pred_y)[0]
true_negatives = np.where(~test_y & ~pred_y)[0]  # neither labelled nor predicted as a reply
false_negatives = np.where(test_y > pred_y)[0]
destination = RESULTS_DIRECTORY
destination.mkdir(parents=True, exist_ok=True)
template_file = Path(__file__).parent / 'results_template.html'
with template_file.open('r') as f:
template = Template(f.read())
with (destination / self.name).with_suffix('.html').open('w') as f:
f.write(
template.render(
false_positives = df_test.iloc[false_positives],
false_negatives = df_test.iloc[false_negatives],
total = len(df_test),
classification = classification_report(test_y, pred_y, target_names=['no_reply', 'reply']),
title=self.name)
)
def serialize(self):
destination = RESULTS_DIRECTORY
destination.mkdir(parents=True, exist_ok=True)
with (destination / self.name ).with_suffix('.pkl').open('wb') as f:
dill.dump(self, f)
class LeastSquares(BaseModel):
def __init__(self, name='LeastSquares'):
self._model = LinearRegression()
self.name = name
def train(self, train_x, train_y):
self._model.fit(train_x, train_y)
def predict(self, test_x):
return self._model.predict(test_x) > 0
class HingeLoss(BaseModel):
def __init__(self, name='HingeLoss'):
self._model = SGDClassifier(loss="hinge", penalty="l2", max_iter=1000)
self.name = name
def train(self, train_x, train_y):
self._model.fit(train_x, train_y)
def predict(self, test_x):
return self._model.predict(test_x)
if __name__ == '__main__':
model = LeastSquares()
fe = FeatureExtractor.from_pickle('simple')
model.train(*fe.get_dataset('train'))
model.test(*fe.get_dataset('test'))
model.serialize()
# model = HingeLoss()
# model.train(*fe.get_dataset('train'))
# model.visualize(*fe.get_dataset('test'))
``` |
{
"source": "jondoesntgit/pydap.handlers.pytables",
"score": 2
} |
#### File: handlers/pytables/__init__.py
```python
import os
import re
import time
from stat import ST_MTIME
from email.utils import formatdate
import tables
from pkg_resources import get_distribution
from pydap.model import *
from pydap.handlers.lib import BaseHandler
from pydap.exceptions import OpenFileError
import re
_col_match_re = re.compile('FIELD_\d+_ATTR_')
class HDF5Handler(BaseHandler):
"""A simple handler for HDF5 files based on PyTables."""
__version__ = get_distribution("pydap.handlers.pytables").version
extensions = re.compile(r"^.*\.(h5|hdf5)$", re.IGNORECASE)
def __init__(self, filepath):
BaseHandler.__init__(self)
try:
self.fp = tables.open_file(filepath, 'r')
except Exception as exc:
message = 'Unable to open file %s: %s' % (filepath, exc)
raise OpenFileError(message)
self.additional_headers.append(
('Last-modified', (
formatdate(
time.mktime(
time.localtime(os.stat(filepath)[ST_MTIME]))))))
# build dataset
name = os.path.split(filepath)[1]
node = self.fp.root
self.dataset = DatasetType(name, attributes={
"NC_GLOBAL": get_attrs(node),
})
build_dataset(self.dataset, self.fp, self.fp.root)
self.fp.close()
def get_attrs(node):
attrs = {k: node._v_attrs[k] for k in node._v_attrs._v_attrnames
if k.upper() != k
}
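# Attribute names that are entirely upper-case (e.g. the CLASS/VERSION/TITLE
# bookkeeping attributes PyTables writes) are filtered out by the check above.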
return attrs
def build_dataset(target, fp, node):
"""Recursively build a dataset, mapping groups to structures."""
for node in fp.list_nodes(node):
if isinstance(node, tables.Group):
attrs = {k: node._v_attrs[k] for k in node._v_attrs._v_attrnames}
target[node._v_name] = StructureType(node._v_name, attributes=get_attrs(node)
)
build_dataset(target[node._v_name], fp, node,)
elif isinstance(node, tables.Array):
target[node._v_name] = BaseType(
node._v_name, node, None, attributes=get_attrs(node))
elif isinstance(node, tables.Table):
table = node
table_attrs = {k: v for k, v in get_attrs(node).items() if not _col_match_re.match(k)}
sequence = target[node._v_name] = SequenceType(node._v_name, attributes=table_attrs)
for name in table.colnames:
col_attrs = dict(getattr(table.cols, name).attrs)
sequence[name] = BaseType(name, table.coldtypes[name], attributes=col_attrs)
sequence.data = node.read()
if __name__ == "__main__":
import sys
from werkzeug.serving import run_simple
application = HDF5Handler(sys.argv[1])
run_simple('localhost', 8001, application, use_reloader=True)
``` |
{
"source": "JonDoNym/CloudWars",
"score": 3
} |
#### File: CloudWars/examples/myAi.py
```python
import socket
import math
import time
import json
# CONFIG
TCP_IP = '127.0.0.1'
TCP_PORT = 3333
CLOUD_NAME = "AlexAI"
CLOUD_COLOR = "red"
# TCP connection
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect((TCP_IP, TCP_PORT))
# ------ Helper ------------------------------------------------------------------------------------------------------ #
def cmd_write_read(cmd):
# remove protocol break
cmd = cmd.replace('\n', '')
cmd = cmd.replace('\r', '')
# send command
conn.send(bytes(cmd, 'utf8') + b'\n')
print("SEND", cmd) # DEBUG !!!
# read response
resp = conn.makefile().readline()
print("RESP", resp) # DEBUG !!!
# return
return resp
# ------ Commands ---------------------------------------------------------------------------------------------------- #
# Close disconnects from the server.
# The controlled cloud remains unchanged (use Kill() before this call).
# Returns the server response (OK or ERR) as a string.
def close():
resp = cmd_write_read("quit")
conn.close()
return resp
# Stat returns the world status as a json string.
# Use core.World FromJson() to parse the string.
def stat():
return cmd_write_read("list")
# Name set the player name
# Use this before calling Play()
# Returns the server response (OK or ERR) as a string.
def name(n):
cmd = "name" + n
return cmd_write_read(cmd)
# Color set the player color.
# 'blue', 'gray', 'orange', 'purple' or 'red'
# Use this before calling Play()
# Returns the server response (OK or ERR) as a string.
def color(c):
cmd = "type" + c
return cmd_write_read(cmd)
# Play creates a new player cloud.
# The attributes of Name() and Color() are used.
# Returns the server response (OK or ERR) as a string.
def play():
return cmd_write_read("play")
# Move sends a move command for your player cloud to the server.
# Returns the server response (OK or ERR) as a string.
def move(x, y):
cmd = "move" + str(x) + ";" + str(y)
return cmd_write_read(cmd)
# MoveByAngle sends a move command for your player cloud to the server.
# Returns the server response (OK or ERR) as a string.
def move_by_angle(angle, strength):
x = math.cos(math.pi / 180 * angle) * strength * (-1)
y = math.sin(math.pi / 180 * angle) * strength * (-1)
return move(x, y)
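# With this sign convention, move_by_angle(0, 10) pushes toward negative x ("left")
# and move_by_angle(90, 10) toward negative y ("up"), matching the comments in the
# __main__ block below.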
# Kill blasts the controlled cloud and removes it from the game.
# Returns the server response (OK or ERR) as a string.
def kill():
return cmd_write_read("kill")
# ----- Alex TotalCloudWar AI ---------------------------------------------------------------------------------------- #
if __name__ == '__main__':
# set name, color and start the game
name(CLOUD_NAME)
color(CLOUD_COLOR)
play()
# get world status
json_str = stat()
world = json.loads(json_str)
# parse some world stats
world_width = world['Width'] # game board size (DEFAULT: 2048)
world_height = world['Height'] # game board size (DEFAULT: 1152)
world_game_speed = world['GameSpeed'] # updates per second (DEFAULT: 60)
world_iteration = world['Iteration'] # increases with every server update
world_vapor = world['WorldVapor'] # vapor of all clouds together
world_alive = world['Alive'] # active clouds
world_clouds = world['Clouds'] # cloud list
# cloud list
me = None # your controlled cloud (find in list)
for cloud in world_clouds:
cloud_name = cloud['Player'] # only player controlled clouds have names
cloud_color = cloud['Color'] # cloud color
cloud_vapor = cloud['Vapor'] # cloud vapor (mass)
cloud_pos_x = cloud['Pos']['X'] # x position
cloud_pos_y = cloud['Pos']['Y'] # y position
cloud_vel_x = cloud['Vel']['X'] # x velocity (speed)
cloud_vel_y = cloud['Vel']['Y'] # y velocity (speed)
if cloud_name == CLOUD_NAME:
me = cloud # set 'me'
# make some decisions
# move to the center
if me['Pos']['X'] < world_width / 2:  # left of centre, so drift right
time.sleep(2)
move_by_angle(180, 33) # move right
else:
time.sleep(2)
move_by_angle(0, 33) # move left
# move around
time.sleep(2)
move_by_angle(0, 10) # move left
time.sleep(2)
move_by_angle(90, 10) # move up
time.sleep(2)
move_by_angle(180, 10) # move right
time.sleep(2)
move_by_angle(270, 10) # move down
time.sleep(2)
# it makes no sense
kill()
close()
``` |
{
"source": "jondot/dotlinker",
"score": 2
} |
#### File: jondot/dotlinker/dotlinker.py
```python
from docopt import docopt
from os.path import expanduser, join, splitext, isdir
import os.path
from toolz.curried import pipe, filter, map, get, concat, mapcat
from os import symlink
class FileSystemIO(object):
def link(self, source, target):
try:
symlink(source, target)
print("{} -> {}".format(source, target))
except: # noqa
print("(exists) {}".format(target))
def ls(self, dir):
return os.listdir(dir)
def glob(self, root, recursive=True):
# walk: root [dirs] [files] -> for each dir (tup.2), for
# each file (tup.1), we need to join with root
return pipe(
os.walk(expanduser(root)),
mapcat(lambda tup:
map(lambda f: join(tup[0], f))(concat([tup[2], tup[1]]))),
list) # noqa
class DryRunIO(FileSystemIO):
def link(self, source, target):
print("[dry-run] {} -> {}".format(source, target))
def glob_symlinks(root, io):
return pipe(
io.glob(root), filter(lambda f: splitext(f)[1] == '.symlink'), list)
def symlinkify(s, source='', target=''):
(f, e) = splitext(s)
if e != '.symlink':
return None
return f.replace(source, target)
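# Illustrative example (hypothetical paths):
#   symlinkify('/dotfiles/vim/vimrc.symlink', '/dotfiles', '/home/me')
#   -> '/home/me/vim/vimrc'; paths without a .symlink extension return None.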
def make_targets(src, target, symlinkfiles):
return pipe(symlinkfiles, map(lambda p: (p, symlinkify(p, src, target))),
filter(lambda tup: tup[1]),
list) # noqa yapf: disable
def dirs(dir, io):
return pipe(io.ls(dir), filter(lambda d: not d.startswith('-')),
map(lambda d: join(dir, d)),
filter(isdir),
list) # noqa yapf: disable
def perform_link(source, target, io):
return pipe(make_targets(source, target, glob_symlinks(source, io)),
map(lambda tup: io.link(tup[0], tup[1])),
list) # noqa yapf: disable
def link(src, target, io):
(abs_src, abs_tgt) = (expanduser(src), expanduser(target))
return pipe(dirs(abs_src, io), map(lambda d: perform_link(d, abs_tgt, io)),
list) # noqa yapf: disable
def main():
doc = """dotlinker
Usage:
dotlinker <from> <to> [--dry-run]
dotlinker --version
Options:
-d --dry-run Perform dry run (don't apply changes to filesystem)
--version Show version.
-h --help Show this screen.
"""
args = docopt(doc, version="dotlinker 0.1.2")
(source, target, dry_run) = get(['<from>', '<to>', '--dry-run'])(args)
IO = DryRunIO if dry_run else FileSystemIO
link(source, target, IO())
if __name__ == '__main__':
main()
``` |
{
"source": "jondot/pgpipeline",
"score": 2
} |
#### File: pgpipeline/pgpipeline/__init__.py
```python
import logging
import dataset
from sqlalchemy.dialects.postgresql import JSONB
logger = logging.getLogger(__name__)
class PgPipeline(object):
def __init__(self, **kwargs):
self.args = kwargs
@classmethod
def from_crawler(cls, crawler):
args = crawler.settings.get('PG_PIPELINE', {})
return cls(**args)
def open_spider(self, spider):
if self.args.get('connection'):
self.db = dataset.connect(self.args.get('connection'))
self.table = self.db[self.args.get('table_name')]
self.pkey = self.args.get('pkey')
self.types = self.args.get('types', {})
self.ignore_identical = self.args.get('ignore_identical')
self.table.create_index([self.pkey])
self.table.create_index(self.ignore_identical)
self.onconflict = self.args.get('onconflict', 'ignore')
self.enabled = True
def process_item(self, item, spider):
if self.enabled:
if self.onconflict == 'ignore':
logger.debug("SAVE(ignore) %s", item)
self.table.insert_ignore(
item, self.ignore_identical, types=self.types)
elif self.onconflict == 'upsert':
logger.debug("SAVE(upsert) %s", item)
self.table.upsert(
item, self.ignore_identical, types=self.types)
elif self.onconflict == 'non-null':
logger.debug("SAVE(non-null) %s", item)
row, res = self.table._upsert_pre_check(
item, self.ignore_identical, None)
selected = item
if res is not None:
# remove keys with none value
selected = dict((k, v) for k, v in item.iteritems() if v)
self.table.upsert(
selected, self.ignore_identical, types=self.types)
else:
self.table.insert(
selected, self.ignore_identical, types=self.types)
else:
raise Exception("no such strategy: %s" % (self.onconflict))
else:
logger.debug("DISABLED")
return item
``` |
{
"source": "jondot/pytest-json-report",
"score": 2
} |
#### File: pytest-json-report/tests/test_jsonreport.py
```python
import json
import logging
import pytest
from pytest_jsonreport.plugin import JSONReport
# Some test cases borrowed from github.com/mattcl/pytest-json
FILE = """
import sys
import pytest
@pytest.fixture
def setup_teardown_fixture(request):
print('setup')
print('setuperr', file=sys.stderr)
def fn():
print('teardown')
print('teardownerr', file=sys.stderr)
request.addfinalizer(fn)
@pytest.fixture
def fail_setup_fixture(request):
assert False
@pytest.fixture
def fail_teardown_fixture(request):
def fn():
assert False
request.addfinalizer(fn)
def test_pass():
assert True
def test_fail_with_fixture(setup_teardown_fixture):
print('call')
print('callerr', file=sys.stderr)
assert False
@pytest.mark.xfail(reason='testing xfail')
def test_xfail():
assert False
@pytest.mark.xfail(reason='testing xfail')
def test_xfail_but_passing():
assert True
def test_fail_during_setup(fail_setup_fixture):
assert True
def test_fail_during_teardown(fail_teardown_fixture):
assert True
@pytest.mark.skipif(True, reason='testing skip')
def test_skip():
assert False
def test_fail_nested():
def baz(o=1):
c = 3
return 2 - c - None
def bar(m, n=5):
b = 2
print(m)
print('bar')
return baz()
def foo():
a = 1
print('foo')
v = [bar(x) for x in range(3)]
return v
foo()
@pytest.mark.parametrize('x', [1, 2])
def test_parametrized(x):
assert x == 1
"""
@pytest.fixture
def misc_testdir(testdir):
testdir.makepyfile(FILE)
return testdir
@pytest.fixture
def json_data(make_json):
return make_json()
@pytest.fixture
def tests(json_data):
return {test['domain'][5:]: test for test in json_data['tests']}
@pytest.fixture
def make_json(testdir):
def func(content=FILE, args=['-vv', '--json-report'], path='.report.json'):
testdir.makepyfile(content)
testdir.runpytest(*args)
with open(str(testdir.tmpdir / path)) as f:
data = json.load(f)
return data
return func
def test_arguments_in_help(misc_testdir):
res = misc_testdir.runpytest('--help')
res.stdout.fnmatch_lines([
'*json-report*',
'*json-report-file*',
'*json_report_file*',
])
def test_no_report(misc_testdir):
misc_testdir.runpytest()
assert not (misc_testdir.tmpdir / '.report.json').exists()
def test_create_report(misc_testdir):
misc_testdir.runpytest('--json-report')
assert (misc_testdir.tmpdir / '.report.json').exists()
def test_create_report_file_from_arg(misc_testdir):
misc_testdir.runpytest('--json-report', '--json-report-file=arg.json')
assert (misc_testdir.tmpdir / 'arg.json').exists()
def test_create_report_file_from_ini(misc_testdir):
misc_testdir.makeini("""
[pytest]
json_report_file = ini.json
""")
misc_testdir.runpytest('--json-report')
assert (misc_testdir.tmpdir / 'ini.json').exists()
def test_create_report_file_priority(misc_testdir):
misc_testdir.makeini("""
[pytest]
json_report_file = ini.json
""")
misc_testdir.runpytest('--json-report', '--json-report-file=arg.json')
assert (misc_testdir.tmpdir / 'arg.json').exists()
def test_report_keys(make_json):
data = make_json()
assert set(data) == set([
'created', 'duration', 'environment', 'collectors', 'tests', 'summary',
'root', 'exitcode'
])
assert isinstance(data['created'], float)
assert isinstance(data['duration'], float)
assert data['root'].startswith('/')
assert data['exitcode'] == 1
def test_report_collectors(make_json):
collectors = make_json()['collectors']
assert len(collectors) == 2
assert all(c['outcome'] == 'passed' for c in collectors)
assert collectors[0] == {
'nodeid': '',
'outcome': 'passed',
'children': [
{
'nodeid': 'test_report_collectors.py',
'type': 'Module',
}
]
}
assert {
'nodeid': 'test_report_collectors.py::test_pass',
'type': 'Function',
'path': 'test_report_collectors.py',
'lineno': 24,
'domain': 'test_pass',
} in collectors[1]['children']
def test_report_failed_collector(make_json):
data = make_json("""
syntax error
def test_foo():
assert True
""")
collectors = data['collectors']
assert data['tests'] == []
assert collectors[0]['outcome'] == 'passed'
assert collectors[1]['outcome'] == 'failed'
assert collectors[1]['children'] == []
assert 'longrepr' in collectors[1]
def test_report_failed_collector2(make_json):
data = make_json("""
import nonexistent
def test_foo():
pass
""")
collectors = data['collectors']
assert collectors[1]['longrepr'].startswith('ImportError')
def test_report_item_keys(tests):
assert set(tests['pass']) == set(['nodeid', 'path', 'lineno', 'domain',
'outcome', 'keywords', 'setup', 'call',
'teardown'])
def test_report_outcomes(tests):
assert len(tests) == 10
assert tests['pass']['outcome'] == 'passed'
assert tests['fail_with_fixture']['outcome'] == 'failed'
assert tests['xfail']['outcome'] == 'xfailed'
assert tests['xfail_but_passing']['outcome'] == 'xpassed'
assert tests['fail_during_setup']['outcome'] == 'error'
assert tests['fail_during_teardown']['outcome'] == 'error'
assert tests['skip']['outcome'] == 'skipped'
def test_report_summary(make_json):
assert make_json()['summary'] == {
'total': 10,
'passed': 2,
'failed': 3,
'skipped': 1,
'xpassed': 1,
'xfailed': 1,
'error': 2,
}
def test_report_longrepr(tests):
assert 'assert False' in tests['fail_with_fixture']['call']['longrepr']
def test_report_crash_and_traceback(tests):
assert 'traceback' not in tests['pass']['call']
call = tests['fail_nested']['call']
assert call['crash']['path'].endswith('test_report_crash_and_traceback.py')
assert call['crash']['lineno'] == 54
assert call['crash']['message'].startswith('TypeError: unsupported ')
assert call['traceback'] == [
{
'path': 'test_report_crash_and_traceback.py',
'lineno': 65,
'message': ''
},
{
'path': 'test_report_crash_and_traceback.py',
'lineno': 63,
'message': 'in foo'
},
{
'path': 'test_report_crash_and_traceback.py',
'lineno': 63,
'message': 'in <listcomp>'
},
{
'path': 'test_report_crash_and_traceback.py',
'lineno': 59,
'message': 'in bar'
},
{
'path': 'test_report_crash_and_traceback.py',
'lineno': 54,
'message': 'TypeError'
}
]
def test_no_traceback(make_json):
data = make_json(FILE, ['--json-report', '--json-report-no-traceback'])
tests_ = tests(data)
assert 'traceback' not in tests_['fail_nested']['call']
def test_pytest_no_traceback(make_json):
data = make_json(FILE, ['--json-report', '--tb=no'])
tests_ = tests(data)
assert 'traceback' not in tests_['fail_nested']['call']
def test_no_streams(make_json):
data = make_json(FILE, ['--json-report', '--json-report-no-streams'])
call = tests(data)['fail_with_fixture']['call']
assert 'stdout' not in call
assert 'stderr' not in call
def test_summary_only(make_json):
data = make_json(FILE, ['--json-report', '--json-report-summary'])
assert 'summary' in data
assert 'tests' not in data
assert 'collectors' not in data
assert 'warnings' not in data
def test_report_streams(tests):
test = tests['fail_with_fixture']
assert test['setup']['stdout'] == 'setup\n'
assert test['setup']['stderr'] == 'setuperr\n'
assert test['call']['stdout'] == 'call\n'
assert test['call']['stderr'] == 'callerr\n'
assert test['teardown']['stdout'] == 'teardown\n'
assert test['teardown']['stderr'] == 'teardownerr\n'
assert 'stdout' not in tests['pass']['call']
assert 'stderr' not in tests['pass']['call']
def test_json_metadata(make_json):
data = make_json("""
def test_metadata1(json_metadata):
json_metadata['x'] = 'foo'
json_metadata['y'] = [1, {'a': 2}]
def test_metadata2(json_metadata):
json_metadata['z'] = 1
assert False
def test_unused_metadata(json_metadata):
assert True
def test_empty_metadata(json_metadata):
json_metadata.update({})
def test_unserializable_metadata(json_metadata):
json_metadata['a'] = object()
""")
tests_ = tests(data)
assert tests_['metadata1']['metadata'] == {'x': 'foo', 'y': [1, {'a': 2}]}
assert tests_['metadata2']['metadata'] == {'z': 1}
assert 'metadata' not in tests_['unused_metadata']
assert 'metadata' not in tests_['empty_metadata']
assert tests_['unserializable_metadata']['metadata'].startswith('{\'a\':')
def test_environment_via_metadata_plugin(make_json):
data = make_json('', ['--json-report', '--metadata', 'x', 'y'])
assert 'Python' in data['environment']
assert data['environment']['x'] == 'y'
def test_modifyreport_hook(testdir, make_json):
testdir.makeconftest("""
def pytest_json_modifyreport(json_report):
json_report['foo'] = 'bar'
del json_report['summary']
""")
data = make_json("""
def test_foo():
assert False
""")
assert data['foo'] == 'bar'
assert 'summary' not in data
def test_warnings(make_json):
warnings = make_json("""
class TestFoo:
def __init__(self):
pass
def test_foo(self):
assert True
""")['warnings']
assert len(warnings) == 1
assert set(warnings[0]) == {'code', 'path', 'nodeid', 'message'}
assert warnings[0]['nodeid'] == 'test_warnings.py::TestFoo'
def test_process_report(testdir, make_json):
testdir.makeconftest("""
def pytest_sessionfinish(session):
assert session.config._json_report.report['exitcode'] == 0
""")
testdir.makepyfile("""
def test_foo():
assert True
""")
res = testdir.runpytest('--json-report')
assert res.ret == 0
def test_indent(testdir, make_json):
testdir.runpytest('--json-report')
with open(str(testdir.tmpdir / '.report.json')) as f:
assert len(f.readlines()) == 1
testdir.runpytest('--json-report', '--json-report-indent=4')
with open(str(testdir.tmpdir / '.report.json')) as f:
assert f.readlines()[1].startswith(' "')
def test_logging(make_json):
data = make_json("""
import logging
import pytest
@pytest.fixture
def fixture(request):
logging.info('log info')
def f():
logging.warn('log warn')
request.addfinalizer(f)
def test_foo(fixture):
logging.error('log error')
try:
raise
except RuntimeError:
logging.getLogger().debug('log %s', 'debug', exc_info=True)
""", ['--json-report', '--log-level=DEBUG'])
test = data['tests'][0]
assert test['setup']['log'][0]['msg'] == 'log info'
assert test['call']['log'][0]['msg'] == 'log error'
assert test['call']['log'][1]['msg'] == 'log debug'
assert test['teardown']['log'][0]['msg'] == 'log warn'
record = logging.makeLogRecord(test['call']['log'][1])
assert record.getMessage() == record.msg == 'log debug'
def test_direct_invocation(testdir):
test_file = testdir.makepyfile("""
def test_foo():
assert True
""")
plugin = JSONReport()
res = pytest.main([test_file.strpath], plugins=[plugin])
assert res == 0
assert plugin.report['exitcode'] == 0
``` |
{
"source": "JOndra91/siliness",
"score": 2
} |
#### File: siliness/remote-notify/client.py
```python
import argparse
from http import client
import json
def main():
argp = argparse.ArgumentParser()
argp.add_argument('--host', default='localhost')
argp.add_argument('--port', default=6969, type=int)
argp.add_argument(
'--notify', default='/notify-send', const='/notify-send',
dest='app', action='store_const')
argp.add_argument(
'--zenity', const='/zenity', dest='app', action='store_const')
# argp.add_argument('--password')
args, notify_args = argp.parse_known_args()
body = json.dumps(notify_args)
try:
httpc = client.HTTPConnection(args.host, args.port)
httpc.request('POST', args.app, body)
response = httpc.getresponse()
print(response.read())
finally:
httpc.close()
# server_addr = (args.host, args.port)
# httpd = server.HTTPServer(server_addr, NotifyHandler)
# httpd.serve_forever()
if __name__ == '__main__':
main()
``` |
{
"source": "jonducrou/eff_led_lights",
"score": 3
} |
#### File: jonducrou/eff_led_lights/configurator.py
```python
import json
import board
import neopixel
from picamera import PiCamera
from picamera.array import PiRGBArray
from time import sleep
NUM_PIXELS = 310
locations = []
RESOLUTION = (64,48)
data = [None for i in range(NUM_PIXELS)]
pixels = neopixel.NeoPixel(board.D18, NUM_PIXELS)
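# Calibration approach: for each LED, capture a baseline frame, then frames with the
# LED lit red, green and blue, and record (in collect() below) the pixel whose
# channel-1 value brightened the most relative to the baseline for each colour.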
def collect(i, img0, imgR, imgG, imgB):
x = None
y = None
biggestR = 0
locR = (0,0)
biggestG = 0
locG = (0,0)
biggestB = 0
locB = (0,0)
for y in range(RESOLUTION[0]):
for x in range(RESOLUTION[1]):
if (imgR[x,y,1] < img0[x,y,1]):
rVal = 0
else:
rVal = imgR[x,y,1] - img0[x,y,1]
if (rVal > biggestR):
biggestR = rVal
locR = (x,y)
if (imgG[x,y,1] < img0[x,y,1]):
gVal = 0
else:
gVal = imgG[x,y,1] - img0[x,y,1]
if (gVal > biggestG):
biggestG = gVal
locG = (x,y)
if (imgB[x,y,1] < img0[x,y,1]):
bVal = 0
else:
bVal = imgB[x,y,1] - img0[x,y,1]
if (bVal > biggestB):
biggestB = bVal
locB = (x,y)
print("R: " + str(biggestR))
print (locR)
print("G: " + str(biggestG))
print (locG)
print("B: " + str(biggestB))
print (locB)
print()
data[i] = [locR, locG, locB]
return (x,y)
with PiCamera() as camera:
camera.resolution = RESOLUTION
with PiRGBArray(camera, size=RESOLUTION) as output:
# for each pixel
for i in range(NUM_PIXELS):
if (i<20):
continue
# read from camera -> img 1
camera.capture(output, 'rgb')
#camera.capture(str(i) + "-0.jpg")
img0 = output.array
output.truncate(0)
# set pixel red
pixels[i] = (255,0,0)
# read from camera -> img 2
camera.capture(output, 'rgb')
#camera.capture(str(i) + "-r.jpg")
imgR = output.array
output.truncate(0)
# set pixel green
pixels[i] = (0,255,0)
# read from camera -> img 3
camera.capture(output, 'rgb')
imgG = output.array
output.truncate(0)
# set pixel blue
pixels[i] = (0,0,255)
# read from camera -> imgB
camera.capture(output, 'rgb')
imgB = output.array
output.truncate(0)
# set pixel black
pixels[i] = (0,0,0)
# compare images
collect(i, img0, imgR, imgG, imgB)
locs = [(0,0) for i in range(NUM_PIXELS)]
grid = [[0 for i in range(RESOLUTION[0])] for j in range(RESOLUTION[1])]
gridDetailed = [ [ [] for i in range(RESOLUTION[0] ) ] for j in range(RESOLUTION[1])]
for i in range(NUM_PIXELS):
if (data[i] != None):
#if two of the three agree... good enough for me.
rx = data[i][0][0]
ry = data[i][0][1]
gx = data[i][1][0]
gy = data[i][1][1]
bx = data[i][2][0]
by = data[i][2][1]
if (rx == gx and ry == gy):
locs[i] = (gx,gy)
grid[gx][gy] += 1
gridDetailed[gx][gy].append(i)
elif (rx == bx and ry == by):
locs[i] = (rx,ry)
grid[rx][ry] += 1
gridDetailed[rx][ry].append(i)
elif (gx == bx and gy == by):
locs[i] = (gx,gy)
grid[gx][gy] += 1
gridDetailed[gx][gy].append(i)
for x in range(RESOLUTION[1]):
for y in range(RESOLUTION[0]):
if(grid[x][y]>0):
print(grid[x][y], end='')
else:
print(" ",end='')
print()
with open('grid.json', 'w') as out:
json.dump(gridDetailed, out)
```
#### File: eff_led_lights/vis/gridder.py
```python
POS = 0
def getDefaultParams():
return {"grid_size": 40, "colour": (100, 100, 100)}
def customParams():
return [
{"name": "grid_size", "type": "number"},
{"name": "colour", "type": "rgb"},
]
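# go() below draws grid lines every `grid_size` pixels, with dimmer lines on either
# side, and advances POS on each call so the whole grid drifts diagonally by one
# pixel per frame.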
def go(params, h, w, led_control):
global POS
POS = POS+1
grid_size = params["grid_size"]
r = params["colour"][0]
g = params["colour"][1]
b = params["colour"][2]
for x in range(w):
for y in range(h):
led_control.setLEDs(x, y, 0, 0, 0)
if ((POS+x) % grid_size == 0):
led_control.setLEDs(x, y, r, g, b)
if ((POS+y) % grid_size == 0):
led_control.setLEDs(x, y, r, g, b)
if ((POS+x) % grid_size == 1 or (POS+x) % grid_size == grid_size - 1):
led_control.setLEDs(x, y, r/3, g/3, b/3)
if ((POS+y) % grid_size == 1 or (POS+y) % grid_size == grid_size - 1):
led_control.setLEDs(x, y, r/3, g/3, b/3)
```
#### File: eff_led_lights/vis/starfall.py
```python
import random
SKY = []
STARS = []
def getDefaultParams():
return {"colour": (100, 100, 100)}
def customParams():
return [
{"name": "colour", "type": "rgb"},
]
def assignSafe(grid, x, y, w, h, value):
if (x < w and x>= 0 and y < h and y>= 0):
grid[x][y] = value
def capAt255(x):
return capAt(x,255)
def capAt(x,c):
if (x>c):
return c
return x
def floorAt(x,c):
if (x<c):
return c
return x
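# getV() returns a velocity component with magnitude in [0.5, 1.0) and a random
# sign, so newly spawned stars never start out nearly stationary.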
def getV():
r = random.random()
if(r<0.5):
r -= 1
return r
def go(params, h, w, led_control):
global SKY, STARS
if len(SKY) == 0:
SKY = [ [ 0 for x in range(h)] for y in range(w) ]
STARS = [(w/2,h/2,1,1),(w/2,h/2,-0.1,1),(w/2,h/2,1,-1),(w/2,h/2,-1,-1)]
r = params["colour"][0]
g = params["colour"][1]
b = params["colour"][2]
SKY = [ [ int(x*0.9) for x in y] for y in SKY ]
for i, star in enumerate(STARS):
if (star[0] >= w or star[0] < 0 or star[1] >= h or star[1] < 0):
STARS[i] = (w/2,h/2,getV(), getV())
star = STARS[i]
assignSafe(SKY, int(star[0]), int(star[1]), w, h, capAt255(200+SKY[int(star[0])][int(star[1])]))
[ assignSafe(SKY, int(star[0]+x), int(star[1]+y), w, h, capAt255(100+ SKY[capAt(floorAt(int(star[0]+x),0),w-1)][capAt(floorAt(int(star[1]+y),0),h-1)]) ) for x in [-1,1] for y in [-1,1] ]
STARS[i] = (star[0] + star[2], star[1] + star[3], star[2], star[3])
for x in range(w):
for y in range(h):
led_control.setLEDs(x,y,int(r*SKY[x][y]/255),int(g*SKY[x][y]/255),int(b*SKY[x][y]/255))
``` |
{
"source": "jonDuke/lambdata-jduke",
"score": 4
} |
#### File: lambdata-jduke/my_lambdata/my_class.py
```python
import random
class My_Class():
"""
an example class
"""
def __init__(self, name, fav_color, fav_number):
"""
Initializes my_class
name (string) - the name of this object
fav_color (string) - this object's favorite color
fav_number (int) - this object's favorite number
"""
self.name = str(name)
self.fav_color = str(fav_color)
self.fav_number = int(fav_number)
def introduce_self(self):
"""
says hello
"""
print('Hi! My name is ' + self.name +
'. My favorite color is ' + self.fav_color +
' and my favorite number is ' + str(self.fav_number) + '.')
def pick_new_number(self):
"""
randomly picks a new favorite number between 0 and 100 (inclusive)
"""
self.fav_number = random.randrange(101)
def pick_new_color(self):
"""
randomly picks a new favorite color
"""
colors = ['red', 'orange', 'yellow', 'green', 'blue', 'purple']
choice = random.randrange(len(colors))
self.fav_color = colors[choice]
``` |
{
"source": "jondurrant/TwinThingPyMQTT",
"score": 2
} |
#### File: TwinThingPyMQTT/example/mainThing.py
```python
from mqttAgent import MQTTAgent
import logging
from mqttObserver import MQTTObserver
from mqttRouterPing import MQTTRouterPing
from mqttRouterState import MQTTRouterState
from exampleStateRouter import ExampleStateRouter
from exampleStateObserver import ExampleStateObserver
import threading
import os
from twinState import TwinState
LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
logging.basicConfig(level=LOGLEVEL,
format= '[%(asctime)s] {%(name)s:%(lineno)d} %(levelname)s - %(message)s')
#MQTT Credentials and targets
#Credentials = needs to look at picking up from network
mqttUser=os.environ.get("MQTT_USER")
mqttPwd=<PASSWORD>("MQTT_PASSWD")
mqttTarget= os.environ.get("MQTT_HOST")
mqttPort=int(os.environ.get("MQTT_PORT"))
mqttCert=os.environ.get("MQTT_CERT", None)
tls=""
if (mqttCert != None):
tls="TLS"
print("MQTT %s:%d %s - %s\n"%(mqttTarget,mqttPort,tls,mqttUser))
#Setup the twin state
state = TwinState()
state.setState({
'trn': 0,
'ok': True,
'count': 0,
'on': False
})
#The MQTT Client Agent
mqttAgent = MQTTAgent(mqttUser)
mqttAgent.credentials(mqttUser, mqttPwd)
mqttAgent.mqttHub(mqttTarget, mqttPort, True, mqttCert)
#Configure the observers and routers
mqttObs = MQTTObserver()
stateObs = ExampleStateObserver()
pingRouter = MQTTRouterPing(mqttUser)
stateRouter = ExampleStateRouter(mqttUser, state, mqttAgent) #MQTTRouterState(mqttUser, state)
stateRouter.attachObserver(stateObs)
#Add observers and routers to the client agent
mqttAgent.addObserver(mqttObs)
mqttAgent.addRouter(pingRouter)
mqttAgent.addRouter(stateRouter)
#Set up a timer to update the state locally
xTimer = None
def tick():
delta = {
'count': stateRouter.getState()['count'] +1
}
stateRouter.updateState(delta, mqttAgent)
xTimer = threading.Timer(5.0, tick)
xTimer.start()
xTimer = threading.Timer(5.0, tick)
xTimer.start()
#Start the client agent
mqttAgent.start()
```
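The `tick`/`threading.Timer` pair above implements a recurring update by re-arming a one-shot timer on every call. A standalone sketch of the same pattern (names are illustrative, not from the repository):
```python
import threading

def make_ticker(interval, callback):
    # Re-arming one-shot timer: each tick schedules the next one,
    # mirroring the 5-second state update loop in mainThing.py.
    def tick():
        callback()
        timer = threading.Timer(interval, tick)
        timer.daemon = True
        timer.start()
    tick()

# Example: make_ticker(5.0, lambda: print("update state"))
```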
#### File: TwinThingPyMQTT/src/mqttGroup.py
```python
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, Column
from sqlalchemy.sql.sqltypes import Boolean, String, JSON
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.orm import exc
exceptions = exc.sa_exc
from array import array
import pandas as pd
import logging
class MQTTGroup:
#===========================================================================
# Constructor
#===========================================================================
def __init__(self, name: str):
self.name = name
self.xLogging = logging.getLogger(__name__)
    #===========================================================================
    # Get twin IDs that belong to a group
    # session: Database session - sqlalchemy
    #===========================================================================
def getGroupTwinIds(self, session):
con = session.connection()
try:
rs = con.execute('SELECT DISTINCT clientid FROM mqtt_acl where topic = "GRP/%s/TPC/#";'%self.name)
res = []
for row in rs:
res.append(row[0])
return res
except exceptions.SQLAlchemyError as Argument:
self.xLogging.exception("Unable to retrieve group from db")
return []
    #=======================================================================
    # Select twins that conform to an SQL-like query
    # session: Database session - sqlalchemy
    # select: array of strings naming fields to pull back; for JSON inside a field, dot notation is allowed
    # asColumn: renames the selected columns to new names
    # where: where dict of the form {column: <name>, op: <op>, value: <value>} or {and/or: [list of wheres]}
    # orient: Pandas allowed formats of ["split", "records", "index", "values", "table", "columns"]
    #=======================================================================
def selectTwinFrame(self, session, select: array = [], asColumn: array = [], where: dict = {}, orient: str="split"):
sql = 'SELECT '
selectCount = len(select)
if (selectCount == 0):
sql = sql + '* '
for i in range(selectCount):
s = self.sqlSelect(select[i])
if (s == "groupId"):
sql = sql + '"' + self.name + '" as groupId'
else:
if (len(asColumn) > i):
sql = sql + s + " as `" + asColumn[i] +"`"
else:
sql = sql + s
if (i == (selectCount -1)):
sql = sql + " "
else :
sql = sql + ", "
sql = sql + "FROM twin WHERE clientId in ("
targets = self.getGroupTwinIds(session)
targetCount = len(targets)
for i in range(targetCount):
target = targets[i]
sql = sql + '"' +target + '" '
if (i < (targetCount -1)):
sql = sql + ", "
sql = sql + ')'
if (not where == {}):
sql = sql + " AND (" + self.whereToSQL(where)+")"
sql = sql + ';'
try:
frame = pd.read_sql(sql, session.connection())
if (not orient in ["split", "records", "index", "values", "table", "columns"]):
orient = "split"
return frame.to_json( orient=orient)
except exceptions.SQLAlchemyError as Argument:
self.xLogging.exception("Select failed")
return "{}"
    #===========================================================================
    # Convert a select name to SQL format; handles internal JSON extraction
    #===========================================================================
def sqlSelect(self, column: str):
parts = column.split(".", 1)
if (len(parts) == 1):
return parts[0]
else:
return ("json_extract(%s, \'$.%s\')"%(parts[0], parts[1]))
#===========================================================================
# Convert a where dict to sql query format
#===========================================================================
def whereToSQL(self, where: dict):
res=""
if ("column" in where):
if (("op" in where) and ("value" in where)):
res = "%s %s %s"%(
self.sqlSelect(where["column"]),
where["op"],
self.sqlLiteral(where["value"])
)
if ("and" in where):
count = len(where["and"])
if (count > 0):
res = "("
for i in range(count):
res = res + self.whereToSQL(where["and"][i])
if (i < (count -1)):
res = res + " AND "
res = res + ")"
if ("or" in where):
count = len(where["or"])
if (count > 0):
res = "("
for i in range(count):
res = res + self.whereToSQL(where["or"][i])
if (i < (count -1)):
res = res + " OR "
res = res + ")"
return res
    #=======================================================================
    # Appropriately quote literals for the query
    #=======================================================================
    def sqlLiteral(self, o):
        if (isinstance(o, str)):
            return '"%s"'%o
        elif (isinstance(o, int)):
            return '%d'%o
        elif (isinstance(o, float)):
            return '%f'%o
        else:
            # Fall back to the string representation of any other type
            return str(o)
```
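To make the `where` dictionary format accepted by `selectTwinFrame`/`whereToSQL` concrete, here is a small illustration (the column names are made up for the example):
```python
# Hypothetical filter: reported.on == "true" AND reported.count > 10
where = {
    "and": [
        {"column": "reported.on", "op": "=", "value": "true"},
        {"column": "reported.count", "op": ">", "value": 10},
    ]
}
# whereToSQL(where) would render this as roughly:
#   (json_extract(reported, '$.on') = "true" AND json_extract(reported, '$.count') > 10)
```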
#### File: TwinThingPyMQTT/src/mqttRouterTwin.py
```python
import logging
import paho.mqtt.client as mqtt
from mqttRouter import MQTTRouter
import mqttTopicHelper as topicHelper
from twinState import TwinState
from twinDb import TwinDb
from mqttGroup import MQTTGroup
from mqttTwin import MQTTTwin
from twinState import TwinState
import twinProtocol
import json
from sqlalchemy import exc
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from itertools import groupby
class MQTTRouterTwin(MQTTRouter):
#=======================================================================
# Constructor
    # state: TwinState - state object holding the twin-management state
    # client_id: str - Client id for this thing
    # dbHost: str - DB host
    # dbPort: int - port for the DB connection
    # dbSchema: str - DB schema
    # dbUser: str - User name for DB access
    # dbPwd: str - password for DB access
#=======================================================================
def __init__(self, state: TwinState, client_id: str, dbHost: str, dbPort: int, dbSchema: str,
dbUser: str, dbPwd: str):
super().__init__(client_id)
self.xState = state
self.xLogging = logging.getLogger(__name__)
self.xSet = topicHelper.getTwinSet("+")
self.xGet = topicHelper.getTwinGet("+")
self.xLC = topicHelper.genLifeCycleTopic("+", topicHelper.MQTT_TOPIC_LIFECYCLE_ONLINE)
self.xUpdate = topicHelper.getThingUpdate("+")
self.xGrpSet = topicHelper.getTwinGroupSet("+")
self.xGrpGet = topicHelper.getTwinGroupGet("+")
self.xCache = {}
self.connectStr='mysql+pymysql://%s:%s@%s:%d/%s'%(
dbUser,
dbPwd,
dbHost,
dbPort,
dbSchema
)
self.session=None
self.openDb()
#===========================================================================
    # Open the DB; used locally
#===========================================================================
def openDb(self):
try:
engine = create_engine(self.connectStr)
session = sessionmaker()
session.configure(bind=engine)
self.session=session()
#set Count
fakeTwin = TwinDb("UNKNOWN")
count = fakeTwin.getTwinCount(self.session)
delta = {
'things': count
}
self.xState.updateState(delta)
except exc.SQLAlchemyError:
self.xLogging.debug("Failed to open DB")
self.session = None
#========================================================================
    # Subscribe to the twin and group Get and Set topics
    # Also subscribe to LC (lifecycle) connects
#========================================================================
def subscribe(self, interface: mqtt):
interface.subscribe(self.xGet, qos=1)
interface.subscribe(self.xSet, qos=1)
interface.subscribe(self.xLC, qos=1)
interface.subscribe(self.xUpdate, qos=1)
interface.subscribe(self.xGrpGet, qos=1)
interface.subscribe(self.xGrpSet, qos=1)
#=======================================================================
    # Route the published messages
#=======================================================================
def route(self, topic: str, payload: str, interface: mqtt):
        #Twin Get
        if ( topicHelper.topicEquals(self.xGet, topic)):
            target = self.tngTarget(topic)
            j = json.loads(payload)
            if ("select" in j):
                self.twinGet(target, j, interface)
            else:
                twin = self.getTwin(target, interface)
                self.pubUpdated(target, twin, interface)
            return True
#Twin Set
if ( topicHelper.topicEquals(self.xSet, topic)):
target = self.tngTarget(topic)
j = json.loads(payload)
self.twinSet(target, j, interface)
return True
        #Thing online sequence
if ( topicHelper.topicEquals(self.xLC, topic)):
target = self.tngTarget(topic)
if (not self.isNewTwin(target)):
twin = self.getTwin(target, interface)
setTopic = topicHelper.getThingSet(target)
setState = {'state': twin.getReportedState()}
#setState = {'delta': twin.getReportedState()}
self.xLogging.debug("Set state on returning thing %s state %s"%(target, json.dumps(setState,sort_keys=True) ))
interface.publish(setTopic, json.dumps(setState), retain=False, qos=1)
if (not twin.isUptoDate()):
deltaState = {'delta': twin.getDelta()}
self.xLogging.debug("Set delta for returning thing %s delta %s"%(target, json.dumps(deltaState,sort_keys=True)))
interface.publish(setTopic, json.dumps(deltaState), retain=False, qos=1)
else:
self.xLogging.debug("Unknown thing, so requesting get %s"%target)
getTopic = topicHelper.getThingGet(target)
interface.publish(getTopic, "{'GET': 1}", retain=False, qos=1)
self.xLogging.debug("LC event on %s"%topic)
return True
        #Handle update from Thing
if (topicHelper.topicEquals(self.xUpdate, topic)):
target = self.tngTarget(topic)
n = self.isNewTwin(target)
twin = self.getTwin(target, interface)
j = json.loads(payload)
if ("delta" in j):
twin.updateFromThing(j["delta"])
if (n):
getTopic = topicHelper.getThingGet(target)
interface.publish(getTopic, "{'GET': 1}", retain=False, qos=1)
elif ("state" in j):
twin.stateFromThing(j["state"])
#self.xLogging.debug("Twin %s state Payload %s"%(target, payload))
self.xLogging.debug("Twin %s Reported %s"%(target, json.dumps(twin.getReportedState(), sort_keys=True)))
self.pubUpdated(target, twin, interface)
self.storeTwin(twin)
return True
        #Group Set
        if (topicHelper.topicEquals(self.xGrpSet, topic)):
            grp = self.tngTarget(topic)
            j = json.loads(payload)
            self.groupSet(grp, j, interface)
            # Mark as handled, consistent with the other branches
            return True
#Group Get
if (topicHelper.topicEquals(self.xGrpGet, topic)):
grp = self.tngTarget(topic)
j = json.loads(payload)
self.groupGet(grp, j, interface)
return False
#===========================================================================
# Split out the target ID or group from the topic
#===========================================================================
def tngTarget(self, topic: str):
target = topic.split("/")[1]
return target
#===========================================================================
    # Get the twin based on its client id
    # target: str - Client id of the target thing
# interface: mqtt - mqtt interface to allow request of a get
#===========================================================================
def getTwin(self, target: str, interface: mqtt):
self.xLogging.debug("****GET TWIN %s"%target)
if (not target in self.xCache):
self.xLogging.debug("Twin not in cache %s"%target)
self.xCache[target] = TwinDb(target)
try:
if (self.xCache[target].loadFromDb(self.session)):
self.xLogging.debug("Twin %s loaded from DB"%target)
twin = self.xCache[target]
setTopic = topicHelper.getThingSet(target)
setState = {'state': twin.getReportedState()}
self.xLogging.debug("Set state on returning thing %s state %s"%(target, json.dumps(setState,sort_keys=True) ))
interface.publish(setTopic, json.dumps(setState), retain=False, qos=1)
if (not twin.isUptoDate()):
deltaState = {'delta': twin.getDelta()}
self.xLogging.debug("Set delta for returning thing %s delta %s"%(target, json.dumps(deltaState,sort_keys=True)))
interface.publish(setTopic, json.dumps(deltaState), retain=False, qos=1)
#Update Cache Stats
delta = {
'cache': self.xState.getState().get('cache', 0)+1
}
self.xState.updateState(delta)
else:
self.xLogging.debug("Twin %s not in DB"%target)
getTopic = topicHelper.getThingGet(target)
interface.publish(getTopic, "{'GET': 1}", retain=False, qos=1)
#Update Cache Stats
delta = {
'cache': self.xState.getState().get('cache', 0)+1,
'things': self.xState.getState().get('things', 0)+1,
}
self.xState.updateState(delta)
except exc.SQLAlchemyError:
self.xLogging.error("Failed to read from DB, reopen")
self.openDb()
else:
self.xLogging.debug("Twin in cache %s"%target)
return self.xCache[target]
#===========================================================================
# Is the twin new and not in the DB
# target - client_id of the thing
#===========================================================================
def isNewTwin(self, target: str):
if (not target in self.xCache):
twin = TwinDb(target)
#self.xCache[target] = twin
try:
if (twin.loadFromDb(self.session)):
return False
else:
return True
except exc.SQLAlchemyError:
self.xLogging.error("Failed to read from DB, reopen")
self.openDb()
return True
else:
return False
#===========================================================================
    # Publish the full twin state (desired, reported, declined and metadata) to the UPD channel
#===========================================================================
def pubUpdated(self, target: str, twin: TwinDb, interface: mqtt):
upd = { "desired": twin.getDesiredState(),
"desiredMeta": twin.getDesiredMeta(),
"reported": twin.getReportedState(),
"reportedMeta": twin.getReportedMeta(),
"declined": twin.getDeclinedState(),
"declinedMeta": twin.getDeclinedMeta()
}
updTopic = topicHelper.getTwinUpdate(target)
interface.publish(updTopic, json.dumps(upd), retain=False, qos=1)
#===========================================================================
    # Store the twin in the database
#===========================================================================
def storeTwin(self, twin: TwinDb):
try:
twin.updateDb(self.session)
except exc.SQLAlchemyError:
self.xLogging.error("Failed to write to DB, reopen")
self.openDb()
#===========================================================================
# Handle a Twin set request
#===========================================================================
def twinSet(self, target: str, j: dict, interface: mqtt):
twin = self.getTwin(target, interface)
newStates = {}
if ("set" in j):
newStates = j["set"]
elif ("delta" in j):
newStates = j["delta"]
elif ("state" in j):
newStates = j["state"]
else:
self.xLogging.error("Unknown format for set %s"%json.dumps(j))
return
#Update twin
self.xLogging.debug("Updating with %s"%json.dumps(newStates, sort_keys=True))
twin.updateDesiredState(newStates)
self.storeTwin(twin)
self.pubUpdated(target, twin, interface)
#Update thing
setTopic = topicHelper.getThingSet(target)
delta = json.dumps({"delta": twin.getDelta()})
self.xLogging.debug("Sending Thing delta-> %s"%delta)
interface.publish(setTopic, delta, retain=False, qos=1)
#===========================================================================
# Handle a group set request
#===========================================================================
def groupSet(self, grp: str, d: dict, interface: mqtt):
mGroup = MQTTGroup(grp)
allGroup = mGroup.getGroupTwinIds(self.session)
targets=[]
reqTargets = d.get("from", [])
if (len(reqTargets) == 0):
targets = allGroup
else:
#Validate that target is in group
for t in reqTargets:
if (t in allGroup):
targets.append(t)
else:
self.xLogging.info("Attempted to post to clientId that is not in group %s"%self.session)
for target in targets:
self.twinSet(target, d, interface)
#===========================================================================
# Handle a group get request. This will be a query structure
#===========================================================================
def groupGet(self, grp: str, d: dict, interface: mqtt):
mGroup = MQTTGroup(grp)
select = d.get("select", ["*"])
columnAs = d.get("as", [])
where = d.get("where", {})
orient = d.get("orient", "records")
query = d.get("query", 0)
s = mGroup.selectTwinFrame(self.session, select, columnAs, where, orient)
j = {
"query": query,
"res": json.loads(s)
}
topic = topicHelper.getTwinGroupResult(grp)
interface.publish(topic, json.dumps(j), retain=False, qos=1)
#=======================================================================
# Handle a twin get request of query format
#=======================================================================
def twinGet(self, target: str, d: dict, interface: mqtt):
mTwin = MQTTTwin(target)
select = d.get("select", ["*"])
columnAs = d.get("as", [])
where = d.get("where", {})
orient = d.get("orient", "records")
query = d.get("query", 0)
s = mTwin.selectTwinFrame(self.session, select, columnAs, where, orient)
j = {
"query": query,
"res": json.loads(s)
}
topic = topicHelper.getTwinResult(target)
interface.publish(topic, json.dumps(j), retain=False, qos=1)
#=======================================================================
    # Cache housekeeping: remove entries older than the given number of seconds from the cache
#=======================================================================
def cacheHousekeeping(self, removeOlderSeconds: int):
ms = removeOlderSeconds * 1000
purgeList = []
for target in self.xCache:
twin = self.xCache[target]
if (twin.timeSinceConversation() > ms):
purgeList.append(target)
for target in purgeList:
del self.xCache[target]
self.xLogging.debug("Purged %s", json.dumps(purgeList))
#Update Cache Stats
delta = {
'cache': len(self.xCache)
}
self.xState.updateState(delta)
```
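For reference, a sketch of the JSON payload shape that `groupGet` above reads from the group get topic (the field values are illustrative):
```python
import json

# Hypothetical group query: two columns for twins whose reported count exceeds 10
payload = {
    "query": 42,  # echoed back in the result so callers can match request and response
    "select": ["clientId", "reported.count"],
    "as": ["id", "count"],
    "where": {"column": "reported.count", "op": ">", "value": 10},
    "orient": "records",
}
message = json.dumps(payload)  # publish this to the group's get topic
```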
#### File: TwinThingPyMQTT/src/mqttStateObserver.py
```python
from twinState import TwinState
class MQTTStateObserver:
def stateNotify(self, state: TwinState):
return
```
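`MQTTStateObserver` is effectively an interface: subclasses override `stateNotify` to react to state changes. A hypothetical subclass (a sketch, not the `ExampleStateObserver` used in the example code):
```python
from twinState import TwinState
from mqttStateObserver import MQTTStateObserver

class LoggingStateObserver(MQTTStateObserver):
    # Prints every state it is notified about
    def stateNotify(self, state: TwinState):
        print("State changed:", state.getState())
```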
#### File: TwinThingPyMQTT/test/testTwinDb.py
```python
import unittest
from twinDb import TwinDb
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
connectStr='mysql+pymysql://%s:%s@%s:%d/%s'%(
"oracrad",
"Hm4CeqH7hkUf",
"nas3",
3307,
"OracRad"
)
class TwinDbTests(unittest.TestCase):
def testStore(self):
engine = create_engine(connectStr)
session = sessionmaker()
session.configure(bind=engine)
s=session()
twin = TwinDb("foo")
twin.stateFromThing({
"trn":1,
"count": 22
})
twin.updateDb(s)
def testLoad(self):
engine = create_engine(connectStr)
session = sessionmaker()
session.configure(bind=engine)
s=session()
twin = TwinDb("jon")
twin.stateFromThing({
"trn":1,
"count": 23
})
twin.updateDb(s)
twin2 = TwinDb("jon")
self.assertEqual(twin2.loadFromDb(s), True)
state = twin2.getReportedState()
self.assertEqual(state["count"], 23)
def testLoadUnknown(self):
engine = create_engine(connectStr)
session = sessionmaker()
session.configure(bind=engine)
s=session()
twin = TwinDb("unknown")
twin.stateFromThing({
"trn":1,
"count": 13
})
self.assertEqual(twin.loadFromDb(s), False)
state = twin.getReportedState()
self.assertEqual(state["count"], 13)
def testCount(self):
engine = create_engine(connectStr)
session = sessionmaker()
session.configure(bind=engine)
s=session()
twin = TwinDb("unknown")
self.assertGreater(twin.getTwinCount(s), 0)
if __name__ == '__main__':
unittest.main()
```
#### File: test/things/get.py
```python
import paho.mqtt.client as mqtt
import json
import time
import sys
import os
if (len(sys.argv) != 2):
print("Require target ID as parater")
sys.exit()
targetId = sys.argv[1]
user=os.environ.get("MQTT_USER")
passwd=os.environ.get("MQTT_PASSWD")
host= os.environ.get("MQTT_HOST")
port=int(os.environ.get("MQTT_PORT"))
print("MQTT %s:%d - %s\n"%(host,port, user))
connected_topic = "TNG/" + user + "/LC/ON"
lc_topic = "TNG/" + targetId + "/LC/#"
state_topics = "TNG/" + targetId + "/STATE/#"
get_topic = "TNG/" + targetId + "/STATE/GET"
set_topic = "TNG/" + targetId + "/STATE/SET"
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print("Rcv topic=" +msg.topic+" msg="+str(msg.payload))
client = mqtt.Client(client_id=user)
client.username_pw_set(username=user, password=passwd)
client.on_connect = on_connect
client.on_message = on_message
j = {'online':0}
p = json.dumps(j)
client.will_set(connected_topic, p, qos=1, retain=False) #set will
client.connect(host, port, 60)
client.loop_start()
client.subscribe( lc_topic )
client.subscribe( state_topics )
print("publishing connect")
j = {'online':1}
p = json.dumps(j)
client.publish(connected_topic,p,retain=False,qos=1)
j = {'GET': 1}
p = json.dumps(j)
print("Publishing ping %s"%p)
infot = client.publish(get_topic, p,retain=False, qos=1)
infot.wait_for_publish()
time.sleep(30)
``` |
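The topic strings assembled in this script suggest the topic layout these things use; a compact helper inferred from them (a sketch, not an authoritative spec):
```python
def thing_topics(target_id):
    # Inferred from the string constants in get.py above
    base = "TNG/" + target_id
    return {
        "online": base + "/LC/ON",      # lifecycle "online" announcement
        "lifecycle": base + "/LC/#",    # all lifecycle events
        "state": base + "/STATE/#",     # all state traffic
        "get": base + "/STATE/GET",     # request the thing's state
        "set": base + "/STATE/SET",     # push a desired state to the thing
    }
```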